"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from sklearn.base import BaseEstimator, is_classifier, clone
from sklearn.base import MetaEstimatorMixin, ChangedBehaviorWarning
from ._split import check_cv
from ._validation import _fit_and_score
from sklearn.externals.joblib import Parallel, delayed
from sklearn.externals import six
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.validation import _num_samples, indexable
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` did not
accept a custom RNG instance and always used the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameters passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
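# A minimal usage sketch of ``fit_grid_point`` (illustrative only: the dataset,
# estimator, parameter setting and train/test split below are assumptions, not
# part of this module).
def _example_fit_grid_point():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    iris = load_iris()
    estimator = SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    indices = np.arange(_num_samples(iris.data))
    train, test = indices[::2], indices[1::2]
    score, parameters, n_test = fit_grid_point(
        iris.data, iris.target, estimator, {'C': 10.0}, train, test,
        scorer, verbose=0)
    return score, parameters, n_test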
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the instance __dict__; in particular it does
# not copy the string keys for each instance.
# Deriving a namedtuple subclass just to introduce a __repr__ method would
# reintroduce the __dict__ on the instance, so we tell the Python
# interpreter that this subclass uses static __slots__ instead of dynamic
# attributes. Furthermore we don't need any additional slot in the
# subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
The long-standing behavior of this method changed in version 0.16: it
no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, labels, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y, labels = indexable(X, y, labels)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
n_splits = cv.get_n_splits(X, y, labels)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv.split(X, y, labels))
# Out is a list of 4-tuples: (score, n_test_samples, time, parameters)
n_fits = len(out)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_splits):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_splits]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_splits)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
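# A small standalone sketch of the score aggregation performed in ``_fit``
# above (the fold scores and sizes are hypothetical numbers): with ``iid=True``
# each fold score is weighted by its number of test samples, with ``iid=False``
# a plain mean over folds is taken.
def _example_iid_aggregation(fold_scores=(0.8, 0.9, 1.0), fold_sizes=(50, 30, 20)):
    weighted = sum(s * n for s, n in zip(fold_scores, fold_sizes)) / float(sum(fold_sizes))
    unweighted = sum(fold_scores) / float(len(fold_scores))
    return weighted, unweighted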
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
-----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, labels=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, labels, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None, labels=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, labels, sampled_params)
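# A minimal usage sketch for ``RandomizedSearchCV`` (the estimator and the
# parameter distributions below are assumptions chosen for illustration only).
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    iris = load_iris()
    param_distributions = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=5,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_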
"""
Module for fetching artifacts from Nexus 3.x
.. versionadded:: 2018.3.0
"""
import base64
import http.client
import logging
import os
import urllib.request
from urllib.error import HTTPError, URLError
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
try:
import xml.etree.ElementTree as ET
HAS_ELEMENT_TREE = True
except ImportError:
HAS_ELEMENT_TREE = False
log = logging.getLogger(__name__)
__virtualname__ = "nexus"
def __virtual__():
"""
Only load if elementtree xml library is available.
"""
if not HAS_ELEMENT_TREE:
return (
False,
"Cannot load {} module: ElementTree library unavailable".format(
__virtualname__
),
)
else:
return True
def get_latest_snapshot(
nexus_url,
repository,
group_id,
artifact_id,
packaging,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
):
"""
Gets latest snapshot of the given artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: nexus.get_latest_snapshot,"
" nexus_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s,"
" target_dir=%s, classifier=%s)",
nexus_url,
repository,
group_id,
artifact_id,
packaging,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
base64.b64encode("{}:{}".format(username, password).encode()).decode()
)
artifact_metadata = _get_artifact_metadata(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
headers=headers,
)
version = artifact_metadata["latest_version"]
snapshot_url, file_name = _get_snapshot_url(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
packaging=packaging,
classifier=classifier,
headers=headers,
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
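# A minimal usage sketch (the Nexus URL and Maven coordinates below are
# placeholders, and a reachable Nexus instance is assumed; in practice this
# function is normally invoked through the Salt loader rather than directly).
def _example_get_latest_snapshot():
    return get_latest_snapshot(
        nexus_url="http://nexus.example.com/repository",
        repository="libs-snapshots",
        group_id="com.example",
        artifact_id="myapp",
        packaging="jar",
        target_dir="/tmp",
        username="deployer",
        password="secret",
    )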
def get_snapshot(
nexus_url,
repository,
group_id,
artifact_id,
packaging,
version,
snapshot_version=None,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
):
"""
Gets snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: nexus.get_snapshot(nexus_url=%s,"
" repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s,"
" target_dir=%s, classifier=%s)",
nexus_url,
repository,
group_id,
artifact_id,
packaging,
version,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
base64.b64encode("{}:{}".format(username, password).encode()).decode()
)
snapshot_url, file_name = _get_snapshot_url(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
packaging=packaging,
snapshot_version=snapshot_version,
classifier=classifier,
headers=headers,
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
def get_snapshot_version_string(
nexus_url,
repository,
group_id,
artifact_id,
packaging,
version,
classifier=None,
username=None,
password=None,
):
"""
Gets the specific version string of a snapshot of the desired version of the artifact
nexus_url
URL of nexus instance
repository
Snapshot repository in nexus to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION:"
" nexus.get_snapshot_version_string(nexus_url=%s, repository=%s, group_id=%s,"
" artifact_id=%s, packaging=%s, version=%s, classifier=%s)",
nexus_url,
repository,
group_id,
artifact_id,
packaging,
version,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
base64.b64encode("{}:{}".format(username, password).encode()).decode()
)
return _get_snapshot_url(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
packaging=packaging,
classifier=classifier,
headers=headers,
just_get_version_string=True,
)
def get_latest_release(
nexus_url,
repository,
group_id,
artifact_id,
packaging,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
):
"""
Gets the latest release of the artifact
nexus_url
URL of nexus instance
repository
Release repository in nexus to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION:"
" nexus.get_latest_release(nexus_url=%s, repository=%s, group_id=%s,"
" artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
nexus_url,
repository,
group_id,
artifact_id,
packaging,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
base64.b64encode("{}:{}".format(username, password).encode()).decode()
)
artifact_metadata = _get_artifact_metadata(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
headers=headers,
)
version = artifact_metadata["latest_version"]
release_url, file_name = _get_release_url(
repository, group_id, artifact_id, packaging, version, nexus_url, classifier
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
def get_release(
nexus_url,
repository,
group_id,
artifact_id,
packaging,
version,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
):
"""
Gets the specified release of the artifact
nexus_url
URL of nexus instance
repository
Release repository in nexus to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
nexus username. Optional parameter.
password
nexus password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: nexus.get_release(nexus_url=%s,"
" repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s,"
" target_dir=%s, classifier=%s)",
nexus_url,
repository,
group_id,
artifact_id,
packaging,
version,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
base64.b64encode("{}:{}".format(username, password).encode()).decode()
)
release_url, file_name = _get_release_url(
repository, group_id, artifact_id, packaging, version, nexus_url, classifier
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
def __resolve_target_file(file_name, target_dir, target_file=None):
if target_file is None:
target_file = os.path.join(target_dir, file_name)
return target_file
def _get_snapshot_url(
nexus_url,
repository,
group_id,
artifact_id,
version,
packaging,
snapshot_version=None,
classifier=None,
headers=None,
just_get_version_string=None,
):
if headers is None:
headers = {}
has_classifier = classifier is not None and classifier != ""
if snapshot_version is None:
snapshot_version_metadata = _get_snapshot_version_metadata(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
headers=headers,
)
if packaging not in snapshot_version_metadata["snapshot_versions"]:
error_message = """Cannot find requested packaging '{packaging}' in the snapshot version metadata.
nexus_url: {nexus_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
version: {version}""".format(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
packaging=packaging,
classifier=classifier,
version=version,
)
raise nexusError(error_message)
if (
has_classifier
and classifier not in snapshot_version_metadata["snapshot_versions"]
):
error_message = """Cannot find requested classifier '{classifier}' in the snapshot version metadata.
nexus_url: {nexus_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
version: {version}""".format(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
packaging=packaging,
classifier=classifier,
version=version,
)
raise nexusError(error_message)
snapshot_version = snapshot_version_metadata["snapshot_versions"][packaging]
group_url = __get_group_id_subpath(group_id)
file_name = "{artifact_id}-{snapshot_version}{classifier}.{packaging}".format(
artifact_id=artifact_id,
snapshot_version=snapshot_version,
packaging=packaging,
classifier=__get_classifier_url(classifier),
)
snapshot_url = "{nexus_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
nexus_url=nexus_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
version=version,
file_name=file_name,
)
log.debug("snapshot_url=%s", snapshot_url)
if just_get_version_string:
return snapshot_version
else:
return snapshot_url, file_name
def _get_release_url(
repository, group_id, artifact_id, packaging, version, nexus_url, classifier=None
):
group_url = __get_group_id_subpath(group_id)
# for released versions the suffix for the file is same as version
file_name = "{artifact_id}-{version}{classifier}.{packaging}".format(
artifact_id=artifact_id,
version=version,
packaging=packaging,
classifier=__get_classifier_url(classifier),
)
release_url = "{nexus_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
nexus_url=nexus_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
version=version,
file_name=file_name,
)
log.debug("release_url=%s", release_url)
return release_url, file_name
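# A small sketch of the URL layout produced by ``_get_release_url`` above,
# using hypothetical Maven coordinates (dots in ``group_id`` become path
# segments, per the standard Maven repository layout).
def _example_release_url():
    url, file_name = _get_release_url(
        repository="libs-releases",
        group_id="com.example",
        artifact_id="myapp",
        packaging="jar",
        version="1.0.0",
        nexus_url="http://nexus.example.com/repository",
    )
    # url       -> http://nexus.example.com/repository/libs-releases/com/example/myapp/1.0.0/myapp-1.0.0.jar
    # file_name -> myapp-1.0.0.jar
    return url, file_name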
def _get_artifact_metadata_url(nexus_url, repository, group_id, artifact_id):
group_url = __get_group_id_subpath(group_id)
# for released versions the suffix for the file is same as version
artifact_metadata_url = (
"{nexus_url}/{repository}/{group_url}/{artifact_id}/maven-metadata.xml".format(
nexus_url=nexus_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
)
)
log.debug("artifact_metadata_url=%s", artifact_metadata_url)
return artifact_metadata_url
def _get_artifact_metadata_xml(nexus_url, repository, group_id, artifact_id, headers):
artifact_metadata_url = _get_artifact_metadata_url(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
)
try:
request = urllib.request.Request(artifact_metadata_url, None, headers)
artifact_metadata_xml = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
message = "Could not fetch data from url: {}. ERROR: {}".format(
artifact_metadata_url, err
)
raise CommandExecutionError(message)
log.debug("artifact_metadata_xml=%s", artifact_metadata_xml)
return artifact_metadata_xml
def _get_artifact_metadata(nexus_url, repository, group_id, artifact_id, headers):
metadata_xml = _get_artifact_metadata_xml(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
headers=headers,
)
root = ET.fromstring(metadata_xml)
assert group_id == root.find("groupId").text
assert artifact_id == root.find("artifactId").text
versions = root.find("versioning").find("versions")
versionList = []
for version in versions.iter("version"):
versionList.append(version.text)
latest_version = max(versionList)
log.debug("latest version=%s", latest_version)
return {"latest_version": latest_version}
# functions for handling snapshots
def _get_snapshot_version_metadata_url(
nexus_url, repository, group_id, artifact_id, version
):
group_url = __get_group_id_subpath(group_id)
# for released versions the suffix for the file is same as version
snapshot_version_metadata_url = "{nexus_url}/{repository}/{group_url}/{artifact_id}/{version}/maven-metadata.xml".format(
nexus_url=nexus_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
version=version,
)
log.debug("snapshot_version_metadata_url=%s", snapshot_version_metadata_url)
return snapshot_version_metadata_url
def _get_snapshot_version_metadata_xml(
nexus_url, repository, group_id, artifact_id, version, headers
):
snapshot_version_metadata_url = _get_snapshot_version_metadata_url(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
)
try:
request = urllib.request.Request(snapshot_version_metadata_url, None, headers)
snapshot_version_metadata_xml = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
message = "Could not fetch data from url: {}. ERROR: {}".format(
snapshot_version_metadata_url, err
)
raise CommandExecutionError(message)
log.debug("snapshot_version_metadata_xml=%s", snapshot_version_metadata_xml)
return snapshot_version_metadata_xml
def _get_snapshot_version_metadata(
nexus_url, repository, group_id, artifact_id, version, headers
):
metadata_xml = _get_snapshot_version_metadata_xml(
nexus_url=nexus_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
headers=headers,
)
metadata = ET.fromstring(metadata_xml)
assert group_id == metadata.find("groupId").text
assert artifact_id == metadata.find("artifactId").text
assert version == metadata.find("version").text
snapshot_versions = metadata.find("versioning").find("snapshotVersions")
extension_version_dict = {}
for snapshot_version in snapshot_versions:
extension = snapshot_version.find("extension").text
value = snapshot_version.find("value").text
extension_version_dict[extension] = value
if snapshot_version.find("classifier") is not None:
classifier = snapshot_version.find("classifier").text
extension_version_dict[classifier] = value
return {"snapshot_versions": extension_version_dict}
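# A sketch of the snapshot ``maven-metadata.xml`` shape that the parser above
# expects, with hypothetical coordinates and timestamps; the loop mirrors
# ``_get_snapshot_version_metadata``.
def _example_snapshot_metadata_dict():
    sample_xml = b"""<?xml version="1.0" encoding="UTF-8"?>
<metadata>
  <groupId>com.example</groupId>
  <artifactId>myapp</artifactId>
  <version>1.0-SNAPSHOT</version>
  <versioning>
    <snapshotVersions>
      <snapshotVersion>
        <extension>jar</extension>
        <value>1.0-20200101.120000-1</value>
      </snapshotVersion>
      <snapshotVersion>
        <classifier>sources</classifier>
        <extension>jar</extension>
        <value>1.0-20200101.120000-1</value>
      </snapshotVersion>
    </snapshotVersions>
  </versioning>
</metadata>"""
    metadata = ET.fromstring(sample_xml)
    versions = {}
    for snapshot_version in metadata.find("versioning").find("snapshotVersions"):
        value = snapshot_version.find("value").text
        versions[snapshot_version.find("extension").text] = value
        if snapshot_version.find("classifier") is not None:
            versions[snapshot_version.find("classifier").text] = value
    # versions -> {"jar": "1.0-20200101.120000-1", "sources": "1.0-20200101.120000-1"}
    return versions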
def __save_artifact(artifact_url, target_file, headers):
log.debug("__save_artifact(%s, %s)", artifact_url, target_file)
result = {"status": False, "changes": {}, "comment": ""}
if os.path.isfile(target_file):
log.debug("File %s already exists, checking checksum...", target_file)
checksum_url = artifact_url + ".sha1"
checksum_success, artifact_sum, checksum_comment = __download(
checksum_url, headers
)
if checksum_success:
log.debug("Downloaded SHA1 SUM: %s", artifact_sum)
file_sum = __salt__["file.get_hash"](path=target_file, form="sha1")
log.debug("Target file (%s) SHA1 SUM: %s", target_file, file_sum)
if artifact_sum == file_sum:
result["status"] = True
result["target_file"] = target_file
result["comment"] = (
"File {} already exists, checksum matches with nexus.\n"
"Checksum URL: {}".format(target_file, checksum_url)
)
return result
else:
result["comment"] = (
"File {} already exists, checksum does not match with nexus!\n"
"Checksum URL: {}".format(target_file, checksum_url)
)
else:
result["status"] = False
result["comment"] = checksum_comment
return result
log.debug("Downloading: %s -> %s", artifact_url, target_file)
try:
request = urllib.request.Request(artifact_url, None, headers)
f = urllib.request.urlopen(request)
with salt.utils.files.fopen(target_file, "wb") as local_file:
local_file.write(salt.utils.stringutils.to_bytes(f.read()))
result["status"] = True
result["comment"] = __append_comment(
"Artifact downloaded from URL: {}".format(artifact_url),
result["comment"],
)
result["changes"]["downloaded_file"] = target_file
result["target_file"] = target_file
except (HTTPError, URLError) as e:
result["status"] = False
result["comment"] = __get_error_comment(e, artifact_url)
return result
def __get_group_id_subpath(group_id):
group_url = group_id.replace(".", "/")
return group_url
def __get_classifier_url(classifier):
has_classifier = classifier is not None and classifier != ""
return "-" + classifier if has_classifier else ""
def __download(request_url, headers):
log.debug("Downloading content from %s", request_url)
success = False
content = None
comment = None
try:
request = urllib.request.Request(request_url, None, headers)
url = urllib.request.urlopen(request)
content = url.read()
success = True
except HTTPError as e:
comment = __get_error_comment(e, request_url)
return success, content, comment
def __get_error_comment(http_error, request_url):
if http_error.code == http.client.NOT_FOUND:
comment = "HTTP Error 404. Request URL: " + request_url
elif http_error.code == http.client.CONFLICT:
comment = (
"HTTP Error 409: Conflict. Requested URL: {}. \nThis error may be caused by"
" reading snapshot artifact from non-snapshot repository.".format(
request_url
)
)
else:
comment = "HTTP Error {err_code}. Request URL: {url}".format(
err_code=http_error.code, url=request_url
)
return comment
def __append_comment(new_comment, current_comment=""):
return current_comment + "\n" + new_comment
class nexusError(Exception):
def __init__(self, value):
super().__init__()
self.value = value
def __str__(self):
return repr(self.value)
from urllib.parse import urlencode
from django.apps import apps
from django.contrib.admin.utils import quote, unquote
from django.core.paginator import Paginator
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from django.utils.translation import ngettext
from wagtail.admin import messages
from wagtail.admin.auth import permission_denied
from wagtail.admin.edit_handlers import ObjectList, extract_panel_definitions_from_model_class
from wagtail.admin.forms.search import SearchForm
from wagtail.core import hooks
from wagtail.search.backends import get_search_backend
from wagtail.search.index import class_is_indexed
from wagtail.snippets.models import get_snippet_models
from wagtail.snippets.permissions import get_permission_name, user_can_edit_snippet_type
# == Helper functions ==
def get_snippet_model_from_url_params(app_name, model_name):
"""
Retrieve a model from an app_label / model_name combo.
Raise Http404 if the model is not a valid snippet type.
"""
try:
model = apps.get_model(app_name, model_name)
except LookupError:
raise Http404
if model not in get_snippet_models():
# don't allow people to hack the URL to edit content types that aren't registered as snippets
raise Http404
return model
SNIPPET_EDIT_HANDLERS = {}
def get_snippet_edit_handler(model):
if model not in SNIPPET_EDIT_HANDLERS:
if hasattr(model, 'edit_handler'):
# use the edit handler specified on the model class
edit_handler = model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(model)
edit_handler = ObjectList(panels)
SNIPPET_EDIT_HANDLERS[model] = edit_handler.bind_to(model=model)
return SNIPPET_EDIT_HANDLERS[model]
# == Views ==
def index(request):
snippet_model_opts = [
model._meta for model in get_snippet_models()
if user_can_edit_snippet_type(request.user, model)]
return TemplateResponse(request, 'wagtailsnippets/snippets/index.html', {
'snippet_model_opts': sorted(
snippet_model_opts, key=lambda x: x.verbose_name.lower())})
def list(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permissions = [
get_permission_name(action, model)
for action in ['add', 'change', 'delete']
]
if not any([request.user.has_perm(perm) for perm in permissions]):
return permission_denied(request)
items = model.objects.all()
# Preserve the snippet's model-level ordering if specified, but fall back on PK if not
# (to ensure pagination is consistent)
if not items.ordered:
items = items.order_by('pk')
# Search
is_searchable = class_is_indexed(model)
is_searching = False
search_query = None
if is_searchable and 'q' in request.GET:
search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
if search_form.is_valid():
search_query = search_form.cleaned_data['q']
search_backend = get_search_backend()
items = search_backend.search(search_query, items)
is_searching = True
else:
search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
paginator = Paginator(items, per_page=20)
paginated_items = paginator.get_page(request.GET.get('p'))
# Template
if request.is_ajax():
template = 'wagtailsnippets/snippets/results.html'
else:
template = 'wagtailsnippets/snippets/type_index.html'
return TemplateResponse(request, template, {
'model_opts': model._meta,
'items': paginated_items,
'can_add_snippet': request.user.has_perm(get_permission_name('add', model)),
'can_delete_snippets': request.user.has_perm(get_permission_name('delete', model)),
'is_searchable': is_searchable,
'search_form': search_form,
'is_searching': is_searching,
'query_string': search_query,
})
def create(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('add', model)
if not request.user.has_perm(permission):
return permission_denied(request)
for fn in hooks.get_hooks('before_create_snippet'):
result = fn(request, model)
if hasattr(result, 'status_code'):
return result
instance = model()
edit_handler = get_snippet_edit_handler(model)
edit_handler = edit_handler.bind_to(request=request)
form_class = edit_handler.get_form_class()
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("%(snippet_type)s '%(instance)s' created.") % {
'snippet_type': capfirst(model._meta.verbose_name),
'instance': instance
},
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, quote(instance.pk))
), _('Edit'))
]
)
for fn in hooks.get_hooks('after_create_snippet'):
result = fn(request, instance)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.validation_error(
request, _("The snippet could not be created due to errors."), form
)
else:
form = form_class(instance=instance)
edit_handler = edit_handler.bind_to(instance=instance, form=form)
return TemplateResponse(request, 'wagtailsnippets/snippets/create.html', {
'model_opts': model._meta,
'edit_handler': edit_handler,
'form': form,
})
def edit(request, app_label, model_name, pk):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('change', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, pk=unquote(pk))
for fn in hooks.get_hooks('before_edit_snippet'):
result = fn(request, instance)
if hasattr(result, 'status_code'):
return result
edit_handler = get_snippet_edit_handler(model)
edit_handler = edit_handler.bind_to(instance=instance, request=request)
form_class = edit_handler.get_form_class()
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("%(snippet_type)s '%(instance)s' updated.") % {
'snippet_type': capfirst(model._meta.verbose_name),
'instance': instance
},
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, quote(instance.pk))
), _('Edit'))
]
)
for fn in hooks.get_hooks('after_edit_snippet'):
result = fn(request, instance)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.validation_error(
request, _("The snippet could not be saved due to errors."), form
)
else:
form = form_class(instance=instance)
edit_handler = edit_handler.bind_to(form=form)
return TemplateResponse(request, 'wagtailsnippets/snippets/edit.html', {
'model_opts': model._meta,
'instance': instance,
'edit_handler': edit_handler,
'form': form,
})
def delete(request, app_label, model_name, pk=None):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('delete', model)
if not request.user.has_perm(permission):
return permission_denied(request)
if pk:
instances = [get_object_or_404(model, pk=unquote(pk))]
else:
ids = request.GET.getlist('id')
instances = model.objects.filter(pk__in=ids)
for fn in hooks.get_hooks('before_delete_snippet'):
result = fn(request, instances)
if hasattr(result, 'status_code'):
return result
count = len(instances)
if request.method == 'POST':
for instance in instances:
instance.delete()
if count == 1:
message_content = _("%(snippet_type)s '%(instance)s' deleted.") % {
'snippet_type': capfirst(model._meta.verbose_name),
'instance': instance
}
else:
# This message is only used in plural form, but we'll define it with ngettext so that
# languages with multiple plural forms can be handled correctly (or, at least, as
# correctly as possible within the limitations of verbose_name_plural...)
message_content = ngettext(
"%(count)d %(snippet_type)s deleted.",
"%(count)d %(snippet_type)s deleted.",
count
) % {
'snippet_type': capfirst(model._meta.verbose_name_plural),
'count': count
}
messages.success(request, message_content)
for fn in hooks.get_hooks('after_delete_snippet'):
result = fn(request, instances)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailsnippets:list', app_label, model_name)
return TemplateResponse(request, 'wagtailsnippets/snippets/confirm_delete.html', {
'model_opts': model._meta,
'count': count,
'instances': instances,
'submit_url': (
reverse('wagtailsnippets:delete-multiple', args=(app_label, model_name))
+ '?' + urlencode([('id', instance.pk) for instance in instances])
),
})
def usage(request, app_label, model_name, pk):
model = get_snippet_model_from_url_params(app_label, model_name)
instance = get_object_or_404(model, pk=unquote(pk))
paginator = Paginator(instance.get_usage(), per_page=20)
used_by = paginator.get_page(request.GET.get('p'))
return TemplateResponse(request, "wagtailsnippets/snippets/usage.html", {
'instance': instance,
'used_by': used_by
})
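# --------------------------------------------------------------------------
# Hedged illustration (not part of these views): the before_*/after_* hooks
# iterated above are registered elsewhere via Wagtail's hooks registry,
# typically in a project's wagtail_hooks.py. Returning an HttpResponse from a
# hook short-circuits the view, which is what the hasattr(result,
# 'status_code') checks above rely on. Sketch, assuming Wagtail 2.x paths:
#
# # wagtail_hooks.py (illustrative)
# # from wagtail.core import hooks
# # from django.http import HttpResponseForbidden
# #
# # @hooks.register('before_create_snippet')
# # def block_snippet_creation(request, model):
# #     if not request.user.is_superuser:
# #         return HttpResponseForbidden()
# --------------------------------------------------------------------------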
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from google.cloud._testing import _GAXBaseAPI
TABLE_NAME = 'citizens'
COLUMNS = ['email', 'first_name', 'last_name', 'age']
VALUES = [
[u'phred@example.com', u'Phred', u'Phlyntstone', 32],
[u'bharney@example.com', u'Bharney', u'Rhubble', 31],
]
class _BaseTest(unittest.TestCase):
PROJECT_ID = 'project-id'
INSTANCE_ID = 'instance-id'
INSTANCE_NAME = 'projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID
DATABASE_ID = 'database-id'
DATABASE_NAME = INSTANCE_NAME + '/databases/' + DATABASE_ID
SESSION_ID = 'session-id'
SESSION_NAME = DATABASE_NAME + '/sessions/' + SESSION_ID
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
class Test_BatchBase(_BaseTest):
def _getTargetClass(self):
from google.cloud.spanner.batch import _BatchBase
return _BatchBase
def _compare_values(self, result, source):
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
for found, expected in zip(result, source):
self.assertIsInstance(found, ListValue)
self.assertEqual(len(found.values), len(expected))
for found_cell, expected_cell in zip(found.values, expected):
self.assertIsInstance(found_cell, Value)
if isinstance(expected_cell, int):
self.assertEqual(
int(found_cell.string_value), expected_cell)
else:
self.assertEqual(found_cell.string_value, expected_cell)
def test_ctor(self):
session = _Session()
base = self._make_one(session)
self.assertIs(base._session, session)
self.assertEqual(len(base._mutations), 0)
def test__check_state_virtual(self):
session = _Session()
base = self._make_one(session)
with self.assertRaises(NotImplementedError):
base._check_state()
def test_insert(self):
from google.cloud.proto.spanner.v1.mutation_pb2 import Mutation
session = _Session()
base = self._make_one(session)
base.insert(TABLE_NAME, columns=COLUMNS, values=VALUES)
self.assertEqual(len(base._mutations), 1)
mutation = base._mutations[0]
self.assertIsInstance(mutation, Mutation)
write = mutation.insert
self.assertIsInstance(write, Mutation.Write)
self.assertEqual(write.table, TABLE_NAME)
self.assertEqual(write.columns, COLUMNS)
self._compare_values(write.values, VALUES)
def test_update(self):
from google.cloud.proto.spanner.v1.mutation_pb2 import Mutation
session = _Session()
base = self._make_one(session)
base.update(TABLE_NAME, columns=COLUMNS, values=VALUES)
self.assertEqual(len(base._mutations), 1)
mutation = base._mutations[0]
self.assertIsInstance(mutation, Mutation)
write = mutation.update
self.assertIsInstance(write, Mutation.Write)
self.assertEqual(write.table, TABLE_NAME)
self.assertEqual(write.columns, COLUMNS)
self._compare_values(write.values, VALUES)
def test_insert_or_update(self):
from google.cloud.proto.spanner.v1.mutation_pb2 import Mutation
session = _Session()
base = self._make_one(session)
base.insert_or_update(TABLE_NAME, columns=COLUMNS, values=VALUES)
self.assertEqual(len(base._mutations), 1)
mutation = base._mutations[0]
self.assertIsInstance(mutation, Mutation)
write = mutation.insert_or_update
self.assertIsInstance(write, Mutation.Write)
self.assertEqual(write.table, TABLE_NAME)
self.assertEqual(write.columns, COLUMNS)
self._compare_values(write.values, VALUES)
def test_replace(self):
from google.cloud.proto.spanner.v1.mutation_pb2 import Mutation
session = _Session()
base = self._make_one(session)
base.replace(TABLE_NAME, columns=COLUMNS, values=VALUES)
self.assertEqual(len(base._mutations), 1)
mutation = base._mutations[0]
self.assertIsInstance(mutation, Mutation)
write = mutation.replace
self.assertIsInstance(write, Mutation.Write)
self.assertEqual(write.table, TABLE_NAME)
self.assertEqual(write.columns, COLUMNS)
self._compare_values(write.values, VALUES)
def test_delete(self):
from google.cloud.proto.spanner.v1.mutation_pb2 import Mutation
from google.cloud.spanner.keyset import KeySet
keys = [[0], [1], [2]]
keyset = KeySet(keys=keys)
session = _Session()
base = self._make_one(session)
base.delete(TABLE_NAME, keyset=keyset)
self.assertEqual(len(base._mutations), 1)
mutation = base._mutations[0]
self.assertIsInstance(mutation, Mutation)
delete = mutation.delete
self.assertIsInstance(delete, Mutation.Delete)
self.assertEqual(delete.table, TABLE_NAME)
key_set_pb = delete.key_set
self.assertEqual(len(key_set_pb.ranges), 0)
self.assertEqual(len(key_set_pb.keys), len(keys))
for found, expected in zip(key_set_pb.keys, keys):
self.assertEqual(
[int(value.string_value) for value in found.values], expected)
class TestBatch(_BaseTest):
def _getTargetClass(self):
from google.cloud.spanner.batch import Batch
return Batch
def test_ctor(self):
session = _Session()
batch = self._make_one(session)
self.assertIs(batch._session, session)
def test_commit_already_committed(self):
from google.cloud.spanner.keyset import KeySet
keys = [[0], [1], [2]]
keyset = KeySet(keys=keys)
database = _Database()
session = _Session(database)
batch = self._make_one(session)
batch.committed = object()
batch.delete(TABLE_NAME, keyset=keyset)
with self.assertRaises(ValueError):
batch.commit()
def test_commit_grpc_error(self):
from google.gax.errors import GaxError
from google.cloud.proto.spanner.v1.transaction_pb2 import (
TransactionOptions)
from google.cloud.proto.spanner.v1.mutation_pb2 import (
Mutation as MutationPB)
from google.cloud.spanner.keyset import KeySet
keys = [[0], [1], [2]]
keyset = KeySet(keys=keys)
database = _Database()
api = database.spanner_api = _FauxSpannerAPI(
_random_gax_error=True)
session = _Session(database)
batch = self._make_one(session)
batch.delete(TABLE_NAME, keyset=keyset)
with self.assertRaises(GaxError):
batch.commit()
(session, mutations, single_use_txn, options) = api._committed
self.assertEqual(session, self.SESSION_NAME)
self.assertEqual(len(mutations), 1)
mutation = mutations[0]
self.assertIsInstance(mutation, MutationPB)
self.assertTrue(mutation.HasField('delete'))
delete = mutation.delete
self.assertEqual(delete.table, TABLE_NAME)
keyset_pb = delete.key_set
self.assertEqual(len(keyset_pb.ranges), 0)
self.assertEqual(len(keyset_pb.keys), len(keys))
for found, expected in zip(keyset_pb.keys, keys):
self.assertEqual(
[int(value.string_value) for value in found.values], expected)
self.assertIsInstance(single_use_txn, TransactionOptions)
self.assertTrue(single_use_txn.HasField('read_write'))
self.assertEqual(options.kwargs['metadata'],
[('google-cloud-resource-prefix', database.name)])
def test_commit_ok(self):
import datetime
from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
from google.cloud.proto.spanner.v1.transaction_pb2 import (
TransactionOptions)
from google.cloud._helpers import UTC
from google.cloud._helpers import _datetime_to_pb_timestamp
now = datetime.datetime.utcnow().replace(tzinfo=UTC)
now_pb = _datetime_to_pb_timestamp(now)
response = CommitResponse(commit_timestamp=now_pb)
database = _Database()
api = database.spanner_api = _FauxSpannerAPI(
_commit_response=response)
session = _Session(database)
batch = self._make_one(session)
batch.insert(TABLE_NAME, COLUMNS, VALUES)
committed = batch.commit()
self.assertEqual(committed, now)
self.assertEqual(batch.committed, committed)
(session, mutations, single_use_txn, options) = api._committed
self.assertEqual(session, self.SESSION_NAME)
self.assertEqual(mutations, batch._mutations)
self.assertIsInstance(single_use_txn, TransactionOptions)
self.assertTrue(single_use_txn.HasField('read_write'))
self.assertEqual(options.kwargs['metadata'],
[('google-cloud-resource-prefix', database.name)])
def test_context_mgr_already_committed(self):
import datetime
from google.cloud._helpers import UTC
now = datetime.datetime.utcnow().replace(tzinfo=UTC)
database = _Database()
api = database.spanner_api = _FauxSpannerAPI()
session = _Session(database)
batch = self._make_one(session)
batch.committed = now
with self.assertRaises(ValueError):
with batch:
pass # pragma: NO COVER
self.assertEqual(api._committed, None)
def test_context_mgr_success(self):
import datetime
from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
from google.cloud.proto.spanner.v1.transaction_pb2 import (
TransactionOptions)
from google.cloud._helpers import UTC
from google.cloud._helpers import _datetime_to_pb_timestamp
now = datetime.datetime.utcnow().replace(tzinfo=UTC)
now_pb = _datetime_to_pb_timestamp(now)
response = CommitResponse(commit_timestamp=now_pb)
database = _Database()
api = database.spanner_api = _FauxSpannerAPI(
_commit_response=response)
session = _Session(database)
batch = self._make_one(session)
with batch:
batch.insert(TABLE_NAME, COLUMNS, VALUES)
self.assertEqual(batch.committed, now)
(session, mutations, single_use_txn, options) = api._committed
self.assertEqual(session, self.SESSION_NAME)
self.assertEqual(mutations, batch._mutations)
self.assertIsInstance(single_use_txn, TransactionOptions)
self.assertTrue(single_use_txn.HasField('read_write'))
self.assertEqual(options.kwargs['metadata'],
[('google-cloud-resource-prefix', database.name)])
def test_context_mgr_failure(self):
import datetime
from google.cloud.proto.spanner.v1.spanner_pb2 import CommitResponse
from google.cloud._helpers import UTC
from google.cloud._helpers import _datetime_to_pb_timestamp
now = datetime.datetime.utcnow().replace(tzinfo=UTC)
now_pb = _datetime_to_pb_timestamp(now)
response = CommitResponse(commit_timestamp=now_pb)
database = _Database()
api = database.spanner_api = _FauxSpannerAPI(
_commit_response=response)
session = _Session(database)
batch = self._make_one(session)
class _BailOut(Exception):
pass
with self.assertRaises(_BailOut):
with batch:
batch.insert(TABLE_NAME, COLUMNS, VALUES)
raise _BailOut()
self.assertEqual(batch.committed, None)
self.assertEqual(api._committed, None)
self.assertEqual(len(batch._mutations), 1)
class _Session(object):
def __init__(self, database=None, name=TestBatch.SESSION_NAME):
self._database = database
self.name = name
class _Database(object):
name = 'testing'
class _FauxSpannerAPI(_GAXBaseAPI):
_create_instance_conflict = False
_instance_not_found = False
_committed = None
def commit(self, session, mutations,
transaction_id='', single_use_transaction=None, options=None):
from google.gax.errors import GaxError
assert transaction_id == ''
self._committed = (session, mutations, single_use_transaction, options)
if self._random_gax_error:
raise GaxError('error')
return self._commit_response
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# need linear algebra packages
import scipy.sparse.linalg as _sla
import scipy.linalg as _la
import scipy.sparse as _sp
import numpy as _np
from scipy.integrate import complex_ode
from joblib import delayed,Parallel
from numpy import vstack
import warnings
__all__ = ['Floquet','Floquet_t_vec']
#warnings.warn("Floquet Package has not been fully tested yet, please report bugs to: https://github.com/weinbe58/qspin/issues.",UserWarning,stacklevel=3)
def _range_iter(start,stop,step):
"""'xrange' is replaced with 'range' in python 3. If python 2 is being used, range will cause memory overflow.
This function is a work around to get the functionality of 'xrange' for both python 2 and 3 simultaineously.
"""
from itertools import count
counter = count(start,step)
while True:
i = next(counter)
if i < stop:
yield i
else:
break
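# Quick sanity check of the generator above (illustrative only):
# list(_range_iter(0, 5, 2)) == [0, 2, 4]   # same as list(range(0, 5, 2))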
def _evolve_cont(i,H,T,atol=1E-9,rtol=1E-9):
"""This function evolves the i-th local basis state under the Hamiltonian H up to period T.
It is used to construct the stroboscopic evolution operator.
"""
psi0=_np.zeros((H.Ns,),dtype=_np.complex128)
psi0[i]=1.0
t_list = [0,T]
nsteps = 1
while nsteps<1E7:
try:
psi_t=H.evolve(psi0,0,t_list,eom="SE",iterate=False,atol=atol,rtol=rtol)
return psi_t[:,-1]
except RuntimeError:
nsteps *= 10
t_list = _np.linspace(0,T,num=nsteps+1,endpoint=True)
raise RuntimeError("Ode solver takes more than {0:d} nsteps to complete time evolution. Cannot integrate ODE successfully.".format(nsteps))
'''
solver=complex_ode(H._hamiltonian__SO)
solver.set_integrator('dop853', atol=atol,rtol=rtol,nsteps=nsteps)
solver.set_initial_value(psi0,t=0.0)
t_list = [0,T]
nsteps = 1
while True:
for t in t_list[1:]:
solver.integrate(t)
if solver.successful():
if t == T:
return solver.y
continue
else:
break
nsteps *= 10
t_list = _np.linspace(0,T,num=nsteps+1,endpoint=True)
'''
def _evolve_step_3(i,H_list,dt_list):
"""This function calculates the evolved state for Periodic Step (point 3. in def of 'evo_dict').
"""
psi0=_np.zeros((H_list[0].Ns,),dtype=_np.complex128)
psi0[i]=1.0
for dt,H in zip(dt_list,H_list):
# can replace _sla.expm_multiply by tools.expm_multiply_parallel
psi0 = _sla.expm_multiply(-1j*dt*H.tocsr(),psi0)
return psi0
def _evolve_step_2(i,H,t_list,dt_list):
"""This function calculates the evolved state for Periodic Step (point 2. in def of 'evo_dict'.
"""
psi0=_np.zeros((H.Ns,),dtype=_np.complex128)
psi0[i]=1.0
for t,dt in zip(t_list,dt_list):
# can replace _sla.expm_multiply by tools.expm_multiply_parallel
psi0 = _sla.expm_multiply(-1j*dt*H.tocsr(t),psi0)
return psi0
### USING JOBLIB ###
def _get_U_cont(H,T,n_jobs,atol=1E-9,rtol=1E-9):
sols=Parallel(n_jobs=n_jobs)(delayed(_evolve_cont)(i,H,T,atol,rtol) for i in _range_iter(0,H.Ns,1))
return vstack(sols)
def _get_U_step_3(H_list,dt_list,n_jobs):
sols=Parallel(n_jobs=n_jobs)(delayed(_evolve_step_3)(i,H_list,dt_list) for i in _range_iter(0,H_list[0].Ns,1))
return vstack(sols)
def _get_U_step_2(H,t_list,dt_list,n_jobs):
sols=Parallel(n_jobs=n_jobs)(delayed(_evolve_step_2)(i,H,t_list,dt_list) for i in _range_iter(0,H.Ns,1))
return vstack(sols)
class Floquet(object):
"""Calculates the Floquet spectrum, Floquet Hamiltonian and Floquet states.
Loops over the basis states to compute the Floquet unitary :math:`U_F` (evolution operator over one period) for a
periodically-driven system governed by the Hamiltonian :math:`H(t)=H(t+T)`:
.. math::
U_F=U(T,0)=\\mathcal{T}_t\\exp\\left(-i\\int_0^T\\mathrm{d}t H(t) \\right)
with :math:`\\mathcal{T}_t\\exp` denoting the time-ordered exponential.
Examples
--------
Consider the following periodically driven spin-1/2 Hamiltonian
.. math::
H(t) = \\left\\{
\\begin{array}{cl} \\sum_j J\\sigma^z_{j+1}\\sigma^z_j + h\\sigma^z_j , & t\\in[-T/4,T/4] \\newline
\\sum_j g\\sigma^x_j, & t \\in[T/4,3T/4]
\\end{array}
\\right\\} \\mathrm{mod}\\ T
where :math:`T=2\\pi/\\Omega` is the drive period. We choose the starting point of the evolution
(or equivalently -- the driving phase) to be :math:`t=0`.
The following snippet of code shows how to calculate the Floquet eigenstates and the corresponding quasienergies,
using `evo_dict` variable, case ii (see below).
.. literalinclude:: ../../doc_examples/Floquet_class-example.py
:linenos:
:language: python
:lines: 7-
"""
def __init__(self,evo_dict,HF=False,UF=False,thetaF=False,VF=False,n_jobs=1):
"""Instantiates the `Floquet` class.
Parameters
-----------
evo_dict : dict
Dictionary which passes the different types of protocols to calculate the Floquet unitary.
Depending on the protocol type, it contains the following keys:
i) Periodic continuous protocol from a `hamiltonian` object.
* `H` : hamiltonian object to generate the time evolution.
* `T` : period of the protocol.
* `rtol` : (optional) relative tolerance for the ODE solver. (default = 1E-12)
* `atol` : (optional) absolute tolerance for the ODE solver. (default = 1E-12)
ii) Periodic step protocol from a `hamiltonian` object.
* `H` : single hamiltonian object to generate the hamiltonians at each step. Periodic step drives can be encoded using a single function, e.g. :math:`\\mathrm{sign}(\\cos(\\Omega t))`.
* `t_list` : list of times to evaluate the hamiltonian at for each step.
* `dt_list` : list of time step durations for each step of the evolution.
* `T`: (optional) drive period used to compute the Floquet Hamiltonian `H_F`. If not specified, then `T=sum(dt_list)`. Use this option for periodic delta kicks.
iii) Periodic step protocol from a list of hamiltonians.
* `H_list` : list of matrices to evolve with.
* `dt_list` : list of time step durations. Must be the same size as `H_list`.
* `T`: (optional) drive period used to compute the Floquet Hamiltonian `H_F`. If not specified, then `T=sum(dt_list)`. Use this option for periodic delta kicks.
HF : bool
Set to `True` to calculate and return Floquet Hamiltonian under attribute `_.HF`. Default is `False`.
UF : bool
Set to `True` to save evolution operator under attribute `_.UF`. Default is `False`.
thetaF : bool
Set to `True` to save eigenvalues of the evolution operator (Floquet phases) under attribute `_.thetaF`. Default is `False`.
VF : bool
Set to `True` to save Floquet states under attribute `_.VF`. Default is `False`.
n_jobs : int, optional
Sets the number of parallel processes (jobs) used when looping over the basis states to compute the Floquet unitary. Default is 1.
"""
from ..operators import ishamiltonian
variables = []
if HF: variables.append('HF')
if UF: variables.append('UF')
if VF: variables.append('VF')
if thetaF: variables.append('thetaF')
if isinstance(evo_dict,dict):
keys = evo_dict.keys()
if set(keys) == set(["H","T"]) or set(keys) == set(["H","T","atol"]) or set(keys) == set(["H","T","rtol"]) or set(keys) == set(["H","T","atol","rtol"]):
H = evo_dict["H"]
T = evo_dict["T"]
self._atol = evo_dict.get("atol")
self._rtol = evo_dict.get("rtol")
if self._atol is None:
self._atol=1E-12
elif type(self._atol) is not float:
raise ValueError("expecting float for 'atol'.")
if self._rtol is None:
self._rtol=1E-12
elif type(self._rtol) is not float:
raise ValueError("expecting float for 'rtol'.")
if not ishamiltonian(H):
raise ValueError("expecting hamiltonian object for 'H'.")
if not _np.isscalar(T):
raise ValueError("expecting scalar object for 'T'.")
if _np.iscomplex(T):
raise ValueError("expecting real value for 'T'.")
### check if H is periodic with period T
# pick an arbitrary, non-special time at which to test periodicity
t = _np.cos( (_np.pi/_np.exp(0))**( 1.0/_np.euler_gamma ) )
for func in H.dynamic:
if abs(func(t) - func(t+T) ) > 1E5*_np.finfo(_np.complex128).eps:
raise TypeError("Hamiltonian 'H' must be periodic with period 'T'!")
if not (type(n_jobs) is int):
raise TypeError("expecting integer value for optional variable 'n_jobs'!")
self._T = T
# calculate evolution operator
UF = _get_U_cont(H,self.T,n_jobs,atol=self._atol,rtol=self._rtol)
elif set(keys) == set(["H","t_list","dt_list"]) or set(keys) == set(["H","t_list","dt_list","T"]):
H = evo_dict["H"]
t_list = _np.asarray(evo_dict["t_list"],dtype=_np.float64)
dt_list = _np.asarray(evo_dict["dt_list"],dtype=_np.float64)
if t_list.ndim != 1:
raise ValueError("t_list must be 1d array.")
if dt_list.ndim != 1:
raise ValueError("dt_list must be 1d array.")
if "T" in set(keys):
self._T=evo_dict["T"]
else:
self._T = dt_list.sum()
if not ishamiltonian(H):
raise ValueError("expecting hamiltonian object for 'H'.")
# calculate evolution operator
UF = _get_U_step_2(H,t_list,dt_list,n_jobs)
elif set(keys) == set(["H_list","dt_list"]) or set(keys) == set(["H_list","dt_list","T"]):
H_list = evo_dict["H_list"]
dt_list = _np.asarray(evo_dict["dt_list"],dtype=_np.float64)
if dt_list.ndim != 1:
raise ValueError("dt_list must be 1d array.")
if "T" in set(keys):
self._T=evo_dict["T"]
else:
self._T = dt_list.sum()
if type(H_list) not in (list,tuple):
raise ValueError("expecting list/tuple for H_list.")
if len(dt_list) != len(H_list):
raise ValueError("Expecting arguments 'H_list' and 'dt_list' to have the same length!")
# calculate evolution operator
UF = _get_U_step_3(H_list,dt_list,n_jobs)
else:
raise ValueError("evo_dict={0} is not correct format.".format(evo_dict))
else:
raise ValueError("evo_dict={0} is not correct format.".format(evo_dict))
if 'UF' in variables:
self._UF = _np.copy(UF)
if 'HF' in variables:
self._HF = 1j/self._T*_la.logm(UF)
# find Floquet states and phases
if "VF" in variables:
thetaF, VF = _la.eig(UF,overwrite_a=True)
# check and orthogonalise VF in degenerate subspaces
if _np.any( _np.diff(_np.sort(thetaF)) < 1E3*_np.finfo(thetaF.dtype).eps):
VF,_ = _la.qr(VF, overwrite_a=True)
# https://math.stackexchange.com/questions/269164/diagonalizable-unitarily-schur-factorization
# thetaF, VF = _la.schur(UF,overwrite_a=True,output='real')
# thetaF=thetaF.diagonal()
# calculate and order q'energies
EF = _np.real( 1j/self.T*_np.log(thetaF) )
# sort and order
ind_EF = _np.argsort(EF)
self._EF = _np.array(EF[ind_EF])
self._VF = _np.array(VF[:,ind_EF])
# clear up junk
del VF
else:
thetaF = _la.eigvals(UF,overwrite_a=True)
# calculate and order q'energies
EF = _np.real( 1j/self.T*_np.log(thetaF) )
ind_EF = _np.argsort(EF)
self._EF = _np.array(EF[ind_EF])
if 'thetaF' in variables:
# sort phases
thetaF = _np.array(thetaF[ind_EF])
self._thetaF = thetaF
@property
def T(self):
"""float: drive period."""
return self._T
@property
def EF(self):
"""numpy.ndarray(float): ordered Floquet quasi-energies in interval :math:`[-\\Omega,\\Omega]`."""
return self._EF
@property
def HF(self):
"""numpy.ndarray(float): Floquet Hamiltonian.
Requires __init__ argument HF=True.
"""
if hasattr(self,"_HF"):
return self._HF
else:
raise AttributeError("missing atrribute 'HF'.")
@property
def UF(self):
"""numpy.ndarray(float): Floquet unitary.
Requires __init__ argument UF=True.
"""
if hasattr(self,"_UF"):
return self._UF
else:
raise AttributeError("missing atrribute 'UF'.")
@property
def thetaF(self):
"""numpy.ndarray(float): Floquet eigenphases.
Requires __init__ argument thetaF=True.
"""
if hasattr(self,"_thetaF"):
return self._thetaF
else:
raise AttributeError("missing atrribute 'thetaF'.")
@property
def VF(self):
"""numpy.ndarray(float): Floquet eigenbasis (in columns).
Requires __init__ argument VF=True.
"""
if hasattr(self,"_VF"):
return self._VF
else:
raise AttributeError("missing atrribute 'VF'.")
class Floquet_t_vec(object):
"""Creates a Floquet time vector with fixed number of points per period.
This time vector hits all stroboscopic times, and has many useful attributes. The time vector
can be divided in three parts corresponding to three regimes of periodic evolution:
ramp-up, constant and ramp-down.
Particularly useful for studying periodically-driven systems.
Examples
--------
The following code shows how to use the `Floquet_t_vec` class.
.. literalinclude:: ../../doc_examples/Floquet_t_vec-example.py
:linenos:
:language: python
:lines: 7-
"""
def __init__(self, Omega, N_const, len_T=100, N_up=0, N_down=0):
"""
Parameters
-----------
Omega : float
Drive frequency.
N_const : int
Number of time periods in the constant part (period) of the time vector.
len_T : int
Number of time points within a single period. N.B. the last period interval is assumed
open on the right, i.e. [0,T) and the point T is NOT counted towards 'len_T'.
N_up : int, optional
Number of time periods in the up-part (period) of time vector.
N_down : int, optional
Number of time periods in the down-part (period) of time vector.
"""
# total number of periods
self._N = N_up+N_const+N_down
# total length of a period
self._len_T = len_T
# driving period T
self._T = 2.0*_np.pi/Omega
# define time vector
n = _np.linspace(-N_up, N_const+N_down, self.N*len_T+1)
self._vals = self.T*n
# total length of time vector
self._len = self.vals.size
# shape
self._shape = self._vals.shape
# time step
self._dt = self.T/self.len_T
# define index of period -N_up
ind0 = 0 #int( _np.squeeze( (n==-N_up).nonzero() ) )
# calculate stroboscopic times
self._strobo = _strobo_times(self.vals,self.len_T,ind0)
# define initial and final times and total duration
self._i = self.vals[0]
self._f = self.vals[-1]
self._tot = self._f - self._i
# if ramp is on, define more attributes
if N_up > 0 and N_down > 0:
t_up = self.vals[:self.strobo.inds[N_up]]
self._up = _periodic_ramp(N_up,t_up,self.T,self.len_T,ind0)
t_const = self.vals[self.strobo.inds[N_up]:self.strobo.inds[N_up+N_const]+1]
ind0 = self.up.strobo.inds[-1]+self.len_T
self._const = _periodic_ramp(N_const,t_const,self.T,self.len_T,ind0)
t_down = self.vals[self.strobo.inds[N_up+N_const]+1:self.strobo.inds[-1]+1]
ind0 = self.const.strobo.inds[-1]+self.len_T
self._down = _periodic_ramp(N_down,t_down,self.T,self.len_T,ind0)
elif N_up > 0:
t_up = self.vals[:self.strobo.inds[N_up]]
self._up = _periodic_ramp(N_up,t_up,self.T,self.len_T,ind0)
t_const = self.vals[self.strobo.inds[N_up]:self.strobo.inds[N_up+N_const]+1]
ind0 = self.up.strobo.inds[-1]+self.len_T
self._const = _periodic_ramp(N_const,t_const,self.T,self.len_T,ind0)
elif N_down > 0:
t_const = self.vals[self.strobo.inds[N_up]:self.strobo.inds[N_up+N_const]+1]
self._const = _periodic_ramp(N_const,t_const,self.T,self.len_T,ind0)
t_down = self.vals[self.strobo.inds[N_up+N_const]+1:self.strobo.inds[-1]+1]
ind0 = self.const.strobo.inds[-1]+self.len_T
self._down = _periodic_ramp(N_down,t_down,self.T,self.len_T,ind0)
def __iter__(self):
return self.vals.__iter__()
def __getitem__(self,s):
return self._vals.__getitem__(s)
def __str__(self):
return str(self._vals)
def __mul__(self,other):
return self._vals*other
def __div__(self,other):
return self._vals/other
def __truediv__(self,other):
return self._vals/other
def __len__(self):
return self._vals.__len__()
@property
def N(self):
"""int: total number of periods."""
return self._N
@property
def shape(self):
"""tuple: shape of array."""
return self._shape
@property
def len_T(self):
"""int: number of time points within one period, assumed half-open; [0,T)."""
return self._len_T
@property
def T(self):
"""float: drive period."""
return self._T
@property
def vals(self):
"""np.ndarray(float): time vector values."""
return self._vals
@property
def len(self):
"""int: length of time vector."""
return self._len
@property
def dt(self):
"""float: time vector step size."""
return self._dt
@property
def i(self):
"""float: initial time value."""
return self._i
@property
def f(self):
"""foat: final time value."""
return self._f
@property
def tot(self):
"""float: total time duration; `_.f - _.i` ."""
return self._tot
@property
def strobo(self):
"""obj: calculates stroboscopic times in time vector with period length `len_T` and assigns them as
attributes:
_.strobo.inds : numpy.ndarray(int)
indices of stroboscopic times (full periods).
_.strobo.vals : numpy.ndarray(float)
values of stroboscopic times (full periods).
"""
return self._strobo
@property
def up(self):
"""obj: refers to time vector of up-part (regime).
Inherits all attributes (e.g. `_.up.strobo.inds`) except `_.T`, `_.dt`, and `_.len_T`.
Requires optional `__init___` parameter `N_up` to be specified.
"""
if hasattr(self,"_up"):
return self._up
else:
raise AttributeError("missing attribute 'up'")
@property
def const(self):
"""obj: refers to time vector of const-part (regime).
Inherits all attributes (e.g. `_.const.strobo.inds`) except `_.T`, `_.dt`, and `_.len_T`.
"""
if hasattr(self,"_const"):
return self._const
else:
raise AttributeError("missing attribute 'const'")
@property
def down(self):
"""obj: refers to time vector of down-part (regime).
Inherits all attributes (e.g. `_.down.strobo.inds`) except `_.T`, `_.dt`, and `_.len_T`.
Requires optional __init___ parameter N_down to be specified.
"""
if hasattr(self,"_down"):
return self._down
else:
raise AttributeError("missing attribute 'down'")
def get_coordinates(self,index):
"""Returns (period number, index within period) of the `Floquet_t_vec` value stored at `index`.
Notes
-----
* This function finds the integers (i,j), such that `t_evolve[t_evolve.strobo.inds[i] + j] = t_evolve[index]`.
* The function may return wrong results if the spacing between two consecutive (i.e. nonstroboscopic) `Floquet_t_vec` values is smaller than `1E-15`.
Parameters
-----------
index : int
Index, to compute the `Floquet_t_vec` coordinates of.
Returns
--------
tuple
(i,j) such that `t_evolve[t_evolve.strobo.inds[i] + j] = t_evolve[index]`.
Examples
---------
>>> t = Floquet_t_vec(10.0,10) # define a Floquet vector
>>> index = 145 # pick a random index
>>> print(t[index]) # check element
>>> (i,j) = t.get_coordinates(index) # decompose index into stroboscopic coordinates
>>> print( t[t.strobo.inds[i] + j] ) # we obtain back original element
"""
t=self._vals[index]
eps=1E-15
i=_np.searchsorted(self.strobo._vals,t+eps)-1
j=_np.where( _np.abs(t-i*self._T - self._vals[:self.strobo.inds[1]])<eps )[0][0]
return (i,j)
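# --------------------------------------------------------------------------
# Minimal usage sketch for Floquet_t_vec (illustrative only): with Omega=10.0,
# N_const=10 and the default len_T=100 the vector holds 10*100+1 = 1001 points
# and hits every stroboscopic time.
#
# t = Floquet_t_vec(10.0, 10)
# t.T, t.dt, t.len              # drive period, time step, number of points (1001)
# t.strobo.inds, t.strobo.vals  # indices/values of the stroboscopic times
# t.get_coordinates(145)        # -> (1, 45): period 1, 45th point inside it
# --------------------------------------------------------------------------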
class _strobo_times():
def __init__(self,t,len_T,ind0):
"""
Calculates stroboscopic times in time vector t with period length len_T and assigns them as
attributes.
"""
# indices of strobo times
self._inds = _np.arange(0,t.size,len_T).astype(int)
#discrete stroboscopic t_vecs
self._vals = t.take(self._inds)
# update strobo indices to match shifted (ramped) ones
self._inds += ind0
@property
def inds(self):
return self._inds
@property
def vals(self):
return self._vals
def __iter__(self):
return self.vals.__iter__()
def __getitem__(self,s):
return self._vals.__getitem__(s)
def __str__(self):
return str(self._vals)
def __mul__(self,other):
return self._vals*other
def __div__(self,other):
return self._vals/other
def __truediv__(self,other):
return self._vals/other
def __len__(self):
return self._vals.__len__()
class _periodic_ramp():
def __init__(self,N,t,T,len_T,ind0):
"""Defines time vector attributes of each regime.
"""
self._N=N # total # periods
self._vals = t # time values
self._i = self._vals[0] # initial value
self._f = self._vals[-1] # final value
self._tot = self._N*T # total duration
self._len = self._vals.size # total length
self._strobo = _strobo_times(self._vals,len_T,ind0) # strobo attributes
def __iter__(self):
return self.vals.__iter__()
def __getitem__(self,s):
return self._vals.__getitem__(s)
def __str__(self):
return str(self._vals)
def __mul__(self,other):
return self._vals*other
def __div__(self,other):
return self._vals/other
def __truediv__(self,other):
return self._vals/other
def __len__(self):
return self._vals.__len__()
@property
def N(self):
return self._N
@property
def vals(self):
return self._vals
@property
def i(self):
return self._i
@property
def f(self):
return self._f
@property
def tot(self):
return self._tot
@property
def len(self):
return self._len
@property
def strobo(self):
return self._strobo
|
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function
from rplibs.six.moves import range # pylint: disable=import-error
from panda3d.core import Vec3
from rpcore.pynative.pointer_slot_storage import PointerSlotStorage
from rpcore.pynative.gpu_command import GPUCommand
MAX_LIGHT_COUNT = 65535
MAX_SHADOW_SOURCES = 2048
class InternalLightManager(object):
""" Please refer to the native C++ implementation for docstrings and comments.
This is just the python implementation, which does not contain documentation! """
def __init__(self):
self._lights = PointerSlotStorage(MAX_LIGHT_COUNT)
self._shadow_sources = PointerSlotStorage(MAX_SHADOW_SOURCES)
self._cmd_list = None
self._shadow_manager = None
self._camera_pos = Vec3(0)
self._shadow_update_distance = 100.0
def get_max_light_index(self):
return self._lights.get_max_index()
max_light_index = property(get_max_light_index)
def get_num_lights(self):
return self._lights.get_num_entries()
num_lights = property(get_num_lights)
def get_num_shadow_sources(self):
return self._shadow_sources.get_num_entries()
num_shadow_sources = property(get_num_shadow_sources)
def set_shadow_manager(self, shadow_manager):
self._shadow_manager = shadow_manager
def get_shadow_manager(self):
return self._shadow_manager
shadow_manager = property(get_shadow_manager, set_shadow_manager)
def set_command_list(self, cmd_list):
self._cmd_list = cmd_list
def set_camera_pos(self, pos):
self._camera_pos = pos
def set_shadow_update_distance(self, dist):
self._shadow_update_distance = dist
def add_light(self, light):
if light.has_slot():
print("ERROR: Cannot add light since it already has a slot!")
return
slot = self._lights.find_slot()
if slot < 0:
print("ERROR: Could not find a free slot for a new light!")
return
light.assign_slot(slot)
self._lights.reserve_slot(slot, light)
if light.get_casts_shadows():
self.setup_shadows(light)
self.gpu_update_light(light)
def setup_shadows(self, light):
light.init_shadow_sources()
light.update_shadow_sources()
num_sources = light.get_num_shadow_sources()
base_slot = self._shadow_sources.find_consecutive_slots(num_sources)
if base_slot < 0:
print("ERROR: Failed to find slot for shadow sources!")
return
for i in range(num_sources):
source = light.get_shadow_source(i)
source.set_needs_update(True)
slot = base_slot + i
self._shadow_sources.reserve_slot(slot, source)
source.set_slot(slot)
def remove_light(self, light):
assert light is not None
if not light.has_slot():
print("ERROR: Could not detach light, light was not attached!")
return
self._lights.free_slot(light.get_slot())
self.gpu_remove_light(light)
light.remove_slot()
if light.get_casts_shadows():
for i in range(light.get_num_shadow_sources()):
source = light.get_shadow_source(i)
if source.has_slot():
self._shadow_sources.free_slot(source.get_slot())
if source.has_region():
self._shadow_manager.get_atlas().free_region(source.get_region())
source.clear_region()
self.gpu_remove_consecutive_sources(
light.get_shadow_source(0), light.get_num_shadow_sources())
light.clear_shadow_sources()
def gpu_remove_consecutive_sources(self, first_source, num_sources):
cmd_remove = GPUCommand(GPUCommand.CMD_remove_sources)
cmd_remove.push_int(first_source.get_slot())
cmd_remove.push_int(num_sources)
self._cmd_list.add_command(cmd_remove)
def gpu_remove_light(self, light):
cmd_remove = GPUCommand(GPUCommand.CMD_remove_light)
cmd_remove.push_int(light.get_slot())
self._cmd_list.add_command(cmd_remove)
def gpu_update_light(self, light):
cmd_update = GPUCommand(GPUCommand.CMD_store_light)
cmd_update.push_int(light.get_slot())
light.write_to_command(cmd_update)
light.set_needs_update(False)
self._cmd_list.add_command(cmd_update)
def gpu_update_source(self, source):
cmd_update = GPUCommand(GPUCommand.CMD_store_source)
cmd_update.push_int(source.get_slot())
source.write_to_command(cmd_update)
self._cmd_list.add_command(cmd_update)
def update_lights(self):
for light in self._lights.begin():
if light.get_needs_update():
if light.casts_shadows:
light.update_shadow_sources()
self.gpu_update_light(light)
def update_shadow_sources(self):
sources_to_update = []
for source in self._shadow_sources.begin():
# if source and source.get_needs_update():
# sources_to_update.append(source)
if source:
bounds = source.get_bounds()
distance_to_camera = (self._camera_pos - bounds.get_center()).length() - bounds.get_radius()
if distance_to_camera < self._shadow_update_distance:
sources_to_update.append(source)
else:
if source.has_region():
self._shadow_manager.get_atlas().free_region(source.get_region())
source.clear_region()
def get_source_score(source):
dist = (source.get_bounds().get_center() - self._camera_pos).length()
return -dist + (10**10 if source.has_region() else 0)
sorted_sources = list(sorted(sources_to_update, key=get_source_score))
atlas = self._shadow_manager.get_atlas()
update_slots = min(
len(sorted_sources),
self._shadow_manager.get_num_update_slots_left())
for i in range(update_slots):
if sorted_sources[i].has_region():
atlas.free_region(sorted_sources[i].get_region())
for i in range(update_slots):
source = sorted_sources[i]
if not self._shadow_manager.add_update(source):
print("ERROR: Shadow manager ensured update slot, but slot is taken!")
break
region_size = atlas.get_required_tiles(source.get_resolution())
new_region = atlas.find_and_reserve_region(region_size, region_size)
new_uv_region = atlas.region_to_uv(new_region)
source.set_region(new_region, new_uv_region)
source.set_needs_update(False)
self.gpu_update_source(source)
def update(self):
self.update_lights()
self.update_shadow_sources()
|
|
from django.http import HttpResponseRedirect as Redirect, Http404
from django_openid import consumer, signed
from django_openid.utils import hex_to_int, int_to_hex
from django.conf import settings
from django.contrib.auth import authenticate
from django.core.mail import send_mail
import datetime
from urlparse import urljoin
# TODO: prevent multiple associations of same OpenID
class AuthConsumer(consumer.SessionConsumer):
"""
An OpenID consumer endpoint that integrates with Django's auth system.
Uses SessionConsumer rather than CookieConsumer because the auth system
relies on sessions already.
"""
after_login_redirect_url = '/'
associations_template = 'django_openid/associations.html'
login_plus_password_template = 'django_openid/login_plus_password.html'
recover_template = 'django_openid/recover.html'
already_logged_in_template = 'django_openid/already_logged_in.html'
pick_account_template = 'django_openid/pick_account.html'
show_associate_template = 'django_openid/associate.html'
recovery_email_template = 'django_openid/recovery_email.txt'
recovery_expired_template = 'django_openid/recovery_expired.html'
recovery_complete_template = 'django_openid/recovery_complete.html'
recovery_email_from = None
recovery_email_subject = 'Recover your account'
password_logins_enabled = True
account_recovery_enabled = True
need_authenticated_user_message = 'You need to sign in with an ' \
'existing user account to access this page.'
csrf_failed_message = 'Invalid submission'
associate_tampering_message = 'Invalid submission'
association_deleted_message = '%s has been deleted'
openid_now_associated_message = \
'The OpenID "%s" is now associated with your account.'
bad_password_message = 'Incorrect username or password'
invalid_token_message = 'Invalid token'
recovery_email_sent_message = 'Check your mail for further instructions'
recovery_not_found_message = 'No matching user was found'
recovery_multiple_found_message = 'Try entering your username instead'
r_user_not_found_message = 'That user account does not exist'
account_recovery_url = None
associate_salt = 'associate-salt'
associate_delete_salt = 'associate-delete-salt'
recovery_link_salt = 'recovery-link-salt'
recovery_link_secret = None # If None, uses settings.SECRET_KEY
# For generating recovery URLs
recovery_origin_date = datetime.date(2000, 1, 1)
recovery_expires_after_days = 3 # Number of days recovery URL is valid for
def show_login(self, request, extra_message=None):
if request.user.is_authenticated():
return self.show_already_logged_in(request)
response = super(AuthConsumer, self).show_login(
request, extra_message
)
if self.password_logins_enabled:
response.template_name = self.login_plus_password_template
response.template_context.update({
'account_recovery': self.account_recovery_enabled and (
self.account_recovery_url or (request.path + 'recover/')
),
})
return response
def show_already_logged_in(self, request):
return self.render(request, self.already_logged_in_template)
def do_login(self, request, extra_message=None):
if request.method == 'POST' and \
request.POST.get('username', '').strip():
# Do a username/password login instead
user = authenticate(
username = request.POST.get('username'),
password = request.POST.get('password')
)
if not user:
return self.show_login(request, self.bad_password_message)
else:
self.log_in_user(request, user)
return self.on_login_complete(request, user, openid=None)
else:
return super(AuthConsumer, self).do_login(request, extra_message)
def lookup_openid(self, request, identity_url):
# The import lives inside this method so User won't get imported if you
# over-ride this in your own sub-class and use something else.
from django.contrib.auth.models import User
return list(
User.objects.filter(openids__openid = identity_url).distinct()
)
def log_in_user(self, request, user):
# Remember, openid might be None (after registration with none set)
from django.contrib.auth import login
# Nasty but necessary - annotate user and pretend it was the regular
# auth backend. This is needed so django.contrib.auth.get_user works:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
def on_login_complete(self, request, user, openid=None):
response = self.redirect_if_valid_next(request)
if not response:
response = Redirect(self.after_login_redirect_url)
return response
def on_logged_in(self, request, openid, openid_response):
# Do we recognise their OpenID?
matches = self.lookup_openid(request, openid)
# Are they logged in already?
if request.user.is_authenticated():
# Did we find their account already? If so, ignore login
if request.user.id in [u.id for u in matches]:
response = self.redirect_if_valid_next(request)
if not response:
response = Redirect(self.after_login_redirect_url)
return response
else:
# Offer to associate this OpenID with their account
return self.show_associate(request, openid)
if matches:
# If there's only one match, log them in as that user
if len(matches) == 1:
user = matches[0]
if self.user_can_login(request, user):
self.log_in_user(request, user)
return self.on_login_complete(request, user, openid)
else:
# User is not allowed to log in for some other reason -
# for example, they have not yet validated their e-mail
# or they have been banned from the site.
return self.show_you_cannot_login(request, user, openid)
# Otherwise, let them pick which account they want to log in as
else:
return self.show_pick_account(request, openid)
else:
# We don't know anything about this openid
return self.show_unknown_openid(request, openid)
def user_can_login(self, request, user):
"Over-ride for things like user bans or account e-mail validation"
return user.is_active
def show_pick_account(self, request, openid):
"""
The user's OpenID is associated with more than one account - ask them
which one they would like to sign in as
"""
return self.render(request, self.pick_account_template, {
'action': urljoin(request.path, '../pick/'),
'openid': openid,
'users': self.lookup_openid(request, openid),
})
def do_pick(self, request):
# User MUST be logged in with an OpenID and it MUST be associated
# with the selected account. The error messages in here are a bit
# weird, unfortunately.
if not request.openid:
return self.show_error(request, 'You should be logged in here')
users = self.lookup_openid(request, request.openid.openid)
try:
user_id = [
v.split('-')[1] for v in request.POST if v.startswith('user-')
][0]
user = [u for u in users if str(u.id) == user_id][0]
except IndexError:
return self.show_error(request, "You didn't pick a valid user")
# OK, log them in
self.log_in_user(request, user)
return self.on_login_complete(request, user, request.openid.openid)
def on_logged_out(self, request):
# After logging out the OpenID, log out the user auth session too
from django.contrib.auth import logout
response = super(AuthConsumer, self).on_logged_out(request)
logout(request)
return response
def show_unknown_openid(self, request, openid):
# This can be over-ridden to show a registration form
return self.show_message(
request, 'Unknown OpenID', '%s is an unknown OpenID' % openid
)
def show_you_cannot_login(self, request, user, openid):
return self.show_message(
request, 'You cannot log in',
'You cannot log in with that account'
)
def show_associate(self, request, openid=None):
"Screen that offers to associate an OpenID with a user's account"
if not request.user.is_authenticated():
return self.need_authenticated_user(request)
return self.render(request, self.show_associate_template, {
'action': urljoin(request.path, '../associate/'),
'user': request.user,
'specific_openid': openid,
'openid_token': signed.dumps(
# Use user.id as part of extra_key to prevent attackers from
# creating their own openid_token for use in CSRF attack
openid, extra_key = self.associate_salt + str(request.user.id)
),
})
def do_associate(self, request):
if request.method == 'POST':
try:
openid = signed.loads(
request.POST.get('openid_token', ''),
extra_key = self.associate_salt + str(request.user.id)
)
except signed.BadSignature:
return self.show_error(request, self.csrf_failed_message)
# Associate openid with their account, if it isn't already
if not request.user.openids.filter(openid = openid):
request.user.openids.create(openid = openid)
return self.show_associate_done(request, openid)
return self.show_error(request, 'Should POST to here')
def show_associate_done(self, request, openid):
response = self.redirect_if_valid_next(request)
if not response:
response = self.show_message(request, 'Associated',
self.openid_now_associated_message % openid
)
return response
def need_authenticated_user(self, request):
return self.show_error(request, self.need_authenticated_user_message)
def do_associations(self, request):
"Interface for managing your account's associated OpenIDs"
if not request.user.is_authenticated():
return self.need_authenticated_user(request)
message = None
if request.method == 'POST':
if 'todelete' in request.POST:
# Something needs deleting; find out what
try:
todelete = signed.loads(
request.POST['todelete'],
extra_key = self.associate_delete_salt
)
if todelete['user_id'] != request.user.id:
message = self.associate_tampering_message
else:
# It matches! Delete the OpenID relationship
request.user.openids.filter(
pk = todelete['association_id']
).delete()
message = self.association_deleted_message % (
todelete['openid']
)
except signed.BadSignature:
message = self.associate_tampering_message
# We construct a button to delete each existing association
openids = []
for association in request.user.openids.all():
openids.append({
'openid': association.openid,
'button': signed.dumps({
'user_id': request.user.id,
'association_id': association.id,
'openid': association.openid,
}, extra_key = self.associate_delete_salt),
})
return self.render(request, self.associations_template, {
'openids': openids,
'user': request.user,
'action': request.path,
'message': message,
'action_new': '../',
'associate_next': self.sign_next(request.path),
})
def do_recover(self, request, extra_message = None):
if request.method == 'POST':
submitted = request.POST.get('recover', '').strip()
user = None
if '@' not in submitted: # They entered a username
user = self.lookup_user_by_username(submitted)
else: # They entered an e-mail address
users = self.lookup_users_by_email(submitted)
if users:
if len(users) > 1:
extra_message = self.recovery_multiple_found_message
user = None
else:
user = users[0]
if user:
self.send_recovery_email(request, user)
return self.show_message(
request, 'E-mail sent', self.recovery_email_sent_message
)
else:
extra_message = self.recovery_not_found_message
return self.render(request, self.recover_template, {
'action': request.path,
'message': extra_message,
})
def lookup_users_by_email(self, email):
from django.contrib.auth.models import User
return list(User.objects.filter(email = email))
def lookup_user_by_username(self, username):
from django.contrib.auth.models import User
try:
return User.objects.get(username = username)
except User.DoesNotExist:
return None
def lookup_user_by_id(self, id):
from django.contrib.auth.models import User
try:
return User.objects.get(pk = id)
except User.DoesNotExist:
return None
def do_r(self, request, token = ''):
if not token:
# TODO: show a form where they can paste in their token?
raise Http404
token = token.rstrip('/').encode('utf8')
try:
value = signed.unsign(token, key = (
self.recovery_link_secret or settings.SECRET_KEY
) + self.recovery_link_salt)
except signed.BadSignature:
return self.show_message(
request, self.invalid_token_message,
self.invalid_token_message + ': ' + token
)
hex_days, hex_user_id = (value.split('.') + ['', ''])[:2]
days = hex_to_int(hex_days)
user_id = hex_to_int(hex_user_id)
user = self.lookup_user_by_id(user_id)
if not user: # Maybe the user was deleted?
return self.show_error(request, self.r_user_not_found_message)
# Has the token expired?
now_days = (datetime.date.today() - self.recovery_origin_date).days
if (now_days - days) > self.recovery_expires_after_days:
return self.render(request, self.recovery_expired_template, {
'days': self.recovery_expires_after_days,
'recover_url': urljoin(request.path, '../../recover/'),
})
# Token is valid! Log them in as that user and show the recovery page
self.log_in_user(request, user)
return self.render(request, self.recovery_complete_template, {
'change_password_url': urljoin(request.path, '../../password/'),
'associate_url': urljoin(request.path, '../../associations/'),
'user': user,
})
do_r.urlregex = '^r/([^/]+)/$'
def generate_recovery_code(self, user):
# Code is {hex-days}.{hex-userid}.{signature}
days = int_to_hex(
(datetime.date.today() - self.recovery_origin_date).days
)
token = '%s.%s' % (days, int_to_hex(user.id))
return signed.sign(token, key = (
self.recovery_link_secret or settings.SECRET_KEY
) + self.recovery_link_salt)
def send_recovery_email(self, request, user):
code = self.generate_recovery_code(user)
path = urljoin(request.path, '../r/%s/' % code)
url = request.build_absolute_uri(path)
email_body = self.render(request, self.recovery_email_template, {
'url': url,
'code': code,
'theuser': user,
}).content
send_mail(
subject = self.recovery_email_subject,
message = email_body,
from_email = self.recovery_email_from or \
settings.DEFAULT_FROM_EMAIL,
recipient_list = [user.email]
)
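# --------------------------------------------------------------------------
# Hedged usage sketch (illustrative; the exact URL wiring depends on the
# project): an AuthConsumer subclass is typically mounted as a single view in
# urls.py and dispatches on the remainder of the path (login, logout/,
# associations/, recover/, r/<token>/ ...).
#
# # urls.py (Django 1.x-era, illustrative)
# # from django_openid.auth import AuthConsumer
# # urlpatterns = [
# #     (r'^openid/(.*)', AuthConsumer()),
# # ]
# --------------------------------------------------------------------------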
# Monkey-patch to add openid login form to the Django admin
def make_display_login_form_with_openid(bind_to_me, openid_path):
"openid_path is the path the OpenID login should submit to, e.g. /openid/"
from django.contrib.admin.sites import AdminSite
def display_login_form(request, error_message='',
extra_context=None):
extra_context = extra_context or {}
extra_context['openid_path'] = openid_path
return AdminSite.display_login_form(
bind_to_me, request, error_message, extra_context
)
return display_login_form
def monkeypatch_adminsite(admin_site, openid_path = '/openid/'):
admin_site.display_login_form = make_display_login_form_with_openid(
admin_site, openid_path
)
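# --------------------------------------------------------------------------
# Hedged usage sketch: call monkeypatch_adminsite() once at import time (for
# example from urls.py) so the admin login form gains a link/form posting to
# the OpenID endpoint given by openid_path.
#
# # from django.contrib import admin
# # monkeypatch_adminsite(admin.site, openid_path='/openid/')
# --------------------------------------------------------------------------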
|
|
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Britta Westner <britta.wstnr@gmail.com>
#
# License: BSD 3 clause
import copy as cp
import os.path as op
import pytest
from numpy.testing import assert_array_equal, assert_allclose
import numpy as np
import mne
from mne.datasets import testing
from mne.beamformer import (make_dics, apply_dics, apply_dics_epochs,
apply_dics_csd, tf_dics, read_beamformer,
Beamformer)
from mne.beamformer._compute_beamformer import _prepare_beamformer_input
from mne.beamformer._dics import _prepare_noise_csd
from mne.time_frequency import csd_morlet
from mne.utils import object_diff, requires_h5py, catch_logging
from mne.proj import compute_proj_evoked, make_projector
from mne.surface import _compute_nearest
from mne.beamformer.tests.test_lcmv import _assert_weight_norm
from mne.time_frequency import CrossSpectralDensity
from mne.time_frequency.csd import _sym_mat_to_vector
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def _load_forward():
"""Load forward models."""
fwd_free = mne.read_forward_solution(fname_fwd)
fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False)
fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False)
fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True,
use_cps=False)
fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True,
use_cps=False)
fwd_vol = mne.read_forward_solution(fname_fwd_vol)
return fwd_free, fwd_surf, fwd_fixed, fwd_vol
def _simulate_data(fwd, idx): # Somewhere on the frontal lobe by default
"""Simulate an oscillator on the cortex."""
source_vertno = fwd['src'][0]['vertno'][idx]
sfreq = 50. # Hz.
times = np.arange(10 * sfreq) / sfreq # 10 seconds of data
signal = np.sin(20 * 2 * np.pi * times) # 20 Hz oscillator
signal[:len(times) // 2] *= 2 # Make signal louder at the beginning
signal *= 1e-9 # Scale to be in the ballpark of MEG data
# Construct a SourceEstimate object that describes the signal at the
# cortical level.
stc = mne.SourceEstimate(
signal[np.newaxis, :],
vertices=[[source_vertno], []],
tmin=0,
tstep=1 / sfreq,
subject='sample',
)
# Create an info object that holds information about the sensors
info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
info.update(fwd['info']) # Merge in sensor position information
# heavily decimate sensors to make it much faster
info = mne.pick_info(info, np.arange(info['nchan'])[::5])
fwd = mne.pick_channels_forward(fwd, info['ch_names'])
# Run the simulated signal through the forward model, obtaining
# simulated sensor data.
raw = mne.apply_forward_raw(fwd, stc, info)
# Add a little noise
random = np.random.RandomState(42)
noise = random.randn(*raw._data.shape) * 1e-14
raw._data += noise
# Define a single epoch (weird baseline but shouldn't matter)
epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
tmax=raw.times[-1], baseline=(0., 0.), preload=True)
evoked = epochs.average()
# Compute the cross-spectral density matrix
csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5)
labels = mne.read_labels_from_annot(
'sample', hemi='lh', subjects_dir=subjects_dir)
label = [
label for label in labels if np.in1d(source_vertno, label.vertices)[0]]
assert len(label) == 1
label = label[0]
vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
source_ind = vertices.tolist().index(source_vertno)
assert vertices[source_ind] == source_vertno
return epochs, evoked, csd, source_vertno, label, vertices, source_ind
idx_param = pytest.mark.parametrize('idx', [0, 100, 200, 233])
def _rand_csd(rng, info):
scales = mne.make_ad_hoc_cov(info).data
n = scales.size
# Some random complex correlation structure (with channel scalings)
data = rng.randn(n, n) + 1j * rng.randn(n, n)
data = data @ data.conj().T
data *= scales
data *= scales[:, np.newaxis]
data.flat[::n + 1] = scales
return data
def _make_rand_csd(info, csd):
rng = np.random.RandomState(0)
data = _rand_csd(rng, info)
# now we need to have the same null space as the data csd
s, u = np.linalg.eigh(csd.get_data(csd.frequencies[0]))
mask = np.abs(s) >= s[-1] * 1e-7
rank = mask.sum()
assert rank == len(data) == len(info['ch_names'])
noise_csd = CrossSpectralDensity(
_sym_mat_to_vector(data), info['ch_names'], 0., csd.n_fft)
return noise_csd, rank
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_h5py
@idx_param
@pytest.mark.parametrize('whiten', (False, True))
def test_make_dics(tmpdir, _load_forward, idx, whiten):
"""Test making DICS beamformer filters."""
# We only test proper handling of parameters here. Testing the results is
# done in test_apply_dics_timeseries and test_apply_dics_csd.
fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
epochs, _, csd, _, label, vertices, source_ind = \
_simulate_data(fwd_fixed, idx)
with pytest.raises(ValueError, match='several sensor types'):
make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None)
if whiten:
noise_csd, rank = _make_rand_csd(epochs.info, csd)
assert rank == len(epochs.info['ch_names']) == 62
else:
noise_csd = None
epochs.pick_types(meg='grad')
with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"):
make_dics(epochs.info, fwd_fixed, csd, pick_ori="notexistent",
noise_csd=noise_csd)
with pytest.raises(ValueError, match='rank, if str'):
make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd)
with pytest.raises(TypeError, match='rank must be'):
make_dics(epochs.info, fwd_fixed, csd, rank=1., noise_csd=noise_csd)
# Test if fixed forward operator is detected when picking normal
# orientation
with pytest.raises(ValueError, match='forward operator with free ori'):
make_dics(epochs.info, fwd_fixed, csd, pick_ori="normal",
noise_csd=noise_csd)
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
with pytest.raises(ValueError, match='oriented in surface coordinates'):
make_dics(epochs.info, fwd_free, csd, pick_ori="normal",
noise_csd=noise_csd)
# Test if volume forward operator is detected when picking normal
# orientation
with pytest.raises(ValueError, match='oriented in surface coordinates'):
make_dics(epochs.info, fwd_vol, csd, pick_ori="normal",
noise_csd=noise_csd)
# Test invalid combinations of parameters
with pytest.raises(ValueError, match='reduce_rank cannot be used with'):
make_dics(epochs.info, fwd_free, csd, inversion='single',
reduce_rank=True, noise_csd=noise_csd)
# TODO: Restore this?
# with pytest.raises(ValueError, match='not stable with depth'):
# make_dics(epochs.info, fwd_free, csd, weight_norm='unit-noise-gain',
# inversion='single', depth=None)
# Sanity checks on the returned filters
n_freq = len(csd.frequencies)
vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
n_verts = len(vertices)
n_orient = 3
n_channels = len(epochs.ch_names)
# Test return values
weight_norm = 'unit-noise-gain'
inversion = 'single'
filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
weight_norm=weight_norm, depth=None,
noise_csd=noise_csd, inversion=inversion)
assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
assert np.iscomplexobj(filters['weights'])
assert filters['csd'].ch_names == epochs.ch_names
assert isinstance(filters['csd'], CrossSpectralDensity)
assert filters['ch_names'] == epochs.ch_names
assert_array_equal(filters['proj'], np.eye(n_channels))
assert_array_equal(filters['vertices'][0], vertices)
assert_array_equal(filters['vertices'][1], []) # Label was on the LH
assert filters['subject'] == fwd_free['src']._subject
assert filters['pick_ori'] is None
assert filters['is_free_ori']
assert filters['inversion'] == inversion
assert filters['weight_norm'] == weight_norm
assert 'DICS' in repr(filters)
assert 'subject "sample"' in repr(filters)
assert str(len(vertices)) in repr(filters)
assert str(n_channels) in repr(filters)
assert 'rank' not in repr(filters)
_, noise_cov = _prepare_noise_csd(csd, noise_csd, real_filter=False)
_, _, _, _, G, _, _, _ = _prepare_beamformer_input(
epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None,
noise_cov=noise_cov)
G.shape = (n_channels, n_verts, n_orient)
G = G.transpose(1, 2, 0).conj() # verts, orient, ch
_assert_weight_norm(filters, G)
inversion = 'matrix'
filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
weight_norm=weight_norm, depth=None,
noise_csd=noise_csd, inversion=inversion)
_assert_weight_norm(filters, G)
weight_norm = 'unit-noise-gain-invariant'
inversion = 'single'
filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None,
weight_norm=weight_norm, depth=None,
noise_csd=noise_csd, inversion=inversion)
_assert_weight_norm(filters, G)
# Test picking orientations. Also test weight norming under these different
# conditions.
weight_norm = 'unit-noise-gain'
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
pick_ori='normal', weight_norm=weight_norm,
depth=None, noise_csd=noise_csd, inversion=inversion)
n_orient = 1
assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
assert not filters['is_free_ori']
_assert_weight_norm(filters, G)
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
pick_ori='max-power', weight_norm=weight_norm,
depth=None, noise_csd=noise_csd, inversion=inversion)
n_orient = 1
assert filters['weights'].shape == (n_freq, n_verts * n_orient, n_channels)
assert not filters['is_free_ori']
_assert_weight_norm(filters, G)
# From here on, only work on a single frequency
csd = csd[0]
# Test using a real-valued filter
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
pick_ori='normal', real_filter=True,
noise_csd=noise_csd)
assert not np.iscomplexobj(filters['weights'])
# Test forward normalization. When inversion='single', the power of a
# unit-noise CSD should be 1, even without weight normalization.
if not whiten:
csd_noise = csd.copy()
inds = np.triu_indices(csd.n_channels)
# Using [:, :] syntax for in-place broadcasting
csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis]
filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label,
weight_norm=None, depth=1., noise_csd=noise_csd,
inversion='single')
w = filters['weights'][0][:3]
assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-6,
atol=0)
# Test turning off both forward and weight normalization
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
weight_norm=None, depth=None, noise_csd=noise_csd)
w = filters['weights'][0][:3]
assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0,
rtol=1e-2, atol=0)
# Test neural-activity-index weight normalization. It should be a scaled
# version of the unit-noise-gain beamformer.
filters_nai = make_dics(
epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
weight_norm='nai', depth=None, noise_csd=noise_csd)
w_nai = filters_nai['weights'][0]
filters_ung = make_dics(
epochs.info, fwd_surf, csd, label=label, pick_ori='max-power',
weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd)
w_ung = filters_ung['weights'][0]
assert_allclose(np.corrcoef(np.abs(w_nai).ravel(),
np.abs(w_ung).ravel()), 1, atol=1e-7)
# Test whether spatial filter contains src_type
assert 'src_type' in filters
fname = op.join(str(tmpdir), 'filters-dics.h5')
filters.save(fname)
filters_read = read_beamformer(fname)
assert isinstance(filters, Beamformer)
assert isinstance(filters_read, Beamformer)
for key in ['tmin', 'tmax']: # deal with strictness of object_diff
setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key)))
assert object_diff(filters, filters_read) == ''
def _fwd_dist(power, fwd, vertices, source_ind, tidx=1):
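    """Distance between the peak-power vertex and the true source vertex."""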
idx = np.argmax(power.data[:, tidx])
rr_got = fwd['src'][0]['rr'][vertices[idx]]
rr_want = fwd['src'][0]['rr'][vertices[source_ind]]
return np.linalg.norm(rr_got - rr_want)
@idx_param
@pytest.mark.parametrize('inversion, weight_norm', [
('single', None),
('matrix', 'unit-noise-gain'),
])
def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm):
"""Test applying a DICS beamformer to a CSD matrix."""
fwd_free, fwd_surf, fwd_fixed, _ = _load_forward
epochs, _, csd, source_vertno, label, vertices, source_ind = \
_simulate_data(fwd_fixed, idx)
reg = 1 # Lots of regularization for our toy dataset
with pytest.raises(ValueError, match='several sensor types'):
make_dics(epochs.info, fwd_free, csd)
epochs.pick_types(meg='grad')
# Try different types of forward models
assert label.hemi == 'lh'
for fwd in [fwd_free, fwd_surf, fwd_fixed]:
filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg,
inversion=inversion, weight_norm=weight_norm)
power, f = apply_dics_csd(csd, filters)
assert f == [10, 20]
# Did we find the true source at 20 Hz?
dist = _fwd_dist(power, fwd_free, vertices, source_ind)
assert dist == 0.
# Is the signal stronger at 20 Hz than 10?
assert power.data[source_ind, 1] > power.data[source_ind, 0]
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power'])
@pytest.mark.parametrize('inversion', ['single', 'matrix'])
@idx_param
def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx):
"""Test picking different orientations and inversion modes."""
fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
epochs, _, csd, source_vertno, label, vertices, source_ind = \
_simulate_data(fwd_fixed, idx)
epochs.pick_types(meg='grad')
reg_ = 5 if inversion == 'matrix' else 1
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
reg=reg_, pick_ori=pick_ori,
inversion=inversion, depth=None,
weight_norm='unit-noise-gain')
power, f = apply_dics_csd(csd, filters)
assert f == [10, 20]
dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
# This is 0. for unit-noise-gain-invariant:
assert dist <= (0.02 if inversion == 'matrix' else 0.)
assert power.data[source_ind, 1] > power.data[source_ind, 0]
# Test unit-noise-gain weighting
csd_noise = csd.copy()
inds = np.triu_indices(csd.n_channels)
csd_noise._data[...] = np.eye(csd.n_channels)[inds][:, np.newaxis]
noise_power, f = apply_dics_csd(csd_noise, filters)
want_norm = 3 if pick_ori is None else 1.
assert_allclose(noise_power.data, want_norm, atol=1e-7)
# Test filter with forward normalization instead of weight
# normalization
filters = make_dics(epochs.info, fwd_surf, csd, label=label,
reg=reg_, pick_ori=pick_ori,
inversion=inversion, weight_norm=None,
depth=1.)
power, f = apply_dics_csd(csd, filters)
assert f == [10, 20]
dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
mat_tol = {0: 0.055, 100: 0.20, 200: 0.015, 233: 0.035}[idx]
max_ = (mat_tol if inversion == 'matrix' else 0.)
assert 0 <= dist <= max_
assert power.data[source_ind, 1] > power.data[source_ind, 0]
def _nearest_vol_ind(fwd_vol, fwd, vertices, source_ind):
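    """Index of the volume source point nearest to the true surface source."""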
return _compute_nearest(
fwd_vol['source_rr'],
fwd['src'][0]['rr'][vertices][source_ind][np.newaxis])[0]
@idx_param
def test_real(_load_forward, idx):
"""Test using a real-valued filter."""
fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
epochs, _, csd, source_vertno, label, vertices, source_ind = \
_simulate_data(fwd_fixed, idx)
epochs.pick_types(meg='grad')
reg = 1 # Lots of regularization for our toy dataset
filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg,
real_filter=True, inversion='single')
    # Also test here that no warnings are thrown - implemented to check whether
    # the "src should not be None" warning occurs:
with pytest.warns(None) as w:
power, f = apply_dics_csd(csd, filters_real)
assert len(w) == 0
assert f == [10, 20]
dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
assert dist == 0
assert power.data[source_ind, 1] > power.data[source_ind, 0]
# Test rank reduction
filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5,
pick_ori='max-power', inversion='matrix',
reduce_rank=True)
power, f = apply_dics_csd(csd, filters_real)
assert f == [10, 20]
dist = _fwd_dist(power, fwd_surf, vertices, source_ind)
assert dist == 0
assert power.data[source_ind, 1] > power.data[source_ind, 0]
# Test computing source power on a volume source space
filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg,
inversion='single')
power, f = apply_dics_csd(csd, filters_vol)
vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind)
assert f == [10, 20]
dist = _fwd_dist(
power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind)
vol_tols = {100: 0.008, 200: 0.008}
assert dist <= vol_tols.get(idx, 0.)
assert power.data[vol_source_ind, 1] > power.data[vol_source_ind, 0]
# check whether a filters object without src_type throws expected warning
del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning
with pytest.warns(RuntimeWarning, match='spatial filter does not contain '
'src_type'):
apply_dics_csd(csd, filters_vol)
@pytest.mark.filterwarnings("ignore:The use of several sensor types with the"
":RuntimeWarning")
@idx_param
def test_apply_dics_timeseries(_load_forward, idx):
"""Test DICS applied to timeseries data."""
fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
epochs, evoked, csd, source_vertno, label, vertices, source_ind = \
_simulate_data(fwd_fixed, idx)
reg = 5 # Lots of regularization for our toy dataset
with pytest.raises(ValueError, match='several sensor types'):
make_dics(evoked.info, fwd_surf, csd)
evoked.pick_types(meg='grad')
multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label,
reg=reg)
# Sanity checks on the resulting STC after applying DICS on evoked
stcs = apply_dics(evoked, multiple_filters)
assert isinstance(stcs, list)
assert len(stcs) == len(multiple_filters['weights'])
assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0])
assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1])
assert_allclose(stcs[0].times, evoked.times)
# Applying filters for multiple frequencies on epoch data should fail
with pytest.raises(ValueError, match='computed for a single frequency'):
apply_dics_epochs(epochs, multiple_filters)
# From now on, only apply filters with a single frequency (20 Hz).
csd20 = csd.pick_frequency(20)
filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg,
inversion='single')
# Sanity checks on the resulting STC after applying DICS on epochs.
    # Also test here that no warnings are thrown - implemented to check whether
    # the "src should not be None" warning occurs
with pytest.warns(None) as w:
stcs = apply_dics_epochs(epochs, filters)
assert len(w) == 0
assert isinstance(stcs, list)
assert len(stcs) == 1
assert_array_equal(stcs[0].vertices[0], filters['vertices'][0])
assert_array_equal(stcs[0].vertices[1], filters['vertices'][1])
assert_allclose(stcs[0].times, epochs.times)
# Did we find the source?
stc = (stcs[0] ** 2).mean()
dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0)
assert dist == 0
# Apply filters to evoked
stc = apply_dics(evoked, filters)
stc = (stc ** 2).mean()
dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0)
assert dist == 0
# Test if wrong channel selection is detected in application of filter
evoked_ch = cp.deepcopy(evoked)
evoked_ch.pick_channels(evoked_ch.ch_names[:-1])
with pytest.raises(ValueError, match='MEG 2633 which is not present'):
apply_dics(evoked_ch, filters)
# Test whether projections are applied, by adding a custom projection
filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label)
stc_noproj = apply_dics(evoked, filters_noproj)
evoked_proj = evoked.copy()
p = compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0)
proj_matrix = make_projector(p, evoked_proj.ch_names)[0]
evoked_proj.info['projs'] += p
filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label)
assert_array_equal(filters_proj['proj'], proj_matrix)
stc_proj = apply_dics(evoked_proj, filters_proj)
assert np.any(np.not_equal(stc_noproj.data, stc_proj.data))
# Test detecting incompatible projections
filters_proj['proj'] = filters_proj['proj'][:-1, :-1]
with pytest.raises(ValueError, match='operands could not be broadcast'):
apply_dics(evoked_proj, filters_proj)
# Test returning a generator
stcs = apply_dics_epochs(epochs, filters, return_generator=False)
stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True)
assert_array_equal(stcs[0].data, next(stcs_gen).data)
# Test computing timecourses on a volume source space
filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg,
inversion='single')
stc = apply_dics(evoked, filters_vol)
stc = (stc ** 2).mean()
assert stc.data.shape[1] == 1
vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind)
dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind,
tidx=0)
vol_tols = {100: 0.008, 200: 0.015}
vol_tol = vol_tols.get(idx, 0.)
assert dist <= vol_tol
# check whether a filters object without src_type throws expected warning
del filters_vol['src_type'] # emulate 0.16 behaviour to cause warning
with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'):
apply_dics_epochs(epochs, filters_vol)
@pytest.mark.slowtest
@testing.requires_testing_data
@pytest.mark.filterwarnings('ignore:.*tf_dics is dep.*:DeprecationWarning')
def test_tf_dics(_load_forward):
"""Test 5D time-frequency beamforming based on DICS."""
fwd_free, fwd_surf, fwd_fixed, _ = _load_forward
# idx isn't really used so let's just simulate one
epochs, _, _, source_vertno, label, vertices, source_ind = \
_simulate_data(fwd_fixed, idx=0)
reg = 1 # Lots of regularization for our toy dataset
tmin = 0
tmax = 9
tstep = 4
win_lengths = [5, 5]
frequencies = [10, 20]
freq_bins = [(8, 12), (18, 22)]
with pytest.raises(ValueError, match='several sensor types'):
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
freq_bins=freq_bins, frequencies=frequencies,
decim=10, reg=reg, label=label)
epochs.pick_types(meg='grad')
# Compute DICS for two time windows and two frequencies
for mode in ['fourier', 'multitaper', 'cwt_morlet']:
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode=mode, freq_bins=freq_bins, frequencies=frequencies,
decim=10, reg=reg, label=label)
# Did we find the true source at 20 Hz?
dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=0)
assert dist == 0
dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=1)
assert dist == 0
# 20 Hz power should decrease over time
assert stcs[1].data[source_ind, 0] > stcs[1].data[source_ind, 1]
# 20 Hz power should be more than 10 Hz power at the true source
assert stcs[1].data[source_ind, 0] > stcs[0].data[source_ind, 0]
# Manually compute source power and compare with the last tf_dics result.
source_power = []
time_windows = [(0, 5), (4, 9)]
for time_window in time_windows:
csd = csd_morlet(epochs, frequencies=[frequencies[1]],
tmin=time_window[0], tmax=time_window[1], decim=10)
csd = csd.sum()
csd._data /= csd.n_fft
filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label,
inversion='single')
stc_source_power, _ = apply_dics_csd(csd, filters)
source_power.append(stc_source_power.data)
# Comparing tf_dics results with dics_source_power results
assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0)
# Test using noise csds. We're going to use identity matrices. That way,
# since we're using unit-noise-gain weight normalization, there should be
# no effect.
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='cwt_morlet', frequencies=frequencies, decim=10,
reg=reg, label=label, depth=None,
weight_norm='unit-noise-gain')
noise_csd = csd.copy()
inds = np.triu_indices(csd.n_channels)
# Using [:, :] syntax for in-place broadcasting
noise_csd._data[:, :] = 2 * np.eye(csd.n_channels)[inds][:, np.newaxis]
noise_csd.n_fft = 2 # Dividing by n_fft should yield an identity CSD
noise_csds = [noise_csd, noise_csd] # Two frequency bins
stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep,
win_lengths, mode='cwt_morlet',
frequencies=frequencies, decim=10, reg=reg,
label=label, depth=None,
weight_norm='unit-noise-gain')
assert_allclose(3 * stcs_norm[0].data, stcs[0].data, atol=0)
assert_allclose(3 * stcs_norm[1].data, stcs[1].data, atol=0)
# Test invalid parameter combinations
with pytest.raises(ValueError, match='fourier.*freq_bins" parameter'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='fourier', freq_bins=None)
with pytest.raises(ValueError, match='cwt_morlet.*frequencies" param'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='cwt_morlet', frequencies=None)
# Test if incorrect number of noise CSDs is detected
with pytest.raises(ValueError, match='One noise CSD object expected per'):
tf_dics(epochs, fwd_surf, [noise_csds[0]], tmin, tmax, tstep,
win_lengths, freq_bins=freq_bins)
# Test if freq_bins and win_lengths incompatibility is detected
with pytest.raises(ValueError, match='One time window length expected'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths=[0, 1, 2], freq_bins=freq_bins)
# Test if time step exceeding window lengths is detected
with pytest.raises(ValueError, match='Time step should not be larger'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep=0.15,
win_lengths=[0.2, 0.1], freq_bins=freq_bins)
# Test if incorrect number of n_ffts is detected
with pytest.raises(ValueError, match='When specifying number of FFT'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths, freq_bins=freq_bins, n_ffts=[1])
# Test if incorrect number of mt_bandwidths is detected
with pytest.raises(ValueError, match='When using multitaper mode and'):
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths=win_lengths, freq_bins=freq_bins,
mode='multitaper', mt_bandwidths=[20])
# Test if 'cwt_morlet' mode works with both fixed cycle numbers and lists
# of cycle numbers
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths, frequencies=frequencies, mode='cwt_morlet',
cwt_n_cycles=7)
tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep,
win_lengths, frequencies=frequencies, mode='cwt_morlet',
cwt_n_cycles=[5., 7.])
# Test if subtracting evoked responses yields NaN's, since we only have one
# epoch. Suppress division warnings.
assert len(epochs) == 1, len(epochs)
with np.errstate(invalid='ignore'):
stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths,
mode='cwt_morlet', frequencies=frequencies,
subtract_evoked=True, reg=reg, label=label, decim=20)
assert np.all(np.isnan(stcs[0].data))
def _cov_as_csd(cov, info):
rng = np.random.RandomState(0)
assert cov['data'].ndim == 2
assert len(cov['data']) == len(cov['names'])
# we need to make this have at least some complex structure
data = cov['data'] + 1e-1 * _rand_csd(rng, info)
assert data.dtype == np.complex128
return CrossSpectralDensity(_sym_mat_to_vector(data), cov['names'], 0., 16)
# Just test free ori here (assume fixed is same as LCMV if these are)
# Changes here should be synced with test_lcmv.py
@pytest.mark.parametrize(
'reg, pick_ori, weight_norm, use_cov, depth, lower, upper, real_filter', [
(0.05, None, 'unit-noise-gain-invariant', False, None, 26, 28, False),
(0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, False),
(0.05, None, 'unit-noise-gain-invariant', True, None, 40, 42, True),
(0.05, None, 'unit-noise-gain', False, None, 13, 14, False),
(0.05, None, 'unit-noise-gain', True, None, 35, 37, False),
(0.05, None, 'nai', True, None, 35, 37, False),
(0.05, None, None, True, None, 12, 14, False),
(0.05, None, None, True, 0.8, 39, 43, False),
(0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20,
False),
(0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, False),
(0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20, True),
(0.05, 'max-power', 'nai', True, None, 21, 24, False),
(0.05, 'max-power', None, True, None, 7, 10, False),
(0.05, 'max-power', None, True, 0.8, 15, 18, False),
# skip most no-reg tests, assume others are equal to LCMV if these are
(0.00, None, None, True, None, 21, 32, False),
(0.00, 'max-power', None, True, None, 13, 19, False),
])
def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm,
use_cov, depth, lower, upper, real_filter):
"""Test localization bias for free-orientation DICS."""
evoked, fwd, noise_cov, data_cov, want = bias_params_free
noise_csd = _cov_as_csd(noise_cov, evoked.info)
data_csd = _cov_as_csd(data_cov, evoked.info)
del noise_cov, data_cov
if not use_cov:
evoked.pick_types(meg='grad')
noise_csd = None
loc = apply_dics(evoked, make_dics(
evoked.info, fwd, data_csd, reg, noise_csd, pick_ori=pick_ori,
weight_norm=weight_norm, depth=depth, real_filter=real_filter)).data
loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc)
# Compute the percentage of sources for which there is no loc bias:
perc = (want == np.argmax(loc, axis=0)).mean() * 100
assert lower <= perc <= upper
@testing.requires_testing_data
@idx_param
@pytest.mark.parametrize('whiten', (False, True))
def test_make_dics_rank(_load_forward, idx, whiten):
"""Test making DICS beamformer filters with rank param."""
_, fwd_surf, fwd_fixed, _ = _load_forward
epochs, _, csd, _, label, _, _ = _simulate_data(fwd_fixed, idx)
if whiten:
noise_csd, want_rank = _make_rand_csd(epochs.info, csd)
kind = 'mag + grad'
else:
noise_csd = None
epochs.pick_types(meg='grad')
want_rank = len(epochs.ch_names)
assert want_rank == 41
kind = 'grad'
with catch_logging() as log:
filters = make_dics(
epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
verbose=True)
log = log.getvalue()
assert f'Estimated rank ({kind}): {want_rank}' in log, log
stc, _ = apply_dics_csd(csd, filters)
other_rank = want_rank - 1 # shouldn't make a huge difference
use_rank = dict(meg=other_rank)
if not whiten:
# XXX it's a bug that our rank functions don't treat "meg"
# properly here...
use_rank['grad'] = use_rank.pop('meg')
with catch_logging() as log:
filters_2 = make_dics(
epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
rank=use_rank, verbose=True)
log = log.getvalue()
assert f'Computing rank from covariance with rank={use_rank}' in log, log
stc_2, _ = apply_dics_csd(csd, filters_2)
corr = np.corrcoef(stc_2.data.ravel(), stc.data.ravel())[0, 1]
assert 0.8 < corr < 0.99999
# degenerate conditions
if whiten:
# make rank deficient
data = noise_csd.get_data(0.)
        data[0] = data[:, 0] = 0
noise_csd._data[:, 0] = _sym_mat_to_vector(data)
with pytest.raises(ValueError, match='meg data rank.*the noise rank'):
filters = make_dics(
epochs.info, fwd_surf, csd, label=label, noise_csd=noise_csd,
verbose=True)
|
|
#! /usr/bin/env python3
#
# Copyright 2017 Linaro Limited
# Copyright (c) 2018-2019, Arm Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import argparse
from imgtool_lib import keys
from imgtool_lib import image
from imgtool_lib import version
import sys
import macro_parser
import fileinput
sign_bin_size_re = re.compile(r"^\s*RE_SIGN_BIN_SIZE\s*=\s*(.*)")
image_load_address_re = re.compile(r"^\s*RE_IMAGE_LOAD_ADDRESS\s*=\s*(.*)")
# Returns the last version number if present, or None if not
def get_last_version(path):
    if not os.path.isfile(path):  # Version file not present
return None
else: # Version file is present, check it has a valid number inside it
with open(path, "r") as oldFile:
fileContents = oldFile.read()
if version.version_re.match(fileContents): # number is valid
return version.decode_version(fileContents)
else:
return None
def next_version_number(args, defaultVersion, path):
newVersion = None
versionProvided = False
if (version.compare(args.version, defaultVersion) == 0): # Default version
lastVersion = get_last_version(path)
if (lastVersion is not None):
newVersion = version.increment_build_num(lastVersion)
else:
newVersion = version.increment_build_num(defaultVersion)
else: # Version number has been explicitly provided (not using the default)
versionProvided = True
newVersion = args.version
versionString = "{a}.{b}.{c}+{d}".format(
a=str(newVersion.major),
b=str(newVersion.minor),
c=str(newVersion.revision),
d=str(newVersion.build)
)
if not versionProvided:
with open(path, "w") as newFile:
newFile.write(versionString)
print("**[INFO]** Image version number set to " + versionString)
return newVersion
def gen_rsa2048(args):
keys.RSAutil.generate().export_private(args.key)
def gen_rsa3072(args):
keys.RSAutil.generate(key_size=3072).export_private(args.key)
keygens = {
'rsa-2048': gen_rsa2048,
'rsa-3072': gen_rsa3072, }
def do_keygen(args):
if args.type not in keygens:
msg = "Unexpected key type: {}".format(args.type)
raise argparse.ArgumentTypeError(msg)
keygens[args.type](args)
def do_getpub(args):
key = keys.load(args.key)
if args.lang == 'c':
key.emit_c()
else:
msg = "Unsupported language, valid are: c"
raise argparse.ArgumentTypeError(msg)
def do_sign(args):
if args.rsa_pkcs1_15:
keys.sign_rsa_pss = False
version_num = next_version_number(args,
version.decode_version("0"),
"lastVerNum.txt")
if args.security_counter is None:
# Security counter has not been explicitly provided,
# generate it from the version number
args.security_counter = ((version_num.major << 24)
+ (version_num.minor << 16)
+ version_num.revision)
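        # Worked example (illustrative values only): version 1.2.3 gives
        # (1 << 24) + (2 << 16) + 3 = 0x01020003 as the security counter.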
if "_s.c" in args.layout:
sw_type = "SPE"
elif "_ns.c" in args.layout:
sw_type = "NSPE"
else:
sw_type = "NSPE_SPE"
pad_size = macro_parser.evaluate_macro(args.layout, sign_bin_size_re, 0, 1)
img = image.Image.load(args.infile,
version=version_num,
header_size=args.header_size,
security_cnt=args.security_counter,
included_header=args.included_header,
pad=pad_size)
key = keys.load(args.key, args.public_key_format) if args.key else None
ram_load_address = macro_parser.evaluate_macro(args.layout, image_load_address_re, 0, 1)
img.sign(sw_type, key, ram_load_address, args.dependencies)
if pad_size:
img.pad_to(pad_size, args.align)
img.save(args.outfile)
def do_flash(args):
    image_value_re = re.compile(r"^\s*" + args.macro + r"\s*=\s*(.*)")
value = macro_parser.evaluate_macro(args.layout, image_value_re, 0, 1,
True)
if args.setting == 1:
        begin_line = "set " + args.begin
    else:
        begin_line = args.begin
for line in fileinput.input(args.infile, inplace=True):
if line.startswith(begin_line):
if args.division:
value = int(value/int(args.division))
if args.phexa == 0:
line = begin_line+"="+str(value)+"\n"
else:
line = begin_line+"="+hex(value)+"\n"
sys.stdout.write(line)
subcmds = {
'keygen': do_keygen,
'getpub': do_getpub,
'sign': do_sign,
'flash': do_flash, }
def get_dependencies(text):
if text is not None:
versions = []
images = re.findall(r"\((\d+)", text)
if len(images) == 0:
msg = "Image dependency format is invalid: {}".format(text)
raise argparse.ArgumentTypeError(msg)
raw_versions = re.findall(r",\s*([0-9.+]+)\)", text)
if len(images) != len(raw_versions):
msg = '''There's a mismatch between the number of dependency images
and versions in: {}'''.format(text)
raise argparse.ArgumentTypeError(msg)
for raw_version in raw_versions:
try:
versions.append(version.decode_version(raw_version))
except ValueError as e:
print(e)
dependencies = dict()
dependencies[image.DEP_IMAGES_KEY] = images
dependencies[image.DEP_VERSIONS_KEY] = versions
return dependencies
def alignment_value(text):
value = int(text)
if value not in [1, 2, 4, 8]:
msg = "{} must be one of 1, 2, 4 or 8".format(value)
raise argparse.ArgumentTypeError(msg)
return value
def intparse(text):
"""Parse a command line argument as an integer.
Accepts 0x and other prefixes to allow other bases to be used."""
return int(text, 0)
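# For example, intparse("42") == 42, intparse("0x100") == 256 and
# intparse("0o17") == 15, mirroring the semantics of int(text, 0).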
def args():
parser = argparse.ArgumentParser()
subs = parser.add_subparsers(help='subcommand help', dest='subcmd')
keygenp = subs.add_parser('keygen', help='Generate pub/private keypair')
keygenp.add_argument('-k', '--key', metavar='filename', required=True)
keygenp.add_argument('-t', '--type', metavar='type',
choices=keygens.keys(), required=True)
getpub = subs.add_parser('getpub', help='Get public key from keypair')
getpub.add_argument('-k', '--key', metavar='filename', required=True)
getpub.add_argument('-l', '--lang', metavar='lang', default='c')
sign = subs.add_parser('sign', help='Sign an image with a private key')
sign.add_argument('-l', '--layout', required=True,
help='Location of the file that contains preprocessed macros')
sign.add_argument('-k', '--key', metavar='filename')
sign.add_argument("-K", "--public-key-format",
help='In what format to add the public key to the image manifest: full or hash',
metavar='pub_key_format', choices=['full', 'hash'], default='hash')
sign.add_argument("--align", type=alignment_value, required=True)
sign.add_argument("-v", "--version", type=version.decode_version,
default="0.0.0+0")
sign.add_argument("-d", "--dependencies", type=get_dependencies,
required=False, help='''Add dependence on another image,
format: "(<image_ID>,<image_version>), ... "''')
sign.add_argument("-s", "--security-counter", type=intparse,
help='Specify explicitly the security counter value')
sign.add_argument("-H", "--header-size", type=intparse, required=True)
sign.add_argument("--included-header", default=False, action='store_true',
help='Image has gap for header')
sign.add_argument("--rsa-pkcs1-15",
help='Use old PKCS#1 v1.5 signature algorithm',
default=False, action='store_true')
sign.add_argument("infile")
sign.add_argument("outfile")
flash = subs.add_parser('flash', help='modify flash script')
flash.add_argument("infile")
flash.add_argument('-l', '--layout', required=True,
help='Location of the file that contains preprocessed macros')
    flash.add_argument('-m', '--macro', required=True,
                       help='macro symbol string to grep in preprocessed file')
    flash.add_argument('-b', '--begin', required=True,
                       help='beginning of the line to replace')
    flash.add_argument('-s', '--setting', type=intparse, required=False,
                       default=0,
                       help='prefix the begin string with "set " for Windows '
                            'batch scripts')
    flash.add_argument('-d', '--division',
                       required=False, type=intparse, default=0,
                       help='divide the macro value by this number before '
                            'writing it')
    flash.add_argument('-p', '--phexa',
                       required=False, type=intparse, default=1,
                       help='print the value in hexadecimal')
args = parser.parse_args()
if args.subcmd is None:
print('Must specify a subcommand', file=sys.stderr)
sys.exit(1)
subcmds[args.subcmd](args)
if __name__ == '__main__':
args()
|
|
from funcs import *
from db import *
##
# GETTERS
##
@app.route('/'+app.config['RNG_ID']+'/data/get/<path:path>')
def get_log(path):
complete_path = os.path.join(app.config['UPLOAD_FOLDER'], path)
if not os.path.exists(complete_path):
        print "complete path %s does not exist" % complete_path
return abort(404)
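    # Guard against path traversal: the resolved path must remain inside
    # UPLOAD_FOLDER, otherwise answer 404 as if the file did not exist.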
if not os.path.abspath(complete_path).startswith( os.path.abspath(app.config['UPLOAD_FOLDER']) ):
        print "path %s does not start with upload folder %s" % (os.path.abspath(complete_path), os.path.abspath(app.config['UPLOAD_FOLDER']))
return abort(404)
##complete_path = os.path.join(root_dir(), app.config['DB_NAME'])
##ext = os.path.splitext(path)[1]
##mimetype = mimetypes.get(ext, "text/html")
#content = get_file(complete_path)
##return Response(content, mimetype=mimetype)
#return Response(content)
return send_from_directory(app.config['UPLOAD_FOLDER'], path, as_attachment=True, attachment_filename=path)
@app.route('/'+app.config['RNG_ID']+'/data/del/<path:path>')
def get_del(path):
print 'path',path
print 'UPLOAD_FOLDER', app.config['UPLOAD_FOLDER']
complete_path = os.path.join(app.config['UPLOAD_FOLDER'], path)
print 'complete_path', complete_path
if not os.path.exists(complete_path):
        print "complete path %s does not exist" % complete_path
return abort(404)
if not os.path.abspath(complete_path).startswith( os.path.abspath(app.config['UPLOAD_FOLDER']) ):
        print "path %s does not start with upload folder %s" % (os.path.abspath(complete_path), os.path.abspath(app.config['UPLOAD_FOLDER']))
return abort(404)
try:
r = Data.query.filter_by(filepath=path).first()
db.session.delete(r)
db.session.commit()
os.remove(complete_path)
res = { 'del': path, 'error': False }
return jsonify(res), 200
except Exception as e:
print "error", e
return jsonify({ 'del': path, 'error': True, 'message': str(e) }), 501
@app.route('/'+app.config['RNG_ID']+'/data/add/<pi_id>', methods=['POST'])
def add(pi_id):
file = request.files['foto']
filename = file.filename
filesize = request.content_length
path = os.path.join(app.config['UPLOAD_FOLDER'], pi_id )
dst = os.path.join(pi_id , secure_filename(filename))
dstn = os.path.join(app.config['UPLOAD_FOLDER'], dst )
if is_too_late():
print "Too late. go to sleep. I'm not storing your dumb file"
return jsonify({'pi_id': pi_id, 'filename': filename, 'filesize': filesize, 'error': False, 'extra': "too late. go to sleep. I'm not storing your dumb file"}), 200
else:
print "So nice of you to work this hard. Move along."
if not os.path.exists(path):
os.makedirs(path)
file.save(dstn)
print "ADDING ID %s NAME %s SIZE %d PATH %s" % (pi_id, filename, filesize, dst)
info = Data(pi_id, filename, filesize, dst)
db.session.add(info)
try:
db.session.commit()
return jsonify({'pi_id': pi_id, 'filename': filename, 'filesize': filesize, 'error': False}), 200
except:
return jsonify({'pi_id': pi_id, 'filename': filename, 'filesize': filesize, 'error': True, 'message': 'record exists'}), 501
##
# LISTS
##
@app.route('/'+app.config['RNG_ID']+'/data/list/all/', defaults={'pi_id': None})
@app.route('/'+app.config['RNG_ID']+'/data/list/all/<pi_id>/')
def list_data_all(pi_id):
try:
if pi_id is None:
data = Data.query.all()
else:
data = Data.query.filter( Data.pi_id == pi_id ).all()
data = [ x.as_dict() for x in data ]
return jsonify({'ids': data, 'error': False}), 200
except Exception as e:
return jsonify({'message': str(e), 'error': True}), 200
@app.route('/'+app.config['RNG_ID']+'/data/list/last/', defaults={'pi_id': None})
@app.route('/'+app.config['RNG_ID']+'/data/list/last/<pi_id>/')
def list_data_last(pi_id):
try:
if pi_id is None:
data = Data.query.group_by(Data.pi_id)
else:
data = Data.query.filter( Data.pi_id == pi_id ).group_by(Data.pi_id)
data = [ x.as_dict() for x in data ]
return jsonify({'ids': data, 'error': False}), 200
except Exception as e:
return jsonify({'message': str(e), 'error': True}), 200
@app.route('/'+app.config['RNG_ID']+'/data/list/ids/')
def list_data_ids():
try:
data = sorted( [ x.pi_id for x in db.session.query(Data.pi_id).distinct() ] )
return jsonify({'ids': data, 'error': False}), 200
except Exception as e:
return jsonify({'message': str(e), 'error': True}), 200
@app.route('/'+app.config['RNG_ID']+'/data/list/filepath/', defaults={'pi_id': None})
@app.route('/'+app.config['RNG_ID']+'/data/list/filepath/<pi_id>/')
def list_data_filepath(pi_id):
try:
if pi_id is None:
data = sorted( [ x.filepath for x in db.session.query(Data.filepath).distinct() ] )
else:
data = sorted( [ x.filepath for x in db.session.query(Data.filepath).filter(Data.pi_id == pi_id).distinct() ] )
return jsonify({'filepath': data, 'error': False}), 200
except Exception as e:
return jsonify({'message': str(e), 'error': True}), 200
@app.route('/'+app.config['RNG_ID']+'/data/list/filename/', defaults={'pi_id': None})
@app.route('/'+app.config['RNG_ID']+'/data/list/filename/<pi_id>/')
def list_data_filename(pi_id):
try:
if pi_id is None:
data = sorted( [ x.filename for x in db.session.query(Data.filename).distinct() ] )
else:
data = sorted( [ x.filename for x in db.session.query(Data.filename).filter(Data.pi_id == pi_id).distinct() ] )
return jsonify({'filename': data, 'error': False}), 200
except Exception as e:
return jsonify({'message': str(e), 'error': True}), 200
##
# DISPLAY
##
@app.route('/'+app.config['RNG_ID']+'/data/show/all/', defaults={'pi_id': None})
@app.route('/'+app.config['RNG_ID']+'/data/show/all/<pi_id>/')
def display_image_all(pi_id):
try:
if pi_id is None:
res = gen_table( Data.query.all(), app.config['RNG_ID'] )
else:
res = gen_table( Data.query.filter(Data.pi_id == pi_id).all(), app.config['RNG_ID'] )
return res, 200
except Exception as e:
print "error", e
return jsonify({ 'error': True, 'message': str(e) }), 501
@app.route('/'+app.config['RNG_ID']+'/data/show/last/', defaults={'pi_id': None})
@app.route('/'+app.config['RNG_ID']+'/data/show/last/<pi_id>/')
def display_image_last(pi_id):
meta = """<head><meta http-equiv="refresh" content="300" /></head>"""
try:
if pi_id is None:
res = gen_table( Data.query.group_by(Data.pi_id), app.config['RNG_ID'], meta=meta )
else:
res = gen_table( Data.query.group_by(Data.pi_id).filter(Data.pi_id == pi_id), app.config['RNG_ID'], meta=meta )
return res, 200
except Exception as e:
print "error", e
return jsonify({ 'error': True, 'message': str(e) }), 501
|
|
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions for BoltOn method."""
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops as _ops
from tensorflow.python.keras import losses
from tensorflow.python.keras.regularizers import L1L2
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.platform import tf_logging as logging
class StrongConvexMixin:
"""Strong Convex Mixin base class.
Strong Convex Mixin base class for any loss function that will be used with
BoltOn model. Subclasses must be strongly convex and implement the
associated constants. They must also conform to the requirements of tf losses
(see super class).
For more details on the strong convexity requirements, see:
Bolt-on Differential Privacy for Scalable Stochastic Gradient
Descent-based Analytics by Xi Wu et. al.
"""
def radius(self):
"""Radius, R, of the hypothesis space W.
W is a convex set that forms the hypothesis space.
Returns:
R
"""
    raise NotImplementedError("Radius not implemented for StrongConvex Loss "
                              "function: %s" % str(self.__class__.__name__))
def gamma(self):
"""Returns strongly convex parameter, gamma."""
    raise NotImplementedError("Gamma not implemented for StrongConvex Loss "
                              "function: %s" % str(self.__class__.__name__))
def beta(self, class_weight):
"""Smoothness, beta.
Args:
class_weight: the class weights as scalar or 1d tensor, where its
dimensionality is equal to the number of outputs.
Returns:
Beta
"""
    raise NotImplementedError("Beta not implemented for StrongConvex Loss "
                              "function: %s" % str(self.__class__.__name__))
  def lipchitz_constant(self, class_weight):
    """Lipschitz constant, L.
Args:
class_weight: class weights used
Returns: L
"""
    raise NotImplementedError("Lipschitz constant not implemented for "
                              "StrongConvex Loss "
                              "function: %s" % str(self.__class__.__name__))
def kernel_regularizer(self):
"""Returns the kernel_regularizer to be used.
Any subclass should override this method if they want a kernel_regularizer
    (if required for the loss function to be StronglyConvex).
"""
return None
def max_class_weight(self, class_weight, dtype):
"""The maximum weighting in class weights (max value) as a scalar tensor.
Args:
class_weight: class weights used
dtype: the data type for tensor conversions.
Returns:
maximum class weighting as tensor scalar
"""
class_weight = _ops.convert_to_tensor_v2(class_weight, dtype)
return tf.math.reduce_max(class_weight)
class StrongConvexHuber(losses.Loss, StrongConvexMixin):
"""Strong Convex version of Huber loss using l2 weight regularization."""
def __init__(self,
reg_lambda,
c_arg,
radius_constant,
delta,
reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
dtype=tf.float32):
"""Constructor.
Args:
reg_lambda: Weight regularization constant
c_arg: Penalty parameter C of the loss term
radius_constant: constant defining the length of the radius
delta: delta value in huber loss. When to switch from quadratic to
absolute deviation.
reduction: reduction type to use. See super class
dtype: tf datatype to use for tensor conversions.
    """
    if c_arg <= 0:
      raise ValueError("c: {0}, should be > 0".format(c_arg))
    if reg_lambda <= 0:
      raise ValueError("reg lambda: {0} must be positive".format(reg_lambda))
    if radius_constant <= 0:
      raise ValueError(
          "radius_constant: {0}, should be > 0".format(radius_constant))
    if delta <= 0:
      raise ValueError("delta: {0}, should be > 0".format(delta))
self.C = c_arg # pylint: disable=invalid-name
self.delta = delta
self.radius_constant = radius_constant
self.dtype = dtype
self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
super().__init__(reduction=reduction, name="strongconvexhuber")
def call(self, y_true, y_pred):
"""Computes loss.
Args:
y_true: Ground truth values. One hot encoded using -1 and 1.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
h = self.delta
z = y_pred * y_true
one = tf.constant(1, dtype=self.dtype)
four = tf.constant(4, dtype=self.dtype)
if z > one + h: # pylint: disable=no-else-return
return _ops.convert_to_tensor_v2(0, dtype=self.dtype)
elif tf.math.abs(one - z) <= h:
return one / (four * h) * tf.math.pow(one + h - z, 2)
return one - z
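  # Written out (with h = delta and z = y_pred * y_true), the piecewise loss
  # computed above is:
  #   loss(z) = 0                         if z > 1 + h
  #           = (1 + h - z)^2 / (4 * h)   if |1 - z| <= h
  #           = 1 - z                     otherwise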
def radius(self):
"""See super class."""
return self.radius_constant / self.reg_lambda
def gamma(self):
"""See super class."""
return self.reg_lambda
def beta(self, class_weight):
"""See super class."""
max_class_weight = self.max_class_weight(class_weight, self.dtype)
delta = _ops.convert_to_tensor_v2(self.delta, dtype=self.dtype)
return self.C * max_class_weight / (
delta * tf.constant(2, dtype=self.dtype)) + self.reg_lambda
def lipchitz_constant(self, class_weight):
"""See super class."""
# if class_weight is provided,
# it should be a vector of the same size of number of classes
max_class_weight = self.max_class_weight(class_weight, self.dtype)
lc = self.C * max_class_weight + self.reg_lambda * self.radius()
return lc
def kernel_regularizer(self):
"""Return l2 loss using 0.5*reg_lambda as the l2 term (as desired).
L2 regularization is required for this loss function to be strongly convex.
Returns:
The L2 regularizer layer for this loss function, with regularizer constant
      set to 0.5 * reg_lambda.
"""
return L1L2(l2=self.reg_lambda / 2)
class StrongConvexBinaryCrossentropy(losses.BinaryCrossentropy,
StrongConvexMixin):
"""Strongly Convex BinaryCrossentropy loss using l2 weight regularization."""
def __init__(self,
reg_lambda,
c_arg,
radius_constant,
from_logits=True,
label_smoothing=0,
reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
dtype=tf.float32):
"""StrongConvexBinaryCrossentropy class.
Args:
reg_lambda: Weight regularization constant
c_arg: Penalty parameter C of the loss term
radius_constant: constant defining the length of the radius
from_logits: True if the input are unscaled logits. False if they are
already scaled.
      label_smoothing: amount of smoothing to perform on labels (relaxation of
        trust in labels), e.g. (1 -> 1-x, 0 -> 0+x). Note, the impact of this
        parameter on privacy is not known and thus the default should be used.
reduction: reduction type to use. See super class
dtype: tf datatype to use for tensor conversions.
"""
if label_smoothing != 0:
logging.warning("The impact of label smoothing on privacy is unknown. "
"Use label smoothing at your own risk as it may not "
"guarantee privacy.")
if reg_lambda <= 0:
raise ValueError("reg lambda: {0} must be positive".format(reg_lambda))
    if c_arg <= 0:
      raise ValueError("c: {0}, should be > 0".format(c_arg))
    if radius_constant <= 0:
      raise ValueError(
          "radius_constant: {0}, should be > 0".format(radius_constant))
self.dtype = dtype
self.C = c_arg # pylint: disable=invalid-name
self.reg_lambda = tf.constant(reg_lambda, dtype=self.dtype)
super().__init__(
reduction=reduction,
name="strongconvexbinarycrossentropy",
from_logits=from_logits,
label_smoothing=label_smoothing,
)
self.radius_constant = radius_constant
def call(self, y_true, y_pred):
"""Computes loss.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
loss = super(StrongConvexBinaryCrossentropy, self).call(y_true, y_pred)
loss = loss * self.C
return loss
def radius(self):
"""See super class."""
return self.radius_constant / self.reg_lambda
def gamma(self):
"""See super class."""
return self.reg_lambda
def beta(self, class_weight):
"""See super class."""
max_class_weight = self.max_class_weight(class_weight, self.dtype)
return self.C * max_class_weight + self.reg_lambda
def lipchitz_constant(self, class_weight):
"""See super class."""
max_class_weight = self.max_class_weight(class_weight, self.dtype)
return self.C * max_class_weight + self.reg_lambda * self.radius()
def kernel_regularizer(self):
"""Return l2 loss using 0.5*reg_lambda as the l2 term (as desired).
L2 regularization is required for this loss function to be strongly convex.
Returns:
The L2 regularizer layer for this loss function, with regularizer constant
      set to 0.5 * reg_lambda.
"""
return L1L2(l2=self.reg_lambda / 2)
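# A minimal usage sketch (values are illustrative, not prescriptive; the BoltOn
# model/optimizer that normally consumes these losses lives in other modules):
#
#     loss_fn = StrongConvexBinaryCrossentropy(
#         reg_lambda=1.0, c_arg=1.0, radius_constant=1.0, from_logits=True)
#     y_true = tf.constant([[1.0], [0.0]])
#     logits = tf.constant([[2.0], [-1.0]])
#     per_batch = loss_fn(y_true, logits)          # cross-entropy scaled by C
#     regularizer = loss_fn.kernel_regularizer()   # L1L2(l2=reg_lambda / 2)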
|
|
"""Test report state."""
from unittest.mock import patch
from homeassistant import core
from homeassistant.components.alexa import state_report
from . import DEFAULT_CONFIG, TEST_URL
async def test_report_state(hass, aioclient_mock):
"""Test proactive state reports."""
aioclient_mock.post(TEST_URL, text="", status=202)
hass.states.async_set(
"binary_sensor.test_contact",
"on",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
await state_report.async_enable_proactive_mode(hass, DEFAULT_CONFIG)
hass.states.async_set(
"binary_sensor.test_contact",
"off",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
# To trigger event listener
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 1
call = aioclient_mock.mock_calls
call_json = call[0][2]
assert call_json["event"]["header"]["namespace"] == "Alexa"
assert call_json["event"]["header"]["name"] == "ChangeReport"
assert (
call_json["event"]["payload"]["change"]["properties"][0]["value"]
== "NOT_DETECTED"
)
assert call_json["event"]["endpoint"]["endpointId"] == "binary_sensor#test_contact"
async def test_report_state_instance(hass, aioclient_mock):
"""Test proactive state reports with instance."""
aioclient_mock.post(TEST_URL, text="", status=202)
hass.states.async_set(
"fan.test_fan",
"off",
{
"friendly_name": "Test fan",
"supported_features": 3,
"speed": "off",
"speed_list": ["off", "low", "high"],
"oscillating": False,
},
)
await state_report.async_enable_proactive_mode(hass, DEFAULT_CONFIG)
hass.states.async_set(
"fan.test_fan",
"on",
{
"friendly_name": "Test fan",
"supported_features": 3,
"speed": "high",
"speed_list": ["off", "low", "high"],
"oscillating": True,
},
)
# To trigger event listener
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 1
call = aioclient_mock.mock_calls
call_json = call[0][2]
assert call_json["event"]["header"]["namespace"] == "Alexa"
assert call_json["event"]["header"]["name"] == "ChangeReport"
change_reports = call_json["event"]["payload"]["change"]["properties"]
for report in change_reports:
if report["name"] == "toggleState":
assert report["value"] == "ON"
assert report["instance"] == "fan.oscillating"
assert report["namespace"] == "Alexa.ToggleController"
assert call_json["event"]["endpoint"]["endpointId"] == "fan#test_fan"
async def test_send_add_or_update_message(hass, aioclient_mock):
"""Test sending an AddOrUpdateReport message."""
aioclient_mock.post(TEST_URL, text="")
hass.states.async_set(
"binary_sensor.test_contact",
"on",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
await state_report.async_send_add_or_update_message(
hass, DEFAULT_CONFIG, ["binary_sensor.test_contact", "zwave.bla"]
)
assert len(aioclient_mock.mock_calls) == 1
call = aioclient_mock.mock_calls
call_json = call[0][2]
assert call_json["event"]["header"]["namespace"] == "Alexa.Discovery"
assert call_json["event"]["header"]["name"] == "AddOrUpdateReport"
assert len(call_json["event"]["payload"]["endpoints"]) == 1
assert (
call_json["event"]["payload"]["endpoints"][0]["endpointId"]
== "binary_sensor#test_contact"
)
async def test_send_delete_message(hass, aioclient_mock):
    """Test sending a DeleteReport message."""
aioclient_mock.post(TEST_URL, json={"data": "is irrelevant"})
hass.states.async_set(
"binary_sensor.test_contact",
"on",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
await state_report.async_send_delete_message(
hass, DEFAULT_CONFIG, ["binary_sensor.test_contact", "zwave.bla"]
)
assert len(aioclient_mock.mock_calls) == 1
call = aioclient_mock.mock_calls
call_json = call[0][2]
assert call_json["event"]["header"]["namespace"] == "Alexa.Discovery"
assert call_json["event"]["header"]["name"] == "DeleteReport"
assert len(call_json["event"]["payload"]["endpoints"]) == 1
assert (
call_json["event"]["payload"]["endpoints"][0]["endpointId"]
== "binary_sensor#test_contact"
)
async def test_doorbell_event(hass, aioclient_mock):
"""Test doorbell press reports."""
aioclient_mock.post(TEST_URL, text="", status=202)
hass.states.async_set(
"binary_sensor.test_doorbell",
"off",
{"friendly_name": "Test Doorbell Sensor", "device_class": "occupancy"},
)
await state_report.async_enable_proactive_mode(hass, DEFAULT_CONFIG)
hass.states.async_set(
"binary_sensor.test_doorbell",
"on",
{"friendly_name": "Test Doorbell Sensor", "device_class": "occupancy"},
)
# To trigger event listener
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 1
call = aioclient_mock.mock_calls
call_json = call[0][2]
assert call_json["event"]["header"]["namespace"] == "Alexa.DoorbellEventSource"
assert call_json["event"]["header"]["name"] == "DoorbellPress"
assert call_json["event"]["payload"]["cause"]["type"] == "PHYSICAL_INTERACTION"
assert call_json["event"]["endpoint"]["endpointId"] == "binary_sensor#test_doorbell"
hass.states.async_set(
"binary_sensor.test_doorbell",
"off",
{"friendly_name": "Test Doorbell Sensor", "device_class": "occupancy"},
)
hass.states.async_set(
"binary_sensor.test_doorbell",
"on",
{"friendly_name": "Test Doorbell Sensor", "device_class": "occupancy"},
)
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 2
async def test_proactive_mode_filter_states(hass, aioclient_mock):
"""Test all the cases that filter states."""
aioclient_mock.post(TEST_URL, text="", status=202)
await state_report.async_enable_proactive_mode(hass, DEFAULT_CONFIG)
# First state should report
hass.states.async_set(
"binary_sensor.test_contact",
"on",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 1
aioclient_mock.clear_requests()
# Second one shouldn't
hass.states.async_set(
"binary_sensor.test_contact",
"on",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
assert len(aioclient_mock.mock_calls) == 0
# hass not running should not report
hass.states.async_set(
"binary_sensor.test_contact",
"off",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
with patch.object(hass, "state", core.CoreState.stopping):
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 0
# unsupported entity should not report
hass.states.async_set(
"binary_sensor.test_contact",
"on",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
with patch.dict(
"homeassistant.components.alexa.state_report.ENTITY_ADAPTERS", {}, clear=True
):
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 0
# Not exposed by config should not report
hass.states.async_set(
"binary_sensor.test_contact",
"off",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
with patch.object(DEFAULT_CONFIG, "should_expose", return_value=False):
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 0
# Removing an entity
hass.states.async_remove("binary_sensor.test_contact")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 0
# If serializes to same properties, it should not report
aioclient_mock.post(TEST_URL, text="", status=202)
with patch(
"homeassistant.components.alexa.entities.AlexaEntity.serialize_properties",
return_value=[{"same": "info"}],
):
hass.states.async_set(
"binary_sensor.same_serialize",
"off",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
await hass.async_block_till_done()
hass.states.async_set(
"binary_sensor.same_serialize",
"off",
{"friendly_name": "Test Contact Sensor", "device_class": "door"},
)
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 1
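# Illustrative helper sketch, not part of the original test suite: several assertions above
# walk event["payload"]["change"]["properties"] looking for a single property by name. The
# same lookup factored into a small helper (the helper name is made up here) could be:
def _find_change_property(call_json, name):
    """Return the first ChangeReport property with the given name, or None."""
    for prop in call_json["event"]["payload"]["change"]["properties"]:
        if prop["name"] == name:
            return prop
    return None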
|
|
# -*- coding: utf-8 -*-
"""
flask.ext.security.views
~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security views module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app, redirect, request, render_template, jsonify, \
after_this_request, Blueprint
from flask_login import current_user
from werkzeug.datastructures import MultiDict
from werkzeug.local import LocalProxy
from .confirmable import send_confirmation_instructions, \
confirm_user, confirm_email_token_status
from .decorators import login_required, anonymous_user_required
from .passwordless import send_login_instructions, \
login_token_status
from .recoverable import reset_password_token_status, \
send_reset_password_instructions, update_password
from .changeable import change_user_password
from .registerable import register_user
from .utils import config_value, do_flash, get_url, get_post_login_redirect, \
get_post_register_redirect, get_message, login_user, logout_user, \
url_for_security as url_for
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def _render_json(form, include_auth_token=False):
has_errors = len(form.errors) > 0
if has_errors:
code = 400
response = dict(errors=form.errors)
else:
code = 200
response = dict(user=dict(id=str(form.user.id)))
if include_auth_token:
token = form.user.get_auth_token()
response['user']['authentication_token'] = token
return jsonify(dict(meta=dict(code=code), response=response))
def _commit(response=None):
_datastore.commit()
return response
def _ctx(endpoint):
return _security._run_ctx_processor(endpoint)
@anonymous_user_required
def login():
"""View function for login view"""
form_class = _security.login_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
login_user(form.user, remember=form.remember.data)
after_this_request(_commit)
if not request.json:
return redirect(get_post_login_redirect())
form.next.data = get_url(request.args.get('next')) \
or get_url(request.form.get('next')) or ''
if request.json:
return _render_json(form, True)
return render_template(config_value('LOGIN_USER_TEMPLATE'),
login_user_form=form,
**_ctx('login'))
@login_required
def logout():
"""View function which handles a logout request."""
logout_user()
return redirect(request.args.get('next', None) or
get_url(_security.post_logout_view))
def register():
"""View function which handles a registration request."""
if _security.confirmable or request.json:
form_class = _security.confirm_register_form
else:
form_class = _security.register_form
if request.json:
form_data = MultiDict(request.json)
else:
form_data = request.form
form = form_class(form_data)
if form.validate_on_submit():
user = register_user(**form.to_dict())
form.user = user
if not _security.confirmable or _security.login_without_confirmation:
after_this_request(_commit)
login_user(user)
if not request.json:
return redirect(get_post_register_redirect())
return _render_json(form, True)
if request.json:
return _render_json(form)
return render_template(config_value('REGISTER_USER_TEMPLATE'),
register_user_form=form,
**_ctx('register'))
def send_login():
"""View function that sends login instructions for passwordless login"""
form_class = _security.passwordless_login_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
send_login_instructions(form.user)
if request.json is None:
do_flash(*get_message('LOGIN_EMAIL_SENT', email=form.user.email))
if request.json:
return _render_json(form)
return render_template(config_value('SEND_LOGIN_TEMPLATE'),
send_login_form=form,
**_ctx('send_login'))
@anonymous_user_required
def token_login(token):
"""View function that handles passwordless login via a token"""
expired, invalid, user = login_token_status(token)
if invalid:
do_flash(*get_message('INVALID_LOGIN_TOKEN'))
if expired:
send_login_instructions(user)
do_flash(*get_message('LOGIN_EXPIRED', email=user.email,
within=_security.login_within))
if invalid or expired:
return redirect(url_for('login'))
login_user(user)
after_this_request(_commit)
do_flash(*get_message('PASSWORDLESS_LOGIN_SUCCESSFUL'))
return redirect(get_post_login_redirect())
def send_confirmation():
"""View function which sends confirmation instructions."""
form_class = _security.send_confirmation_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
send_confirmation_instructions(form.user)
if request.json is None:
do_flash(*get_message('CONFIRMATION_REQUEST', email=form.user.email))
if request.json:
return _render_json(form)
return render_template(config_value('SEND_CONFIRMATION_TEMPLATE'),
send_confirmation_form=form,
**_ctx('send_confirmation'))
def confirm_email(token):
"""View function which handles a email confirmation request."""
expired, invalid, user = confirm_email_token_status(token)
if not user or invalid:
invalid = True
do_flash(*get_message('INVALID_CONFIRMATION_TOKEN'))
if expired:
send_confirmation_instructions(user)
do_flash(*get_message('CONFIRMATION_EXPIRED', email=user.email,
within=_security.confirm_email_within))
if invalid or expired:
return redirect(get_url(_security.confirm_error_view) or
url_for('send_confirmation'))
if user != current_user:
logout_user()
login_user(user)
confirm_user(user)
after_this_request(_commit)
do_flash(*get_message('EMAIL_CONFIRMED'))
return redirect(get_url(_security.post_confirm_view) or
get_url(_security.post_login_view))
def forgot_password():
"""View function that handles a forgotten password request."""
form_class = _security.forgot_password_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
send_reset_password_instructions(form.user)
if request.json is None:
do_flash(*get_message('PASSWORD_RESET_REQUEST', email=form.user.email))
if request.json:
return _render_json(form)
return render_template(config_value('FORGOT_PASSWORD_TEMPLATE'),
forgot_password_form=form,
**_ctx('forgot_password'))
@anonymous_user_required
def reset_password(token):
"""View function that handles a reset password request."""
expired, invalid, user = reset_password_token_status(token)
if invalid:
do_flash(*get_message('INVALID_RESET_PASSWORD_TOKEN'))
if expired:
do_flash(*get_message('PASSWORD_RESET_EXPIRED', email=user.email,
within=_security.reset_password_within))
if invalid or expired:
return redirect(url_for('forgot_password'))
form = _security.reset_password_form()
if form.validate_on_submit():
after_this_request(_commit)
update_password(user, form.password.data)
do_flash(*get_message('PASSWORD_RESET'))
login_user(user)
return redirect(get_url(_security.post_reset_view) or
get_url(_security.post_login_view))
return render_template(config_value('RESET_PASSWORD_TEMPLATE'),
reset_password_form=form,
reset_password_token=token,
**_ctx('reset_password'))
@login_required
def change_password():
"""View function which handles a change password request."""
form_class = _security.change_password_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
after_this_request(_commit)
change_user_password(current_user, form.new_password.data)
if request.json is None:
do_flash(*get_message('PASSWORD_CHANGE'))
return redirect(get_url(_security.post_change_view) or
get_url(_security.post_login_view))
if request.json:
form.user = current_user
return _render_json(form)
return render_template(config_value('CHANGE_PASSWORD_TEMPLATE'),
change_password_form=form,
**_ctx('change_password'))
def create_blueprint(state, import_name):
"""Creates the security extension blueprint"""
bp = Blueprint(state.blueprint_name, import_name,
url_prefix=state.url_prefix,
subdomain=state.subdomain,
template_folder='templates')
bp.route(state.logout_url, endpoint='logout')(logout)
if state.passwordless:
bp.route(state.login_url,
methods=['GET', 'POST'],
endpoint='login')(send_login)
bp.route(state.login_url + '/<token>',
endpoint='token_login')(token_login)
else:
bp.route(state.login_url,
methods=['GET', 'POST'],
endpoint='login')(login)
if state.registerable:
bp.route(state.register_url,
methods=['GET', 'POST'],
endpoint='register')(register)
if state.recoverable:
bp.route(state.reset_url,
methods=['GET', 'POST'],
endpoint='forgot_password')(forgot_password)
bp.route(state.reset_url + '/<token>',
methods=['GET', 'POST'],
endpoint='reset_password')(reset_password)
if state.changeable:
bp.route(state.change_url,
methods=['GET', 'POST'],
endpoint='change_password')(change_password)
if state.confirmable:
bp.route(state.confirm_url,
methods=['GET', 'POST'],
endpoint='send_confirmation')(send_confirmation)
bp.route(state.confirm_url + '/<token>',
methods=['GET', 'POST'],
endpoint='confirm_email')(confirm_email)
return bp
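# Illustrative sketch only, not part of Flask-Security: the JSON branch of login() above
# replies with the envelope built by _render_json(), i.e. {"meta": {"code": ...}, "response": ...}.
# A minimal exercise of that contract through a Flask test client might look like the
# following; the endpoint path and credentials are assumptions made for this example.
def _example_json_login(app):
    import json
    client = app.test_client()
    resp = client.post('/login',
                       data=json.dumps({'email': 'user@example.com', 'password': 'secret'}),
                       content_type='application/json')
    body = json.loads(resp.data)
    # _render_json() always wraps the result as {"meta": {"code": ...}, "response": {...}}
    assert body['meta']['code'] in (200, 400)
    return body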
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
import tvm.relay.testing
import numpy as np
from tvm.relay import Expr
from tvm.relay.analysis import alpha_equal, assert_alpha_equal, assert_graph_equal, free_vars
do_print = [False]
SEMVER = "v0.0.4\n"
def astext(p, unify_free_vars=False):
txt = p.astext()
if isinstance(p, Expr) and free_vars(p):
return txt
x = relay.fromtext(txt)
if unify_free_vars:
assert_graph_equal(x, p)
else:
assert_alpha_equal(x, p)
return txt
def show(text):
if do_print[0]:
print("---------------------------")
print(text)
def test_func():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.add(z, z)
f = relay.Function([x, y], z)
show(astext(z))
show(astext(f))
def test_env():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
z = relay.add(x, y)
z = relay.add(z, z)
f = relay.Function([x, y], z)
env = relay.Module()
env["myf"] = f
text = astext(env)
assert "def @myf" in text
assert "def @myf" in str(env)
assert "add(%0, %0) /* ty=float32 */" in text
assert "add(%0, %0) /* ty=float32 */" in str(env)
show(env.astext(annotate=lambda x: str(x.checked_type.dtype) if type(x) == relay.Call else ""))
show(text)
def test_meta_data():
n, c, h, w = tvm.var("n"), 10, 224, 224
x = relay.var("x", shape=(n, c, h, w))
w = relay.var("w")
z = relay.nn.conv2d(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=2)
f = relay.Function([x, w], z)
text = astext(f, unify_free_vars=True)
text_no_meta = str(f)
assert "channels=2" in text
assert "channels=2" in text_no_meta
assert "meta[Variable][0]" in text
assert "meta[Variable][0]" in text_no_meta
assert "type_key" in text
assert "type_key" not in text_no_meta
text = astext(relay.const([1,2,3]))
assert "meta[relay.Constant][0]" in text
def test_call_attrs():
x = relay.var("x")
# non default args
z = relay.nn.softmax(x, axis=2)
assert "axis=2" in astext(z)
# default args
z = relay.nn.softmax(x)
assert "softmax(%x)" in astext(z)
# non default args
z = relay.expand_dims(x, axis=2, num_newaxis=2)
assert "num_newaxis=2" in astext(z)
def test_let_if_scope():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
cond = relay.var("cond", "bool")
sb = relay.ScopeBuilder()
with sb.if_scope(cond):
v1 = sb.let("v", relay.const(1, "float32"))
v2 = sb.let("v", x)
sb.ret(relay.subtract(v1, v2))
with sb.else_scope():
v3 = relay.var("v")
let2 = relay.Let(v3, y, v3)
sb.ret(relay.add(let2, let2))
result = sb.get()
f = relay.Function([x, y, cond], result)
text = astext(f)
assert text.count("{") == 3
assert "%cond: bool" in text
show(astext(f))
def test_variable_name():
    # avoid a purely numeric variable name even if the name hint is a pure number
v1 = relay.var("1")
assert "%v1" in astext(v1)
def test_mlp():
net, params = tvm.relay.testing.mlp.get_workload(batch_size=1)
astext(net)
def test_resnet():
net, params = tvm.relay.testing.resnet.get_workload(batch_size=1)
astext(net)
def test_mobilenet():
net, params = tvm.relay.testing.mobilenet.get_workload(batch_size=1)
astext(net)
def test_dqn():
net, params = tvm.relay.testing.dqn.get_workload(batch_size=1)
astext(net)
def test_dcgan():
net, params = tvm.relay.testing.dcgan.get_workload(batch_size=1)
astext(net)
def test_lstm():
net, params = tvm.relay.testing.lstm.get_workload(1, 1)
astext(net)
net, params = tvm.relay.testing.lstm.get_workload(4, 4)
astext(net)
def test_inception_v3():
net, params = tvm.relay.testing.inception_v3.get_workload(batch_size=1)
astext(net)
def test_squeezenet():
for version in ['1.0', '1.1']:
net, params = tvm.relay.testing.squeezenet.get_workload(batch_size=1, version=version)
astext(net)
def test_vgg():
net, params = tvm.relay.testing.vgg.get_workload(batch_size=1)
astext(net)
def test_densenet():
net, params = tvm.relay.testing.densenet.get_workload(batch_size=1)
astext(net)
def test_call_node_order():
x = relay.var("x")
y = relay.var("y")
prog = relay.Call(relay.Function([x], x), [relay.Call(relay.Function([y], y), [relay.const(1)])])
assert astext(prog) == SEMVER + \
("%0 = fn (%y) {\n"
" %y\n"
"};\n"
"%1 = %0(1);\n"
"%2 = fn (%x) {\n"
" %x\n"
"};\n"
"%2(%1)")
def test_let_inlining():
tup = relay.Tuple([relay.const(0), relay.const(0)])
x = relay.var("x")
assert astext(relay.Let(x, tup, tup)) == SEMVER + \
("%0 = (0, 0);\n"
"let %x = %0;\n"
"%0")
assert astext(relay.Let(x, tup, x)) == SEMVER + \
("let %x = (0, 0);\n"
"%x")
def test_zeros():
x = relay.op.zeros([], "float32")
astext(x)
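# Illustrative sketch, not one of the original tests: astext() above prints a Relay object
# and, when it has no free variables, re-parses the text and checks alpha equivalence. A
# minimal standalone use of that round trip under the same imports might be:
def example_astext_roundtrip():
    a = relay.var("a", shape=(2, 2), dtype="float32")
    body = relay.add(a, a)
    printed = astext(relay.Function([a], body))
    assert "add" in printed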
if __name__ == "__main__":
do_print[0] = True
test_lstm()
test_zeros()
test_meta_data()
test_let_inlining()
test_resnet()
test_mobilenet()
test_mlp()
test_dqn()
test_dcgan()
test_squeezenet()
test_inception_v3()
test_vgg()
test_densenet()
test_func()
test_env()
test_call_attrs()
test_let_if_scope()
test_variable_name()
test_call_node_order()
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MemberTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members/MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"last_consumed_message_index": null,
"last_consumption_timestamp": null,
"date_created": "2016-03-24T21:05:50Z",
"date_updated": "2016-03-24T21:05:50Z",
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members.create(identity="identity")
values = {'Identity': "identity", }
self.holodeck.assert_has_request(Request(
'post',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"last_consumed_message_index": null,
"last_consumption_timestamp": null,
"date_created": "2016-03-24T21:05:50Z",
"date_updated": "2016-03-24T21:05:50Z",
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members.create(identity="identity")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members.list()
self.holodeck.assert_has_request(Request(
'get',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members?PageSize=50&Page=0",
"next_page_url": null,
"key": "members"
},
"members": [
{
"sid": "MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"last_consumed_message_index": null,
"last_consumption_timestamp": null,
"date_created": "2016-03-24T21:05:50Z",
"date_updated": "2016-03-24T21:05:50Z",
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}
'''
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members?PageSize=50&Page=0",
"next_page_url": null,
"key": "members"
},
"members": []
}
'''
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members/MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members/MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_role_sid_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"last_consumed_message_index": null,
"last_consumption_timestamp": null,
"date_created": "2016-03-24T21:05:50Z",
"date_updated": "2016-03-24T21:05:50Z",
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_update_last_consumed_message_index_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"channel_sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"identity": "jing",
"role_sid": "RLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"last_consumed_message_index": 666,
"last_consumption_timestamp": null,
"date_created": "2016-03-24T21:05:50Z",
"date_updated": "2016-03-24T21:05:50Z",
"url": "https://ip-messaging.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/MBaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
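# Illustrative sketch only, not part of the generated suite: every *_request test above
# follows the same three steps -- mock a 500 response, expect TwilioException from the
# client call, then assert the outbound request. That pattern as a reusable helper might be:
def _assert_request_made(test_case, api_call, expected_request):
    test_case.holodeck.mock(Response(500, ''))
    with test_case.assertRaises(TwilioException):
        api_call()
    test_case.holodeck.assert_has_request(expected_request)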
|
|
"""Support for the Xiaomi IR Remote (Chuangmi IR)."""
import asyncio
from datetime import timedelta
import logging
import time
from miio import ChuangmiIr, DeviceException # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.remote import (
ATTR_DELAY_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
PLATFORM_SCHEMA,
RemoteEntity,
)
from homeassistant.const import (
CONF_COMMAND,
CONF_HOST,
CONF_NAME,
CONF_TIMEOUT,
CONF_TOKEN,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.util.dt import utcnow
from .const import SERVICE_LEARN, SERVICE_SET_REMOTE_LED_OFF, SERVICE_SET_REMOTE_LED_ON
_LOGGER = logging.getLogger(__name__)
DATA_KEY = "remote.xiaomi_miio"
CONF_SLOT = "slot"
CONF_COMMANDS = "commands"
DEFAULT_TIMEOUT = 10
DEFAULT_SLOT = 1
COMMAND_SCHEMA = vol.Schema(
{vol.Required(CONF_COMMAND): vol.All(cv.ensure_list, [cv.string])}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.All(
int, vol.Range(min=0)
),
vol.Optional(CONF_SLOT, default=DEFAULT_SLOT): vol.All(
int, vol.Range(min=1, max=1000000)
),
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_COMMANDS, default={}): cv.schema_with_slug_keys(
COMMAND_SCHEMA
),
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Xiaomi IR Remote (Chuangmi IR) platform."""
host = config[CONF_HOST]
token = config[CONF_TOKEN]
# Create handler
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
# The Chuang Mi IR Remote Controller wants to be re-discovered every
# 5 minutes. As long as polling is disabled the device should be
# re-discovered (lazy_discover=False) in front of every command.
device = ChuangmiIr(host, token, lazy_discover=False)
# Check that we can communicate with device.
try:
device_info = await hass.async_add_executor_job(device.info)
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException as ex:
_LOGGER.error("Device unavailable or token incorrect: %s", ex)
raise PlatformNotReady
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
friendly_name = config.get(CONF_NAME, f"xiaomi_miio_{host.replace('.', '_')}")
slot = config.get(CONF_SLOT)
timeout = config.get(CONF_TIMEOUT)
xiaomi_miio_remote = XiaomiMiioRemote(
friendly_name, device, unique_id, slot, timeout, config.get(CONF_COMMANDS)
)
hass.data[DATA_KEY][host] = xiaomi_miio_remote
async_add_entities([xiaomi_miio_remote])
async def async_service_led_off_handler(entity, service):
"""Handle set_led_off command."""
await hass.async_add_executor_job(entity.device.set_indicator_led, False)
async def async_service_led_on_handler(entity, service):
"""Handle set_led_on command."""
await hass.async_add_executor_job(entity.device.set_indicator_led, True)
async def async_service_learn_handler(entity, service):
"""Handle a learn command."""
device = entity.device
slot = service.data.get(CONF_SLOT, entity.slot)
await hass.async_add_executor_job(device.learn, slot)
timeout = service.data.get(CONF_TIMEOUT, entity.timeout)
_LOGGER.info("Press the key you want Home Assistant to learn")
start_time = utcnow()
while (utcnow() - start_time) < timedelta(seconds=timeout):
message = await hass.async_add_executor_job(device.read, slot)
_LOGGER.debug("Message received from device: '%s'", message)
if "code" in message and message["code"]:
log_msg = "Received command is: {}".format(message["code"])
_LOGGER.info(log_msg)
hass.components.persistent_notification.async_create(
log_msg, title="Xiaomi Miio Remote"
)
return
if "error" in message and message["error"]["message"] == "learn timeout":
await hass.async_add_executor_job(device.learn, slot)
await asyncio.sleep(1)
_LOGGER.error("Timeout. No infrared command captured")
hass.components.persistent_notification.async_create(
"Timeout. No infrared command captured", title="Xiaomi Miio Remote"
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_LEARN,
{
vol.Optional(CONF_TIMEOUT, default=10): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_SLOT, default=1): vol.All(
int, vol.Range(min=1, max=1000000)
),
},
async_service_learn_handler,
)
platform.async_register_entity_service(
SERVICE_SET_REMOTE_LED_ON, {}, async_service_led_on_handler,
)
platform.async_register_entity_service(
SERVICE_SET_REMOTE_LED_OFF, {}, async_service_led_off_handler,
)
class XiaomiMiioRemote(RemoteEntity):
"""Representation of a Xiaomi Miio Remote device."""
def __init__(self, friendly_name, device, unique_id, slot, timeout, commands):
"""Initialize the remote."""
self._name = friendly_name
self._device = device
self._unique_id = unique_id
self._slot = slot
self._timeout = timeout
self._state = False
self._commands = commands
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the remote."""
return self._name
@property
def device(self):
"""Return the remote object."""
return self._device
@property
def slot(self):
"""Return the slot to save learned command."""
return self._slot
@property
def timeout(self):
"""Return the timeout for learning command."""
return self._timeout
@property
def is_on(self):
"""Return False if device is unreachable, else True."""
try:
self.device.info()
return True
except DeviceException:
return False
@property
def should_poll(self):
"""We should not be polled for device up state."""
return False
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGER.error(
"Device does not support turn_on, "
"please use 'remote.send_command' to send commands"
)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGER.error(
"Device does not support turn_off, "
"please use 'remote.send_command' to send commands"
)
def _send_command(self, payload):
"""Send a command."""
_LOGGER.debug("Sending payload: '%s'", payload)
try:
self.device.play(payload)
except DeviceException as ex:
_LOGGER.error(
"Transmit of IR command failed, %s, exception: %s", payload, ex
)
def send_command(self, command, **kwargs):
"""Send a command."""
num_repeats = kwargs.get(ATTR_NUM_REPEATS)
delay = kwargs.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
for _ in range(num_repeats):
for payload in command:
if payload in self._commands:
for local_payload in self._commands[payload][CONF_COMMAND]:
self._send_command(local_payload)
else:
self._send_command(payload)
time.sleep(delay)
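# Illustrative sketch only, not part of the integration: send_command() above expands named
# entries from the configured "commands" mapping and sends any other payload verbatim. With
# a stub device that behaviour can be exercised directly; the payload strings below are
# made-up placeholders.
class _StubChuangmiIr:
    """Minimal stand-in for ChuangmiIr that records played payloads."""

    def __init__(self):
        self.sent = []

    def play(self, payload):
        self.sent.append(payload)


def _example_send_command():
    remote = XiaomiMiioRemote(
        "example remote", _StubChuangmiIr(), "example-unique-id", 1, 10,
        {"tv_on": {CONF_COMMAND: ["raw_payload_1", "raw_payload_2"]}},
    )
    remote.send_command(
        ["tv_on", "raw_payload_3"], **{ATTR_NUM_REPEATS: 1, ATTR_DELAY_SECS: 0}
    )
    assert remote.device.sent == ["raw_payload_1", "raw_payload_2", "raw_payload_3"]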
|
|
import unittest
import pylab as pl
import matplotlib as mpl
import itertools
import sys
import math
import timeit
import copy
import time
import struct
import scipy.stats.mstats as stats
import ConfigParser
import os
import getopt
import h5py
import random as rnd
import cPickle as pickle
import operator
import numpy as np
from gmm.gmm_specializer import *
def get_song_dict():
fileList = []
rootdir = '/disk1/home_user/egonina/asp/MSD/MillionSongSubset/data/'
for root, subFolders, files in os.walk(rootdir):
for file in files:
fileList.append(os.path.join(root,file))
file_tag_dict = {}
for file in fileList:
print file
f = h5py.File(file, 'r')
mbtags = f['musicbrainz']['artist_mbtags']
list = []
for t in mbtags:
list.append(t)
tags = f['metadata']['artist_terms']
tag_freq = f['metadata']['artist_terms_freq']
tags_dict = {}
for t in range(len(tags)):
tags_dict[tags[t]] = tag_freq[t]
file_id = str(f['analysis']['songs']['track_id'][0])
file_tag_dict[file_id] = {}
file_tag_dict[file_id]['artist_mbtags'] = list
file_tag_dict[file_id]['artist_terms'] = tags_dict
file_tag_dict[file_id]['artist_name'] = str(f['metadata']['songs']['artist_name'][0])
file_tag_dict[file_id]['title'] = str(f['metadata']['songs']['title'][0])
file_tag_dict[file_id]['segments_timbre'] = np.array(f['analysis']['segments_timbre'], dtype=np.float32)
file_tag_dict[file_id]['duration'] = float(f['analysis']['songs']['duration'][0])
file_tag_dict[file_id]['tempo'] = float(f['analysis']['songs']['tempo'][0])
file_tag_dict[file_id]['time_signature'] = float(f['analysis']['songs']['time_signature'][0])
file_tag_dict[file_id]['segments_start'] = np.array(f['analysis']['segments_start'], dtype=np.float32)
f.close()
p = open("/disk1/home_user/egonina/asp/MSD/all_file_dict_dump.pkl", "wb")
pickle.dump(file_tag_dict, p, True)
p.close()
return file_tag_dict
def count_songs_by_tag(tags_file_name, output_file_name, fileDict):
tags_file = open(tags_file_name, 'r')
tag_dict = {}
for tag in tags_file:
        tag = tag[:len(tag)-1]  # delete end-of-line character
tag_dict[tag] = 0
#---------- READ FILES -----------
start = time.time()
    for file in fileDict.keys():
        tags = fileDict[file]['artist_terms']
        for tag in tag_dict.keys():
            if tag in tags:
                tag_dict[tag] += 1
    total = time.time() - start
    for tag in tag_dict.keys():
        print "songs with keyword [" + tag + "]: " + str(tag_dict[tag])
    print "total time: ", total
tag_out = open(output_file_name, 'w')
for tag in tag_dict.keys():
tag_out.write(tag+"\t"+str(tag_dict[tag])+"\n")
tag_out.close()
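# Illustrative sketch only, not part of the original script: the ranking in the main block
# below orders songs by the gap between the tag-specific GMM likelihood and the UBM
# likelihood. The same idea as a small helper, assuming the score() API used below:
def llr_score(gmm, ubm, features):
    """Average log-likelihood ratio of features under the tag GMM versus the UBM."""
    return np.average(gmm.score(features)) - np.average(ubm.score(features))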
if __name__ == '__main__':
total_start_time = time.time()
freq_threshold = 0.8
M = 32
category_tag = "metal"
rnd.seed(42)
print "Reading Files"
#song_dict = get_song_dict()
st = time.time()
# assume the dictionary has been already read in and pickled
p = open("/disk1/home_user/egonina/asp/MSD/all_file_dict_dump.pkl", "rb")
song_dict = pickle.load(p)
p.close()
print "--- File Reading:\t", time.time() - st, " -----"
st = time.time()
# collect songs
songs_with_tag = {}
songs_without_tag = {}
song_with_tag_count = 0
song_without_tag_count = 0
for song in song_dict.keys():
if category_tag in song_dict[song]['artist_terms'].keys(): #the song's tag list contains the tag we're looking for
if song_dict[song]['artist_terms'][category_tag] > freq_threshold:
songs_with_tag[song] = song_dict[song]
song_with_tag_count += 1
else:
songs_without_tag[song] = song_dict[song]
song_without_tag_count += 1
print "--- Collecting songs for the tag time:\t", time.time() - st, " ----- "
print "INFO: songs with tag count:", song_with_tag_count
print "INFO: songs without tag count: ", song_without_tag_count
st = time.time()
# get indices for various sets of songs
all_positive_indices = range(song_with_tag_count-1)
all_negative_indices = range(song_without_tag_count-1)
all_indices = range(len(song_dict.keys()))
#split songs with tag into training/testing sets (70/30)
training_sample_indices = np.array(rnd.sample(all_positive_indices, int(song_with_tag_count*0.7)))
testing_sample_indices = np.delete(all_positive_indices, training_sample_indices)
negative_sample_indices = all_negative_indices
print "INFO: number of training indices:", len(training_sample_indices)
print "INFO: testing indices:", len(testing_sample_indices)
print "INFO: negative testing indices:", len(negative_sample_indices)
# get song keys for the:
# - 70% of total songs for training
# - 30% of total songs for testing
# - (total songs - songs with tag) for negative testing
# - 30% of all song features for UBM model
song_keys = np.array(songs_with_tag.keys())
song_neg_keys = np.array(songs_without_tag.keys())
all_song_keys = np.array(song_dict.keys())
# get the corresponding song keys for each of the sets
training_song_keys = song_keys[training_sample_indices]
testing_song_keys = song_keys[testing_sample_indices]
negative_song_keys = song_neg_keys[negative_sample_indices]
# collect features for positive GMM training
first_song = True
for song in training_song_keys:
feats = songs_with_tag[song]['segments_timbre']
if first_song:
total_features = feats
first_song = False
else:
total_features = np.concatenate((total_features, feats))
print "--- Collecting training features time:\t", time.time() - st, " ----- "
print "INFO: total features: ", total_features.shape
# collect features for UBM training
st = time.time()
p = open("/disk1/home_user/egonina/asp/MSD/ubm_features_all.pkl", "rb")
total_ubm_features = np.array(pickle.load(p))
p.close()
# train the UBM on 30% of the total features from all songs
training_ubm_features = np.array(rnd.sample(total_ubm_features, int(len(total_ubm_features)*0.3)))
print "--- Collecting ubm features time:\t", time.time() - st, " -----"
print "INFO: total ubm features: ", total_ubm_features.shape, " 30%: ", training_ubm_features.shape
# train UBM on features
D = total_ubm_features.shape[1]
ubm = GMM(M,D,cvtype='diag')
train_st = time.time()
ubm.train(training_ubm_features)
train_total = time.time() - train_st
print "--- UBM training time:\t", train_total, " -----"
# train positive GMM on features
D = total_features.shape[1]
gmm = GMM(M, D, means=np.array(ubm.components.means), covars=np.array(ubm.components.covars), weights=np.array(ubm.components.weights), cvtype='diag')
train_st = time.time()
gmm.train(total_features)
train_total = time.time() - train_st
print "--- GMM training time:\t", train_total, " -----"
print "--- Testing Labeled Examples ---"
# testing the labeled test files
test_st = time.time()
labeled_songs = {}
unlabeled_songs = {}
for test_song in testing_song_keys:
test_feats = songs_with_tag[test_song]['segments_timbre']
all_lklds = gmm.score(test_feats)
all_ubm_lklds = ubm.score(test_feats)
avg_lkld = np.average(all_lklds)
avg_ubm_lkld = np.average(all_ubm_lklds)
sum_lkld = np.sum(all_lklds)
labeled_songs[str(songs_with_tag[test_song]['artist_name']+ " - "+songs_with_tag[test_song]['title'])] = (avg_lkld, avg_ubm_lkld, avg_lkld - avg_ubm_lkld)
print "--- Testing Unlabeled Examples ---"
test_st = time.time()
count = 0
# testing the unlabeled test files
for test_song in negative_song_keys:
count+=1
print count
test_feats = songs_without_tag[test_song]['segments_timbre']
all_lklds = gmm.score(test_feats)
all_ubm_lklds = ubm.score(test_feats)
avg_lkld = np.average(all_lklds)
avg_ubm_lkld = np.average(all_ubm_lklds)
sum_lkld = np.sum(all_lklds)
unlabeled_songs[str(songs_without_tag[test_song]['artist_name'] + " - " + songs_without_tag[test_song]['title'])] = (avg_lkld, avg_ubm_lkld, avg_lkld - avg_ubm_lkld)
test_total = time.time() - test_st
print "--- Total testing time:\t", test_total, " -----"
#print out top 20 labeled suggestions and unlabeled recommendations
print "======================================================================"
print "=================== TOP 20 LABELED SAMPLES ==========================="
print "======================================================================"
sorted_lab_samples = sorted(labeled_songs.iteritems(), key=lambda k: k[1][2], reverse=True)
for p in range(20):
print sorted_lab_samples[p]
print "======================================================================"
print "=================== TOP 20 UNLABELED SAMPLES ========================="
print "======================================================================"
sorted_unlab_samples = sorted(unlabeled_songs.iteritems(), key=lambda k: k[1][2], reverse=True)
for p in range(20):
print sorted_unlab_samples[p]
print "-------------- DONE ---------------"
print "--- Total time: ", time.time() - total_start_time, " ---"
print "-----------------------------------"
|
|
import json
import os
from dataclasses import dataclass
from os import path
from typing import Callable, Dict, Generator, Sequence
import pytest
import tests.functional.services.catalog.utils.api as catalog_api
import tests.functional.services.policy_engine.utils.api as policy_engine_api
from anchore_engine.db import session_scope
from anchore_engine.db.entities.catalog import CatalogImage, CatalogImageDocker
from anchore_engine.db.entities.policy_engine import (
CachedPolicyEvaluation,
CpeV2Vulnerability,
FeedMetadata,
FixedArtifact,
ImageVulnerabilitiesReport,
NvdV2Metadata,
Vulnerability,
)
from tests.functional.services.catalog.utils.utils import add_or_replace_document
from tests.functional.services.policy_engine.conftest import is_legacy_provider
from tests.functional.services.utils import http_utils
CURRENT_DIR = path.dirname(path.abspath(__file__))
ANALYSIS_FILES_DIR = path.join(CURRENT_DIR, "analysis_files")
SEED_FILE_DIR = path.join(CURRENT_DIR, "database_seed_files")
FEEDS_DATA_PATH_PREFIX = path.join("data", "v1", "service", "feeds")
EXPECTED_CONTENT = path.join(CURRENT_DIR, "expected_output")
@dataclass
class AnalysisFile:
filename: str
image_digest: str
ANALYSIS_FILES: Sequence[AnalysisFile] = [
AnalysisFile(
"alpine-test.json",
"sha256:80a31c3ce2e99c3691c27ac3b1753163214494e9b2ca07bfdccf29a5cca2bfbe",
),
AnalysisFile(
"debian-test.json",
"sha256:406413437f26223183d133ccc7186f24c827729e1b21adc7330dd43fcdc030b3",
),
AnalysisFile(
"centos-test.json",
"sha256:fe3ca35038008b0eac0fa4e686bd072c9430000ab7d7853001bde5f5b8ccf60c",
),
]
IMAGE_DIGEST_ID_MAP: Dict[str, str] = {}
IMAGE_DIGEST_MAP = {
"sha256:80a31c3ce2e99c3691c27ac3b1753163214494e9b2ca07bfdccf29a5cca2bfbe": {
"tag": "anchore/test_images:vulnerabilities-alpine-f5e8952",
"image_id": "8d4db62fbc412dd3a19f55bdf3d15bed65a7cdf9a3cf00630da685af565e2d25",
},
"sha256:406413437f26223183d133ccc7186f24c827729e1b21adc7330dd43fcdc030b3": {
"tag": "anchore/test_images:vulnerabilities-debian-f5e8952",
"image_id": "cbe22359b63443e715091e24efbcdeaa6bb3fb96c6fffeb5a1e85caaffc83565",
},
"sha256:fe3ca35038008b0eac0fa4e686bd072c9430000ab7d7853001bde5f5b8ccf60c": {
"tag": "anchore/test_images:vulnerabilities-centos-f5e8952",
"image_id": "08b3583ff5e85fb755be57d2ae9b14e3b5e4d406a5de55246b2ca84b2035f5da",
},
}
@pytest.fixture(scope="package")
def add_catalog_documents(request) -> None:
"""
Adds analyzer manifests to catalog. Deletes existing manifests and images if they exist.
"""
# Do not load up any catalog documents if legacy test
if not is_legacy_provider():
return
for analysis_file in ANALYSIS_FILES:
file_path = path.join(ANALYSIS_FILES_DIR, analysis_file.filename)
with open(file_path, "r") as f:
file_contents = f.read()
analysis_document = json.loads(file_contents)
add_or_replace_document(
"analysis_data", analysis_file.image_digest, analysis_document
)
image_id = analysis_document["document"][0]["image"]["imageId"]
try:
policy_engine_api.users.delete_image(image_id)
except http_utils.RequestFailedError as err:
if err.status_code != 404:
raise err
IMAGE_DIGEST_ID_MAP[analysis_file.image_digest] = image_id
def remove_documents_and_image() -> None:
"""
Cleanup, deletes added images and analyzer manifests.
"""
for analysis_file in ANALYSIS_FILES:
catalog_api.objects.delete_document(
"analysis_data", analysis_file.image_digest
)
policy_engine_api.users.delete_image(
IMAGE_DIGEST_ID_MAP[analysis_file.image_digest]
)
request.addfinalizer(remove_documents_and_image)
@pytest.fixture(scope="package")
def ingress_image(add_catalog_documents) -> Callable[[str], http_utils.APIResponse]:
"""
Returns method that adds new image to policy engine for vulnerability scanning. Moved to fixture to reduce code duplication.
:return: METHOD that calls ingress_image for the policy engine API with the appropriate catalog URL
:rtype: Callable[[str], http_utils.APIResponse]
"""
def _ingress_image(image_digest: str) -> http_utils.APIResponse:
"""
        Adds a new image to the policy engine for vulnerability scanning.
:param image_digest: image digest of image to ingress
:type image_digest: str
:return: api response
:rtype: http_utils.APIResponse
"""
fetch_url = f"catalog://{http_utils.DEFAULT_API_CONF['ANCHORE_API_ACCOUNT']}/analysis_data/{image_digest}"
image_id = IMAGE_DIGEST_ID_MAP[image_digest]
return policy_engine_api.images.ingress_image(fetch_url, image_id)
return _ingress_image
@pytest.fixture(scope="package")
def ingress_all_images(ingress_image) -> None:
"""
Ingress all test images.
"""
for analysis_file in ANALYSIS_FILES:
ingress_image(analysis_file.image_digest)
@pytest.fixture(scope="session")
def image_digest_id_map() -> Dict[str, str]:
"""
:return: lookup mapping of image_digest to image_id
:rtype: Dict[str, str]
"""
return IMAGE_DIGEST_ID_MAP
SEED_FILE_TO_DB_TABLE_MAP: Dict[str, Callable] = {
"feed_data_vulnerabilities.json": Vulnerability,
"feed_data_vulnerabilities_fixed_artifacts.json": FixedArtifact,
"feed_data_nvdv2_vulnerabilities.json": NvdV2Metadata,
"feed_data_cpev2_vulnerabilities.json": CpeV2Vulnerability,
"feeds.json": FeedMetadata,
"catalog_image.json": CatalogImage,
"catalog_image_docker.json": CatalogImageDocker,
}
CATALOG_FILES = ["catalog_image.json", "catalog_image_docker.json"]
VULN_DATA_FILES = [
"feed_data_vulnerabilities.json",
"feed_data_vulnerabilities_fixed_artifacts.json",
"feed_data_nvdv2_vulnerabilities.json",
"feed_data_cpev2_vulnerabilities.json",
"feeds.json",
]
SEED_FILE_TO_METADATA_MAP: Dict[str, str] = {
"feed_data_vulnerabilities.json": "metadata_json",
"feed_data_vulnerabilities_fixed_artifacts.json": "fix_metadata",
}
def load_seed_file_rows(file_name: str) -> Generator[Dict, None, None]:
"""
Loads database seed files (json lines) and yields the json objects.
:param file_name: name of seed file to load
:type file_name: str
:return: generator yields json
:rtype: Generator[Dict, None, None]
"""
json_file = os.path.join(SEED_FILE_DIR, file_name)
with open(json_file, "rb") as f:
for line in f:
linetext = line.decode("unicode_escape").strip()
json_content = json.loads(linetext)
if file_name in SEED_FILE_TO_METADATA_MAP:
json_key = SEED_FILE_TO_METADATA_MAP[file_name]
if json_content[json_key] is not None:
json_content[json_key] = json.loads(json_content[json_key])
yield json_content
def _setup_vuln_data():
"""
    Loads database seed files and bulk saves all records directly to the db
"""
with session_scope() as db:
all_records = []
files_to_seed = []
if is_legacy_provider():
files_to_seed += CATALOG_FILES
# If legacy provider, add vuln data to be seeded to files
# if grype provider, ensure the grypedb is synced
if is_legacy_provider():
files_to_seed += VULN_DATA_FILES
else:
policy_engine_api.feeds.feeds_sync(force_flush=True)
# seed data to engine db
for seed_file_name in files_to_seed:
entry_cls = SEED_FILE_TO_DB_TABLE_MAP[seed_file_name]
for db_entry in load_seed_file_rows(seed_file_name):
all_records.append(entry_cls(**db_entry))
db.bulk_save_objects(all_records)
db.flush()
@pytest.fixture(scope="package", autouse=True)
def setup_vuln_data(
request, set_env_vars, anchore_db, teardown_and_recreate_tables
) -> None:
"""
Writes database seed file content to database. This allows us to ensure consistent vulnerability results (regardless of feed sync status).
"""
tablenames = [cls.__tablename__ for cls in SEED_FILE_TO_DB_TABLE_MAP.values()]
tablenames.extend(
[CachedPolicyEvaluation.__tablename__, ImageVulnerabilitiesReport.__tablename__]
)
teardown_and_recreate_tables(tablenames)
_setup_vuln_data()
request.addfinalizer(lambda: teardown_and_recreate_tables(tablenames))
@pytest.fixture(scope="package")
def setup_image(ingress_image, add_image_with_teardown_package_scope):
"""
This fixture is used to get the image being tested into the correct analyzed state to test vulnerabilities
If its the legacy provider, uses the old method of ingressing image directly into the policy engine
If grype, uses the api to add and analyze the image
"""
def _setup_image(image_digest):
if is_legacy_provider():
return ingress_image(image_digest)
else:
return add_image_with_teardown_package_scope(
IMAGE_DIGEST_MAP[image_digest]["tag"]
)
return _setup_image
@pytest.fixture(scope="package")
def setup_all_images(ingress_image, setup_image):
if is_legacy_provider():
for analysis_file in ANALYSIS_FILES:
ingress_image(analysis_file.image_digest)
else:
for tag in IMAGE_DIGEST_MAP.keys():
setup_image(tag)
|
|
import datetime
import logging
import os
import sys
import threading
import time
from argparse import ArgumentParser
from io import UnsupportedOperation
from queue import Queue, Empty as QueueEmpty
# This seems like a waste of an import
from .constants import TOKYO_TZ, HHMM_FMT
from .control import ShowroomLiveControllerThread as ShowroomController
from .exceptions import ShowroomStopRequest
from .index import ShowroomIndex
from .settings import ShowroomSettings, DEFAULTS
# build settings and index objects from arguments
# build controller
# start controller
# translate command line instructions to controller commands
cli_logger = logging.getLogger('showroom.cli')
class BasicCLI(object):
@staticmethod
def build_parser():
parser = ArgumentParser(description="Watches Showroom for live videos and downloads them \
when they become available. Most options only apply in --all mode",
epilog="The max-* options, parser, index, and output-dir haven't been \
fully tested yet. A new indexing system is currently in use, but \
no command-line arguments to control it yet exist.")
parser.add_argument('names', nargs='*',
help='A quoted Member Name to watch. Accepts a list of names, separated by spaces. '
'Currently, the Member Name must match the English Name (engName) key exactly.')
parser.add_argument('--all', '-a', action='store_true', dest='record_all',
help='Watch the main showroom page for live shows and record all of them.')
parser.add_argument('--output-dir', '-o',
help='Directory in which to store active and completed downloads. \
Defaults to "{directory[output]}"'.format(**DEFAULTS))
parser.add_argument('--config', help="Path to config file")
parser.add_argument('--data-dir', '-d',
help='Data directory. Defaults to "{directory[data]}"'.format(**DEFAULTS))
parser.add_argument('--index', '-i', dest="index_dir",
help='Path to an index directory, containing room information in json files \
with a jdex extension. Defaults to "{directory[index]}"'.format(**DEFAULTS))
parser.add_argument('--max-downloads', '-D', type=int,
help='Maximum number of concurrent downloads. \
Defaults to {throttle[max][downloads]}'.format(**DEFAULTS))
parser.add_argument('--max-watches', '-W', type=int,
help='Maximum number of rooms to watch at once (waiting for them to go live). \
Defaults to {throttle[max][watches]}'.format(**DEFAULTS))
parser.add_argument('--max-priority', '-P', type=int,
help='Any members with priority over this value will be ignored. \
Defaults to {throttle[max][priority]}'.format(**DEFAULTS))
parser.add_argument('--live-rate', '-R', dest="onlives_rate", type=float,
help='Seconds between each poll of ONLIVES. \
Defaults to {throttle[rate][onlives]}'.format(**DEFAULTS))
parser.add_argument('--schedule-rate', '-S', dest="upcoming_rate", type=float,
help='Seconds between each check of the schedule. \
Defaults to {throttle[rate][upcoming]}'.format(**DEFAULTS))
# conflicts with config
# parser.add_argument('--comments', dest='comments', action='store_true')
'''
# TODO: Allow the user to provide a schedule with different start and end hours per day.
# Or else instead of stopping entirely, slow down polling during off hours.
parser.add_argument('--end_hour', default=END_HOUR, type=int,
help='Hour to stop recording (will actually stop 10 minutes later). \
Defaults to %(default)s')
parser.add_argument('--resume_hour', default=RESUME_HOUR, type=int,
help='Hour to resume recording (will actually start 10 minutes earlier). \
Defaults to %(default)s')
'''
# TODO: handle names in arg parser
parser.add_argument('--logging', action='store_true', help="Turns on ffmpeg logging.")
parser.add_argument('--noisy', action='store_true', help="Print download links when downloads start")
return parser
# TODO: MessageHandler class that parses a message object and returns the desired string
# based on the stored query
@staticmethod
def _parse_index_filter_list(filter_list):
if len(filter_list['unwanted']) == 0:
return "Downloading all rooms."
elif len(filter_list['wanted']) == 0:
return "Not downloading any rooms."
elif len(filter_list['wanted']) > len(filter_list['unwanted']):
# TODO: word wrap?
names = ', '.join(filter_list['unwanted'])
return "Not downloading the following rooms:\n{}".format(names)
else:
names = ', '.join(filter_list['wanted'])
return "Downloading the following rooms:\n{}".format(names)
# TODO: have these return a single string instead of printing directly
@staticmethod
def _parse_scheduled_rooms(scheduled):
def print_status(item):
if item['mode'] in ('live', 'download'):
return " (LIVE)"
else:
return ""
output = ["{start} {group} {name}{status}".format(start=e['start_time'].strftime(HHMM_FMT),
group=e['room']['group'],
name=e['room']['name'],
status=print_status(e))
for e in scheduled]
print('----------\n{} Scheduled Rooms:'.format(len(output)))
print(*output, sep='\n')
print()
@staticmethod
def _parse_live_rooms(lives):
def print_status(item):
if item['mode'] == 'download':
return " (DOWNLOADING)"
else:
return ""
output = ["{start} {group} {name}{status}".format(start=e['start_time'].strftime(HHMM_FMT),
group=e['room']['group'],
name=e['room']['name'],
status=print_status(e))
for e in lives]
print('----------\n{} LIVE ROOMS:'.format(len(output)))
print(*output, sep='\n')
print()
@staticmethod
def _parse_download_rooms(downloads):
output = ["{start} {group} {name}\n".format(start=e['start_time'].strftime(HHMM_FMT),
group=e['room']['group'],
name=e['room']['name'])
for e in downloads]
print('----------\n{} Downloading Rooms:'.format(len(output)))
print(*output, sep='\n')
print()
@staticmethod
def _parse_download_links(downloads):
def print_status(item):
if item['mode'] == 'download':
return ""
else:
return " (not downloading)"
output = ["{start} {group} {name}{status}\n"
"{web_url}\n{rtmp_url}".format(start=e['start_time'].strftime(HHMM_FMT),
group=e['room']['group'],
name=e['room']['name'],
status=print_status(e),
web_url=e['room']['web_url'],
rtmp_url=e['download']['streaming_urls'])
for e in downloads]
print('----------\nDOWNLOAD LINKS:')
print(*output, sep='\n')
print()
def __init__(self):
args = self.build_parser().parse_args()
if args:
self.settings = ShowroomSettings.from_args(args)
else:
self.settings = ShowroomSettings()
os.environ.update(self.settings.environment)
# does this work? what is it relative to?
self.index = ShowroomIndex(self.settings.directory.index, record_all=self.settings.filter.all)
# DEBUG
cli_logger.debug('Index has {} rooms'.format(len(self.index)))
self.control_thread = ShowroomController(self.index, self.settings)
self.input_queue = InputQueue()
if args.record_all:
self.control_thread.index.filter_all()
else:
self.control_thread.index.filter_add(args.names)
self.control_thread.index.filter_add(self.settings.filter.wanted)
self._time = datetime.datetime.fromtimestamp(0.0, tz=TOKYO_TZ)
# TODO: This needs to be revised
self.query_dict = {"index_filter_list": self._parse_index_filter_list,
"schedule": self._parse_scheduled_rooms,
"lives": self._parse_live_rooms,
"downloads": self._parse_download_rooms,
"downloads_links": self._parse_download_links}
def start(self):
print('Starting up Showroom Watcher...')
self.input_queue.start()
self.control_thread.start()
# Is this the best place to put this message?
def run(self):
"""Do stuff."""
while True:
try:
self.read_commands()
except ShowroomStopRequest:
print("Exiting...")
return
# Automatic hourly schedule updates
# curr_time = datetime.datetime.now(tz=TOKYO_TZ)
# if (curr_time - self._time).total_seconds() > 3600.0:
# self._time = curr_time
# print(curr_time.strftime("\n\n%H:%M"))
# self.control_thread.send_command('schedule')
time.sleep(0.2)
self.get_messages()
def read_commands(self):
while not self.input_queue.empty():
try:
line = self.input_queue.get(block=False)
except QueueEmpty:
break
else:
self.parse_command(line)
# TODO: CommandHandler class?
def parse_command(self, line):
# here we take every allowed command and try to translate it to a call on the control_thread
# we need to construct a language though...
# set and get are obvious
# todo: more robust translation
ct = self.control_thread
send = ct.send_command
line = line.lower()
if line.startswith('index'):
if 'index filter' in line:
if "filter all" in line:
send('index_filter', "all")
elif "filter none" in line:
send('index_filter', "none")
elif "filter add" in line:
names = line.split('filter add')[-1].strip()
split_names = [e.strip() for e in names.split(',')]
send('index_filter', add=split_names)
print("Turning on downloads for the following rooms:\n" + ', '.join(names).title())
elif "filter remove" in line:
names = line.split('filter remove')[-1].strip()
split_names = [e.strip() for e in names.split(',')]
send('index_filter', remove=split_names)
# TODO: print a log info message when this actually gets done,
# as chances are the results won't be 100% exactly what's printed here
print("Turning off downloads for the following rooms:\n" + ', '.join(names).title())
elif 'index update' in line:
if "update from web" in line:
send('index_update', src="web")
else:
send('index_update')
# TODO: other set commands
elif line.startswith("get"):
if 'get index filter' in line:
send('index_filter')
elif 'get schedule' in line:
send('schedule')
elif 'get live' in line:
send('lives')
elif 'get download' in line:
send('downloads')
elif 'get links' in line:
# i want the same content but in a different format, what's the right way to do this?
send('downloads_links')
elif line.startswith('schedule'):
send('schedule')
elif line.startswith('live'):
send('lives')
elif line.startswith('download'):
if 'links' in line:
send('downloads_links')
else:
send('downloads')
elif line.startswith('links'):
send('downloads_links')
elif line.strip() == 'help':
print("""
The following commands are recognised:
schedule -- prints a schedule
live -- prints currently live rooms
downloads -- prints current downloads
links -- prints live rooms with links (and full JSON streaming data)
quit -- stop activity and exit
help -- this text
""")
print('\nNOTE: The "links" command has very noisy and unhelpful output at this time.',
'Also, sometimes commands are ignored. Wait a bit and try again.', sep='\n')
# NOT IMPLEMENTED
# not sure if the index stuff is implemented or not. `get index filter` doesn't work, at least
"""
index update from web -- update the index from github (NOT IMPLEMENTED)
index filter all -- selects all rooms for downloading
index filter none -- selects no rooms for downloading
index filter add name1, name2, name3...
index filter remove name1, name2, name3...
-- add or remove rooms from the download list
-- name must match exactly (case insensitive)
index update -- locally update the index
get index filter -- returns info about the filter
"""
# Not sure if these work; they haven't been tested thoroughly and they're too dangerous.
"""
stop -- stop activity (program will continue running)
start -- restart activity
"""
# TODO: test these
elif line.strip() == 'stop':
"--stop--"
ct.stop()
ct.join()
print('Stopped')
elif line.strip() == 'start':
"--start--"
ct.start()
print('Started')
elif line.strip() == 'quit':
"--quit--"
print('Quitting...')
ct.stop()
self.input_queue.stop()
ct.join()
raise ShowroomStopRequest
def get_messages(self):
messages = self.control_thread.get_messages()
for msg in messages:
self.parse_message(msg)
def parse_message(self, msg):
query = msg.query
message = msg.content
if query in self.query_dict:
text = self.query_dict[query](message)
if text:
print(text)
class InputQueue(Queue):
def __init__(self):
super().__init__()
self.STDIN = None
self.input_thread = None
def read_commands(self):
while True:
try:
# DEBUG
# print('waiting for line')
line = self.STDIN.readline()
# DEBUG
# print('read line')
except ValueError:
# tried to read from a closed STDIN
return
if line:
self.put(line)
time.sleep(0.1)
def start(self):
# make an alias of stdin so that we can close it later
# TODO: allow taking input from other sources?
try:
fileno = sys.stdin.fileno()
except UnsupportedOperation:
# trying to run this from idle?
raise
if fileno is not None:
self.STDIN = os.fdopen(os.dup(fileno))
else:
self.STDIN = None # this is a failure state!
self.input_thread = threading.Thread(target=self.read_commands)
self.input_thread.daemon = True
self.input_thread.start()
print('\nType "help" for a list of commands.')
def stop(self):
# the alternative is sending SIGKILL or something
if self.STDIN:
self.STDIN.close()
self.input_thread.join()
self.STDIN = None
self.input_thread = None
# clear the queue?
|
|
"""
Starting or restarting of services and daemons
==============================================
Services are defined as system daemons and are typically launched using system
init or rc scripts. This service state uses whichever service module is loaded
on the minion with the virtualname of ``service``. Services can be defined as
either running or dead.
If you need to know whether your init system is supported, see the list of supported
:mod:`service modules <salt.modules.service>` for your desired init system
(systemd, sysvinit, launchctl, etc.).
Note that Salt's service execution module, and therefore this service state,
uses OS grains to ascertain which service module should be loaded and used to
execute service functions. As existing distributions change init systems or
new distributions are created, OS detection can sometimes be incomplete.
If your service states are running into trouble with init system detection,
please see the :ref:`Overriding Virtual Module Providers <module-provider-override>`
section of Salt's module documentation to work around possible errors.
.. note::
The current status of a service is determined by the return code of the init/rc
script status command. A status return code of 0 is considered running. Any
other return code is considered dead.
.. code-block:: yaml
httpd:
service.running: []
The service can also be set to start at runtime via the enable option:
.. code-block:: yaml
openvpn:
service.running:
- enable: True
By default, if a service is triggered to refresh due to a watch statement, the
service is restarted. If the desired behavior is to reload the service instead,
set the reload value to True:
.. code-block:: yaml
redis:
service.running:
- enable: True
- reload: True
- watch:
- pkg: redis
.. note::
More details regarding ``watch`` can be found in the
:ref:`Requisites <requisites>` documentation.
"""
import logging
import time
import salt.utils.data
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.utils.args import get_function_argspec as _argspec
from salt.utils.systemd import booted
SYSTEMD_ONLY = ("no_block", "unmask", "unmask_runtime")
log = logging.getLogger(__name__)
__virtualname__ = "service"
def __virtual__():
"""
Only make these states available if a service provider has been detected or
assigned for this minion
"""
if "service.start" in __salt__:
return __virtualname__
else:
return (
False,
"No service execution module loaded: "
"check support for service management on {} "
"".format(__grains__.get("osfinger", __grains__["os"])),
)
# Double-asterisk deliberately not used here
def _get_systemd_only(func, kwargs):
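# Cache the result of booted() on the function object itself so that the
# systemd detection only has to run once per process.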
if not hasattr(_get_systemd_only, "HAS_SYSTEMD"):
setattr(_get_systemd_only, "HAS_SYSTEMD", booted())
ret = {}
warnings = []
valid_args = _argspec(func).args
for systemd_arg in SYSTEMD_ONLY:
if systemd_arg in kwargs and systemd_arg in valid_args:
if _get_systemd_only.HAS_SYSTEMD:
ret[systemd_arg] = kwargs[systemd_arg]
else:
warnings.append(
"The '{}' argument is not supported by this platform".format(
systemd_arg
)
)
return ret, warnings
def _add_warnings(ret, warnings):
current_warnings = ret.setdefault("warnings", [])
current_warnings.extend([x for x in warnings if x not in current_warnings])
def _enabled_used_error(ret):
"""
Warn of potential typo.
"""
ret["result"] = False
ret["comment"] = (
'Service {} uses non-existent option "enabled". '
'Perhaps "enable" option was intended?'.format(ret["name"])
)
return ret
def _enable(name, started, result=True, **kwargs):
"""
Enable the service
"""
ret = {}
# is service available?
try:
if not _available(name, ret):
return ret
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
# Set default expected result
ret["result"] = result
# Check to see if this minion supports enable
if "service.enable" not in __salt__ or "service.enabled" not in __salt__:
if started is True:
ret[
"comment"
] = "Enable is not available on this minion, service {} started".format(
name
)
elif started is None:
ret["comment"] = (
"Enable is not available on this minion,"
" service {} is in the desired state".format(name)
)
else:
ret[
"comment"
] = "Enable is not available on this minion, service {} is dead".format(
name
)
return ret
# Service can be enabled
before_toggle_enable_status = __salt__["service.enabled"](name, **kwargs)
if before_toggle_enable_status:
# Service is enabled
if started is True:
ret["comment"] = "Service {} is already enabled, and is running".format(
name
)
elif started is None:
# always be sure in this case to reset the changes dict
ret["changes"] = {}
ret[
"comment"
] = "Service {} is already enabled, and is in the desired state".format(
name
)
else:
ret["comment"] = "Service {} is already enabled, and is dead".format(name)
return ret
# Service needs to be enabled
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Service {} set to be enabled".format(name)
return ret
try:
if __salt__["service.enable"](name, **kwargs):
# Service has been enabled
ret["changes"] = {}
after_toggle_enable_status = __salt__["service.enabled"](name, **kwargs)
# on upstart, certain services like apparmor will always return
# False, even if correctly activated
# do not trigger a change
if before_toggle_enable_status != after_toggle_enable_status:
ret["changes"][name] = True
if started is True:
ret["comment"] = "Service {} has been enabled, and is running".format(
name
)
elif started is None:
ret[
"comment"
] = "Service {} has been enabled, and is in the desired state".format(
name
)
else:
ret["comment"] = "Service {} has been enabled, and is dead".format(name)
return ret
except CommandExecutionError as exc:
enable_error = exc.strerror
else:
enable_error = False
# Service failed to be enabled
ret["result"] = False
if started is True:
ret["comment"] = (
"Failed when setting service {} to start at boot,"
" but the service is running".format(name)
)
elif started is None:
ret["comment"] = (
"Failed when setting service {} to start at boot,"
" but the service was already running".format(name)
)
else:
ret[
"comment"
] = "Failed when setting service {} to start at boot, and the service is dead".format(
name
)
if enable_error:
ret["comment"] += ". Additional information follows:\n\n{}".format(enable_error)
return ret
def _disable(name, started, result=True, **kwargs):
"""
Disable the service
"""
ret = {}
# is service available?
try:
if not _available(name, ret):
ret["result"] = True
return ret
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
# Set default expected result
ret["result"] = result
# is enable/disable available?
if "service.disable" not in __salt__ or "service.disabled" not in __salt__:
if started is True:
ret[
"comment"
] = "Disable is not available on this minion, service {} started".format(
name
)
elif started is None:
ret["comment"] = (
"Disable is not available on this minion,"
" service {} is in the desired state".format(name)
)
else:
ret[
"comment"
] = "Disable is not available on this minion, service {} is dead".format(
name
)
return ret
# Service can be disabled
if salt.utils.platform.is_windows():
# service.disabled in Windows returns True for services that are set to
# Manual start, so we need to check specifically for Disabled
before_toggle_disable_status = __salt__["service.info"](name)["StartType"] in [
"Disabled"
]
else:
before_toggle_disable_status = __salt__["service.disabled"](name)
if before_toggle_disable_status:
# Service is disabled
if started is True:
ret["comment"] = "Service {} is already disabled, and is running".format(
name
)
elif started is None:
# always be sure in this case to reset the changes dict
ret["changes"] = {}
ret[
"comment"
] = "Service {} is already disabled, and is in the desired state".format(
name
)
else:
ret["comment"] = "Service {} is already disabled, and is dead".format(name)
return ret
# Service needs to be disabled
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Service {} set to be disabled".format(name)
return ret
if __salt__["service.disable"](name, **kwargs):
# Service has been disabled
ret["changes"] = {}
after_toggle_disable_status = __salt__["service.disabled"](name)
# on upstart, certain services like apparmor will always return
# False, even if correctly activated
# do not trigger a change
if before_toggle_disable_status != after_toggle_disable_status:
ret["changes"][name] = True
if started is True:
ret["comment"] = "Service {} has been disabled, and is running".format(name)
elif started is None:
ret[
"comment"
] = "Service {} has been disabled, and is in the desired state".format(name)
else:
ret["comment"] = "Service {} has been disabled, and is dead".format(name)
return ret
# Service failed to be disabled
ret["result"] = False
if started is True:
ret[
"comment"
] = "Failed when setting service {} to not start at boot, and is running".format(
name
)
elif started is None:
ret["comment"] = (
"Failed when setting service {} to not start"
" at boot, but the service was already running".format(name)
)
else:
ret["comment"] = (
"Failed when setting service {} to not start"
" at boot, and the service is dead".format(name)
)
return ret
def _offline():
return "service.offline" in __salt__ and __salt__["service.offline"]()
def _available(name, ret):
"""
Check if the service is available
"""
avail = False
if "service.available" in __salt__:
avail = __salt__["service.available"](name)
elif "service.get_all" in __salt__:
avail = name in __salt__["service.get_all"]()
if not avail:
ret["result"] = False
ret["comment"] = "The named service {} is not available".format(name)
return avail
def running(name, enable=None, sig=None, init_delay=None, **kwargs):
"""
Ensure that the service is running
name
The name of the init or rc script used to manage the service
enable
Set the service to be enabled at boot time, ``True`` sets the service
to be enabled, ``False`` sets the named service to be disabled. The
default is ``None``, which does not enable or disable anything.
sig
The string to search for when looking for the service process with ps
init_delay
Some services may not be truly available for a short period after their
startup script indicates to the system that they are. Provide an
'init_delay' to specify that this state should wait an additional given
number of seconds after a service has started before returning. Useful
for requisite states wherein a dependent state might assume a service
has started but is not yet fully initialized.
no_block : False
**For systemd minions only.** Starts the service using ``--no-block``.
.. versionadded:: 2017.7.0
timeout
**For Windows minions only.**
The time in seconds to wait for the service to start before returning.
Default is the default for :py:func:`win_service.start
<salt.modules.win_service.start>`.
.. versionadded:: 2017.7.9,2018.3.4
unmask : False
**For systemd minions only.** Set to ``True`` to remove an indefinite
mask before attempting to start the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
making any changes. This behavior is no longer the default.
unmask_runtime : False
**For systemd minions only.** Set to ``True`` to remove a runtime mask
before attempting to start the service.
.. versionadded:: 2017.7.0
In previous releases, Salt would simply unmask a service before
making any changes. This behavior is no longer the default.
wait : 3
**For systemd minions only.** Passed through when using
:py:func:`service.status <salt.modules.systemd_service.status>` to
determine whether the service is running or not.
.. versionadded:: 2019.2.3
.. note::
``watch`` can be used with service.running to restart a service when
another state changes ( example: a file.managed state that creates the
service's config file ). More details regarding ``watch`` can be found
in the :ref:`Requisites <requisites>` documentation.
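For example, an illustrative SLS snippet (the service name is only a
placeholder) that starts a service, enables it at boot, and waits five
seconds before returning:
.. code-block:: yaml
    nginx:  # placeholder service name
      service.running:
        - enable: True
        - init_delay: 5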
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
# used to let execution modules know which service state is being run.
__context__["service.state"] = "running"
# Check for common error: using enabled option instead of enable
if "enabled" in kwargs:
return _enabled_used_error(ret)
# Convert enable to boolean in case user passed a string value
if isinstance(enable, str):
enable = salt.utils.data.is_true(enable)
if _offline():
ret["result"] = True
ret["comment"] = "Running in OFFLINE mode. Nothing to do"
return ret
# Check if the service is available
try:
if not _available(name, ret):
if __opts__.get("test"):
ret["result"] = None
ret["comment"] = (
"Service {} not present; if created in this state run, "
"it would have been started".format(name)
)
return ret
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
status_kwargs, warnings = _get_systemd_only(__salt__["service.status"], kwargs)
if warnings:
_add_warnings(ret, warnings)
# Many custom init scripts fail to implement, or misimplement, the status
# command, so it is only an indicator and cannot be fully trusted
before_toggle_status = __salt__["service.status"](name, sig, **status_kwargs)
if "service.enabled" in __salt__:
before_toggle_enable_status = __salt__["service.enabled"](name)
else:
before_toggle_enable_status = True
unmask_ret = {"comment": ""}
if kwargs.get("unmask", False):
unmask_ret = unmasked(name, kwargs.get("unmask_runtime", False))
# See if the service is already running
if before_toggle_status:
ret["comment"] = "\n".join(
[
_f
for _f in [
"The service {} is already running".format(name),
unmask_ret["comment"],
]
if _f
]
)
if enable is True and not before_toggle_enable_status:
ret.update(_enable(name, None, **kwargs))
elif enable is False and before_toggle_enable_status:
ret.update(_disable(name, None, **kwargs))
return ret
# Run the tests
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "\n".join(
[
_f
for _f in [
"Service {} is set to start".format(name),
unmask_ret["comment"],
]
if _f
]
)
return ret
# Conditionally add systemd-specific args to call to service.start
start_kwargs, warnings = _get_systemd_only(__salt__["service.start"], kwargs)
if warnings:
_add_warnings(ret, warnings)
if salt.utils.platform.is_windows() and kwargs.get("timeout", False):
start_kwargs.update({"timeout": kwargs.get("timeout")})
# macOS and Windows services can't be started if they are disabled, so we need
# to enable them prior to starting, otherwise we will always fail.
macos = salt.utils.platform.is_darwin()
windows = salt.utils.platform.is_windows()
if (macos or windows) and not before_toggle_enable_status:
# fail if enable isn't set.
if not enable:
ret["comment"] = (
"The service {} is disabled but enable is not True. Set "
"enable to True to successfully start the service.".format(name)
)
ret["result"] = False
return ret
ret.update(_enable(name, None, **kwargs))
# we've already enabled, so we don't need to enable again later.
enable = None
try:
func_ret = __salt__["service.start"](name, **start_kwargs)
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
if not func_ret:
ret["result"] = False
ret["comment"] = "Service {} failed to start".format(name)
if enable is True:
ret.update(_enable(name, False, result=False, **kwargs))
elif enable is False:
ret.update(_disable(name, False, result=False, **kwargs))
return ret
if init_delay:
time.sleep(init_delay)
# only force a change state if we have explicitly detected them
after_toggle_status = __salt__["service.status"](name, sig, **status_kwargs)
if "service.enabled" in __salt__:
after_toggle_enable_status = __salt__["service.enabled"](name)
else:
after_toggle_enable_status = True
if (
(before_toggle_enable_status != after_toggle_enable_status)
or (before_toggle_status != after_toggle_status)
) and not ret.get("changes", {}):
ret["changes"][name] = after_toggle_status
if after_toggle_status:
ret["comment"] = "Started service {}".format(name)
else:
ret["comment"] = "Service {} failed to start".format(name)
ret["result"] = False
if enable is True:
ret.update(
_enable(name, after_toggle_status, result=after_toggle_status, **kwargs)
)
elif enable is False:
ret.update(
_disable(name, after_toggle_status, result=after_toggle_status, **kwargs)
)
if init_delay:
ret["comment"] = "{}\nDelayed return for {} seconds".format(
ret["comment"], init_delay
)
if kwargs.get("unmask", False):
ret["comment"] = "\n".join([ret["comment"], unmask_ret["comment"]])
return ret
def dead(name, enable=None, sig=None, init_delay=None, **kwargs):
"""
Ensure that the named service is dead by stopping the service if it is running
name
The name of the init or rc script used to manage the service
enable
Set the service to be enabled at boot time, ``True`` sets the service
to be enabled, ``False`` sets the named service to be disabled. The
default is ``None``, which does not enable or disable anything.
sig
The string to search for when looking for the service process with ps
init_delay
Add a sleep command (in seconds) before the check to make sure service
is killed.
.. versionadded:: 2017.7.0
no_block : False
**For systemd minions only.** Stops the service using ``--no-block``.
.. versionadded:: 2017.7.0
timeout
**For Windows minions only.**
The time in seconds to wait for the service to stop before returning.
Default is the default for :py:func:`win_service.stop
<salt.modules.win_service.stop>`.
.. versionadded:: 2017.7.9,2018.3.4
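For example, an illustrative SLS snippet (the service name is only a
placeholder) that stops a service and disables it at boot:
.. code-block:: yaml
    telnetd:  # placeholder service name
      service.dead:
        - enable: False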
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
# used to let execution modules know which service state is being run.
__context__["service.state"] = "dead"
# Check for common error: using enabled option instead of enable
if "enabled" in kwargs:
return _enabled_used_error(ret)
# Convert enable to boolean in case user passed a string value
if isinstance(enable, str):
enable = salt.utils.data.is_true(enable)
if _offline():
ret["result"] = True
ret["comment"] = "Running in OFFLINE mode. Nothing to do"
return ret
# Check if the service is available
try:
if not _available(name, ret):
if __opts__.get("test"):
ret["result"] = None
ret["comment"] = (
"Service {} not present; if created in this state run, "
"it would have been stopped".format(name)
)
else:
# A non-available service is OK here, don't let the state fail
# because of it.
ret["result"] = True
return ret
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
status_kwargs, warnings = _get_systemd_only(__salt__["service.status"], kwargs)
if warnings:
_add_warnings(ret, warnings)
# Many custom init scripts fail to implement, or misimplement, the status
# command, so it is only an indicator and cannot be fully trusted
before_toggle_status = __salt__["service.status"](name, sig, **status_kwargs)
if "service.enabled" in __salt__:
if salt.utils.platform.is_windows():
# service.enabled in Windows returns True for services that are set
# to Auto start, but services set to Manual can also be disabled
before_toggle_enable_status = __salt__["service.info"](name)[
"StartType"
] in ["Auto", "Manual"]
else:
before_toggle_enable_status = __salt__["service.enabled"](name)
else:
before_toggle_enable_status = True
# See if the service is already dead
if not before_toggle_status:
ret["comment"] = "The service {} is already dead".format(name)
if enable is True and not before_toggle_enable_status:
ret.update(_enable(name, None, **kwargs))
elif enable is False and before_toggle_enable_status:
ret.update(_disable(name, None, **kwargs))
return ret
# Run the tests
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Service {} is set to be killed".format(name)
return ret
# Conditionally add systemd-specific args to call to service.start
stop_kwargs, warnings = _get_systemd_only(__salt__["service.stop"], kwargs)
if warnings:
_add_warnings(ret, warnings)
if salt.utils.platform.is_windows() and kwargs.get("timeout", False):
stop_kwargs.update({"timeout": kwargs.get("timeout")})
func_ret = __salt__["service.stop"](name, **stop_kwargs)
if not func_ret:
ret["result"] = False
ret["comment"] = "Service {} failed to die".format(name)
if enable is True:
ret.update(_enable(name, True, result=False, **kwargs))
elif enable is False:
ret.update(_disable(name, True, result=False, **kwargs))
return ret
if init_delay:
time.sleep(init_delay)
# only force a change state if we have explicitly detected them
after_toggle_status = __salt__["service.status"](name, **status_kwargs)
if "service.enabled" in __salt__:
after_toggle_enable_status = __salt__["service.enabled"](name)
else:
after_toggle_enable_status = True
if (
(before_toggle_enable_status != after_toggle_enable_status)
or (before_toggle_status != after_toggle_status)
) and not ret.get("changes", {}):
ret["changes"][name] = after_toggle_status
# be sure to stop, in case we misdetected in the check
if after_toggle_status:
ret["result"] = False
ret["comment"] = "Service {} failed to die".format(name)
else:
ret["comment"] = "Service {} was killed".format(name)
if enable is True:
ret.update(
_enable(name, after_toggle_status, result=not after_toggle_status, **kwargs)
)
elif enable is False:
ret.update(
_disable(
name, after_toggle_status, result=not after_toggle_status, **kwargs
)
)
return ret
def enabled(name, **kwargs):
"""
Ensure that the service is enabled on boot. Only use this state if you
don't want to manage the running process; if you want to enable a running
service, use the ``enable: True`` option of the ``running`` or ``dead``
function instead.
name
The name of the init or rc script used to manage the service
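For example, an illustrative SLS snippet (the service name is only a placeholder):
.. code-block:: yaml
    sshd:  # placeholder service name
      service.enabled: []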
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
# used to let execution modules know which service state is being run.
__context__["service.state"] = "enabled"
ret.update(_enable(name, None, **kwargs))
return ret
def disabled(name, **kwargs):
"""
Ensure that the service is disabled on boot. Only use this state if you
don't want to manage the running process; if you want to disable a service,
use the ``enable: False`` option of the ``running`` or ``dead`` function
instead.
name
The name of the init or rc script used to manage the service
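For example, an illustrative SLS snippet (the service name is only a placeholder):
.. code-block:: yaml
    bluetooth:  # placeholder service name
      service.disabled: []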
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
# used to let execution modules know which service state is being run.
__context__["service.state"] = "disabled"
ret.update(_disable(name, None, **kwargs))
return ret
def masked(name, runtime=False):
"""
.. versionadded:: 2017.7.0
.. note::
This state is only available on minions which use systemd_.
Ensures that the named service is masked (i.e. prevented from being
started).
name
Name of the service to mask
runtime : False
By default, this state will manage an indefinite mask for the named
service. Set this argument to ``True`` to runtime mask the service.
.. note::
It is possible for a service to have both indefinite and runtime masks
set for it. Therefore, this state will manage a runtime or indefinite
mask independently of each other. This means that if the service is
already indefinitely masked, running this state with ``runtime`` set to
``True`` will _not_ remove the indefinite mask before setting a runtime
mask. In these cases, if it is desirable to ensure that the service is
runtime masked and not indefinitely masked, pair this state with a
:py:func:`service.unmasked <salt.states.service.unmasked>` state, like
so:
.. code-block:: yaml
mask_runtime_foo:
service.masked:
- name: foo
- runtime: True
unmask_indefinite_foo:
service.unmasked:
- name: foo
- runtime: False
.. _systemd: https://freedesktop.org/wiki/Software/systemd/
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
if "service.masked" not in __salt__:
ret["comment"] = "Service masking not available on this minion"
ret["result"] = False
return ret
mask_type = "runtime masked" if runtime else "masked"
expected_changes = {mask_type: {"old": False, "new": True}}
try:
if __salt__["service.masked"](name, runtime):
ret["comment"] = "Service {} is already {}".format(
name,
mask_type,
)
return ret
if __opts__["test"]:
ret["result"] = None
ret["changes"] = expected_changes
ret["comment"] = "Service {} would be {}".format(name, mask_type)
return ret
__salt__["service.mask"](name, runtime)
if __salt__["service.masked"](name, runtime):
ret["changes"] = expected_changes
ret["comment"] = "Service {} was {}".format(name, mask_type)
else:
ret["comment"] = "Failed to mask service {}".format(name)
return ret
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
def unmasked(name, runtime=False):
"""
.. versionadded:: 2017.7.0
.. note::
This state is only available on minions which use systemd_.
Ensures that the named service is unmasked
name
Name of the service to unmask
runtime : False
By default, this state will manage an indefinite mask for the named
service. Set this argument to ``True`` to ensure that the service is
runtime masked.
.. note::
It is possible for a service to have both indefinite and runtime masks
set for it. Therefore, this state will manage a runtime or indefinite
mask independently of each other. This means that if the service is
indefinitely masked, running this state with ``runtime`` set to
``True`` will _not_ remove the indefinite mask.
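For example, an illustrative SLS snippet (the state ID and service name are
only placeholders) that removes a runtime mask:
.. code-block:: yaml
    unmask_runtime_foo:
      service.unmasked:
        - name: foo  # placeholder service name
        - runtime: True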
.. _systemd: https://freedesktop.org/wiki/Software/systemd/
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
if "service.masked" not in __salt__:
ret["comment"] = "Service masking not available on this minion"
ret["result"] = False
return ret
mask_type = "runtime masked" if runtime else "masked"
action = "runtime unmasked" if runtime else "unmasked"
expected_changes = {mask_type: {"old": True, "new": False}}
try:
if not __salt__["service.masked"](name, runtime):
ret["comment"] = "Service {} was already {}".format(name, action)
return ret
if __opts__["test"]:
ret["result"] = None
ret["changes"] = expected_changes
ret["comment"] = "Service {} would be {}".format(name, action)
return ret
__salt__["service.unmask"](name, runtime)
if not __salt__["service.masked"](name, runtime):
ret["changes"] = expected_changes
ret["comment"] = "Service {} was {}".format(name, action)
else:
ret["comment"] = "Failed to unmask service {}".format(name)
return ret
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
def mod_watch(
name,
sfun=None,
sig=None,
reload=False,
full_restart=False,
init_delay=None,
force=False,
**kwargs
):
"""
The service watcher, called to invoke the watch command.
When called, it will restart or reload the named service.
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the watching service
(e.g. ``service.running``).
name
The name of the service to control.
sfun
The original function which triggered the mod_watch call
(`service.running`, for example).
sig
The string to search for when looking for the service process with ps.
reload
When set, reload the service instead of restarting it
(e.g. ``service nginx reload``).
full_restart
Perform a full stop/start of a service by passing ``--full-restart``.
This option is ignored if ``reload`` is set and is supported by only a few
:py:func:`service modules <salt.modules.service>`.
force
Use service.force_reload instead of reload (needs reload to be set to True).
init_delay
Add a sleep command (in seconds) before the service is restarted/reloaded.
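For example, an illustrative SLS snippet (the service name and watched file
are only placeholders); the ``watch`` requisite below causes this function
to be called, reloading the service whenever the watched file changes:
.. code-block:: yaml
    nginx:
      service.running:
        - enable: True
        - reload: True
        - watch:
          - file: /etc/nginx/nginx.conf  # placeholder watched state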
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
past_participle = None
status_kwargs, warnings = _get_systemd_only(__salt__["service.status"], kwargs)
if warnings:
_add_warnings(ret, warnings)
if sfun == "dead":
verb = "stop"
past_participle = verb + "ped"
if __salt__["service.status"](name, sig, **status_kwargs):
func = __salt__["service.stop"]
else:
ret["result"] = True
ret["comment"] = "Service is already {}".format(past_participle)
return ret
elif sfun == "running":
if __salt__["service.status"](name, sig, **status_kwargs):
if "service.reload" in __salt__ and reload:
if "service.force_reload" in __salt__ and force:
func = __salt__["service.force_reload"]
verb = "forcefully reload"
else:
func = __salt__["service.reload"]
verb = "reload"
elif "service.full_restart" in __salt__ and full_restart:
func = __salt__["service.full_restart"]
verb = "fully restart"
else:
func = __salt__["service.restart"]
verb = "restart"
else:
func = __salt__["service.start"]
verb = "start"
if not past_participle:
past_participle = verb + "ed"
else:
ret["comment"] = "Unable to trigger watch for service.{}".format(sfun)
ret["result"] = False
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Service is set to be {}".format(past_participle)
return ret
if verb == "start" and "service.stop" in __salt__:
# stop service before start
__salt__["service.stop"](name)
func_kwargs, warnings = _get_systemd_only(func, kwargs)
if warnings:
_add_warnings(ret, warnings)
try:
result = func(name, **func_kwargs)
except CommandExecutionError as exc:
ret["result"] = False
ret["comment"] = exc.strerror
return ret
if init_delay:
time.sleep(init_delay)
ret["changes"] = {name: result}
ret["result"] = result
ret["comment"] = (
"Service {}".format(past_participle)
if result
else "Failed to {} the service".format(verb)
)
return ret
def mod_beacon(name, **kwargs):
"""
Create a beacon to monitor a service based on a beacon state argument.
.. note::
This state exists to support special handling of the ``beacon``
state argument for supported state functions. It should not be called directly.
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
sfun = kwargs.pop("sfun", None)
supported_funcs = ["running", "dead"]
if sfun in supported_funcs:
if kwargs.get("beacon"):
beacon_module = "service"
data = {}
_beacon_data = kwargs.get("beacon_data", {})
data["onchangeonly"] = _beacon_data.get("onchangeonly", True)
data["delay"] = _beacon_data.get("delay", 0)
data["emitatstartup"] = _beacon_data.get("emitatstartup", False)
data["uncleanshutdown"] = _beacon_data.get("emitatstartup", None)
beacon_name = "beacon_{}_{}".format(beacon_module, name)
beacon_kwargs = {
"name": beacon_name,
"services": {name: data},
"interval": _beacon_data.get("interval", 60),
"beacon_module": beacon_module,
}
ret = __states__["beacon.present"](**beacon_kwargs)
return ret
else:
return {
"name": name,
"changes": {},
"comment": "Not adding beacon.",
"result": True,
}
else:
return {
"name": name,
"changes": {},
"comment": "service.{} does not work with the beacon state function".format(
sfun
),
"result": False,
}
|
|
from contextlib import contextmanager
from sqlalchemy.types import NULLTYPE, Integer
from sqlalchemy import schema as sa_schema
from . import util
from .compat import string_types
from .ddl import impl
__all__ = ('Operations',)
class Operations(object):
"""Define high level migration operations.
Each operation corresponds to some schema migration operation,
executed against a particular :class:`.MigrationContext`
which in turn represents connectivity to a database,
or a file output stream.
While :class:`.Operations` is normally configured as
part of the :meth:`.EnvironmentContext.run_migrations`
method called from an ``env.py`` script, a standalone
:class:`.Operations` instance can be
made for use cases external to regular Alembic
migrations by passing in a :class:`.MigrationContext`::
from alembic.migration import MigrationContext
from alembic.operations import Operations
conn = myengine.connect()
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
op.alter_column("t", "c", nullable=True)
"""
def __init__(self, migration_context):
"""Construct a new :class:`.Operations`
:param migration_context: a :class:`.MigrationContext`
instance.
"""
self.migration_context = migration_context
self.impl = migration_context.impl
@classmethod
@contextmanager
def context(cls, migration_context):
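"""Install a new :class:`.Operations` instance as the current module-level
proxy via :func:`._install_proxy`, yield it for use within the block, and
remove the proxy when the block completes.
"""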
from .op import _install_proxy, _remove_proxy
op = Operations(migration_context)
_install_proxy(op)
yield op
_remove_proxy()
def _primary_key_constraint(self, name, table_name, cols, schema=None):
m = sa_schema.MetaData()
columns = [sa_schema.Column(n, NULLTYPE) for n in cols]
t1 = sa_schema.Table(table_name, m,
*columns,
schema=schema)
p = sa_schema.PrimaryKeyConstraint(*columns, name=name)
t1.append_constraint(p)
return p
def _foreign_key_constraint(self, name, source, referent,
local_cols, remote_cols,
onupdate=None, ondelete=None,
deferrable=None, source_schema=None,
referent_schema=None):
m = sa_schema.MetaData()
if source == referent:
t1_cols = local_cols + remote_cols
else:
t1_cols = local_cols
sa_schema.Table(referent, m,
*[sa_schema.Column(n, NULLTYPE) for n in remote_cols],
schema=referent_schema)
t1 = sa_schema.Table(source, m,
*[sa_schema.Column(n, NULLTYPE) for n in t1_cols],
schema=source_schema)
tname = "%s.%s" % (referent_schema, referent) if referent_schema \
else referent
f = sa_schema.ForeignKeyConstraint(local_cols,
["%s.%s" % (tname, n)
for n in remote_cols],
name=name,
onupdate=onupdate,
ondelete=ondelete,
deferrable=deferrable
)
t1.append_constraint(f)
return f
def _unique_constraint(self, name, source, local_cols, schema=None, **kw):
t = sa_schema.Table(source, sa_schema.MetaData(),
*[sa_schema.Column(n, NULLTYPE) for n in local_cols],
schema=schema)
kw['name'] = name
uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw)
# TODO: need event tests to ensure the event
# is fired off here
t.append_constraint(uq)
return uq
def _check_constraint(self, name, source, condition, schema=None, **kw):
t = sa_schema.Table(source, sa_schema.MetaData(),
sa_schema.Column('x', Integer), schema=schema)
ck = sa_schema.CheckConstraint(condition, name=name, **kw)
t.append_constraint(ck)
return ck
def _table(self, name, *columns, **kw):
m = sa_schema.MetaData()
t = sa_schema.Table(name, m, *columns, **kw)
for f in t.foreign_keys:
self._ensure_table_for_fk(m, f)
return t
def _column(self, name, type_, **kw):
return sa_schema.Column(name, type_, **kw)
def _index(self, name, tablename, columns, schema=None, **kw):
t = sa_schema.Table(tablename or 'no_table', sa_schema.MetaData(),
*[sa_schema.Column(n, NULLTYPE) for n in columns],
schema=schema
)
return sa_schema.Index(name, *[t.c[n] for n in columns], **kw)
def _parse_table_key(self, table_key):
if '.' in table_key:
tokens = table_key.split('.')
sname = ".".join(tokens[0:-1])
tname = tokens[-1]
else:
tname = table_key
sname = None
return (sname, tname)
def _ensure_table_for_fk(self, metadata, fk):
"""create a placeholder Table object for the referent of a
ForeignKey.
"""
if isinstance(fk._colspec, string_types):
table_key, cname = fk._colspec.rsplit('.', 1)
sname, tname = self._parse_table_key(table_key)
if table_key not in metadata.tables:
rel_t = sa_schema.Table(tname, metadata, schema=sname)
else:
rel_t = metadata.tables[table_key]
if cname not in rel_t.c:
rel_t.append_column(sa_schema.Column(cname, NULLTYPE))
def get_context(self):
"""Return the :class:`.MigrationContext` object that's
currently in use.
"""
return self.migration_context
def rename_table(self, old_table_name, new_table_name, schema=None):
"""Emit an ALTER TABLE to rename a table.
:param old_table_name: old name.
:param new_table_name: new name.
:param schema: Optional schema name to operate within.
"""
self.impl.rename_table(
old_table_name,
new_table_name,
schema=schema
)
@util._with_legacy_names([('name', 'new_column_name')])
def alter_column(self, table_name, column_name,
nullable=None,
server_default=False,
new_column_name=None,
type_=None,
autoincrement=None,
existing_type=None,
existing_server_default=False,
existing_nullable=None,
existing_autoincrement=None,
schema=None
):
"""Issue an "alter column" instruction using the
current migration context.
Generally, only that aspect of the column which
is being changed, i.e. name, type, nullability,
default, needs to be specified. Multiple changes
can also be specified at once and the backend should
"do the right thing", emitting each change either
separately or together as the backend allows.
MySQL has special requirements here, since MySQL
cannot ALTER a column without a full specification.
When producing MySQL-compatible migration files,
it is recommended that the ``existing_type``,
``existing_server_default``, and ``existing_nullable``
parameters be present, if not being altered.
Type changes which are against the SQLAlchemy
"schema" types :class:`~sqlalchemy.types.Boolean`
and :class:`~sqlalchemy.types.Enum` may also
add or drop constraints which accompany those
types on backends that don't support them natively.
The ``existing_server_default`` argument is
used in this case as well to remove a previous
constraint.
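e.g., an illustrative call (table, column, and types are placeholders; the
``existing_*`` parameters are included since MySQL requires them)::
    from alembic import op
    from sqlalchemy import String
    # placeholder table/column names and types
    op.alter_column(
        'account', 'name',
        nullable=False,
        type_=String(100),
        existing_type=String(50),
        existing_nullable=True
    )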
:param table_name: string name of the target table.
:param column_name: string name of the target column,
as it exists before the operation begins.
:param nullable: Optional; specify ``True`` or ``False``
to alter the column's nullability.
:param server_default: Optional; specify a string
SQL expression, :func:`~sqlalchemy.sql.expression.text`,
or :class:`~sqlalchemy.schema.DefaultClause` to indicate
an alteration to the column's default value.
Set to ``None`` to have the default removed.
:param new_column_name: Optional; specify a string name here to
indicate the new name within a column rename operation.
.. versionchanged:: 0.5.0
The ``name`` parameter is now named ``new_column_name``.
The old name will continue to function for backwards
compatibility.
:param ``type_``: Optional; a :class:`~sqlalchemy.types.TypeEngine`
type object to specify a change to the column's type.
For SQLAlchemy types that also indicate a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
the constraint is also generated.
:param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
currently understood by the MySQL dialect.
:param existing_type: Optional; a
:class:`~sqlalchemy.types.TypeEngine`
type object to specify the previous type. This
is required for all MySQL column alter operations that
don't otherwise specify a new type, as well as for
when nullability is being changed on a SQL Server
column. It is also used if the type is a so-called
SQLlchemy "schema" type which may define a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`,
:class:`~sqlalchemy.types.Enum`),
so that the constraint can be dropped.
:param existing_server_default: Optional; The existing
default value of the column. Required on MySQL if
an existing default is not being changed; else MySQL
removes the default.
:param existing_nullable: Optional; the existing nullability
of the column. Required on MySQL if the existing nullability
is not being changed; else MySQL sets this to NULL.
:param existing_autoincrement: Optional; the existing autoincrement
of the column. Used for MySQL's system of altering a column
that specifies ``AUTO_INCREMENT``.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
compiler = self.impl.dialect.statement_compiler(
self.impl.dialect,
None
)
def _count_constraint(constraint):
return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and \
(not constraint._create_rule or
constraint._create_rule(compiler))
if existing_type and type_:
t = self._table(table_name,
sa_schema.Column(column_name, existing_type),
schema=schema
)
for constraint in t.constraints:
if _count_constraint(constraint):
self.impl.drop_constraint(constraint)
self.impl.alter_column(table_name, column_name,
nullable=nullable,
server_default=server_default,
name=new_column_name,
type_=type_,
schema=schema,
autoincrement=autoincrement,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_autoincrement=existing_autoincrement
)
if type_:
t = self._table(table_name,
sa_schema.Column(column_name, type_),
schema=schema
)
for constraint in t.constraints:
if _count_constraint(constraint):
self.impl.add_constraint(constraint)
def add_column(self, table_name, column, schema=None):
"""Issue an "add column" instruction using the current
migration context.
e.g.::
from alembic import op
from sqlalchemy import Column, String
op.add_column('organization',
Column('name', String())
)
The provided :class:`~sqlalchemy.schema.Column` object can also
specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing
a remote table name. Alembic will automatically generate a stub
"referenced" table and emit a second ALTER statement in order
to add the constraint separately::
from alembic import op
from sqlalchemy import Column, INTEGER, ForeignKey
op.add_column('organization',
Column('account_id', INTEGER, ForeignKey('accounts.id'))
)
Note that this statement uses the :class:`~sqlalchemy.schema.Column`
construct as is from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the column add
op.add_column('account',
Column('timestamp', TIMESTAMP, server_default=func.now())
)
:param table_name: String name of the parent table.
:param column: a :class:`sqlalchemy.schema.Column` object
representing the new column.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
t = self._table(table_name, column, schema=schema)
self.impl.add_column(
table_name,
column,
schema=schema
)
for constraint in t.constraints:
if not isinstance(constraint, sa_schema.PrimaryKeyConstraint):
self.impl.add_constraint(constraint)
def drop_column(self, table_name, column_name, **kw):
"""Issue a "drop column" instruction using the current
migration context.
e.g.::
drop_column('organization', 'account_id')
:param table_name: name of table
:param column_name: name of column
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
:param mssql_drop_check: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the CHECK constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.check_constraints,
then exec's a separate DROP CONSTRAINT for that constraint.
:param mssql_drop_default: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the DEFAULT constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.default_constraints,
then exec's a separate DROP CONSTRAINT for that default.
:param mssql_drop_foreign_key: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop a single FOREIGN KEY constraint on the column using a
SQL-script-compatible
block that selects into a @variable from
sys.foreign_keys/sys.foreign_key_columns,
then exec's a separate DROP CONSTRAINT for that default. Only
works if the column has exactly one FK constraint which refers to
it, at the moment.
.. versionadded:: 0.6.2
"""
self.impl.drop_column(
table_name,
self._column(column_name, NULLTYPE),
**kw
)
def create_primary_key(self, name, table_name, cols, schema=None):
"""Issue a "create primary key" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_primary_key(
"pk_my_table", "my_table",
["id", "version"]
)
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.PrimaryKeyConstraint`
object which it then associates with the :class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
.. versionadded:: 0.5.0
:param name: Name of the primary key constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
`NamingConventions <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/NamingConventions>`_,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the target table.
:param cols: a list of string column names to be applied to the
primary key constraint.
:param schema: Optional schema name of the table.
"""
self.impl.add_constraint(
self._primary_key_constraint(name, table_name, cols,
schema)
)
def create_foreign_key(self, name, source, referent, local_cols,
remote_cols, onupdate=None, ondelete=None,
deferrable=None, source_schema=None,
referent_schema=None):
"""Issue a "create foreign key" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_foreign_key(
"fk_user_address", "address",
"user", ["user_id"], ["id"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.ForeignKeyConstraint`
object which it then associates with the :class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param name: Name of the foreign key constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
`NamingConventions <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/NamingConventions>`_,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source: String name of the source table.
:param referent: String name of the destination table.
:param local_cols: a list of string column names in the
source table.
:param remote_cols: a list of string column names in the
remote table.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param source_schema: Optional schema name of the source table.
:param referent_schema: Optional schema name of the destination table.
"""
self.impl.add_constraint(
self._foreign_key_constraint(name, source, referent,
local_cols, remote_cols,
onupdate=onupdate, ondelete=ondelete,
deferrable=deferrable, source_schema=source_schema,
referent_schema=referent_schema)
)
def create_unique_constraint(self, name, source, local_cols,
schema=None, **kw):
"""Issue a "create unique constraint" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_unique_constraint("uq_user_name", "user", ["name"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.UniqueConstraint`
object which it then associates with the :class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param name: Name of the unique constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
`NamingConventions <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/NamingConventions>`_,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source: String name of the source table. Dotted schema names are
supported.
:param local_cols: a list of string column names in the
source table.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
self.impl.add_constraint(
self._unique_constraint(name, source, local_cols,
schema=schema, **kw)
)
def create_check_constraint(self, name, source, condition,
schema=None, **kw):
"""Issue a "create check constraint" instruction using the
current migration context.
e.g.::
from alembic import op
from sqlalchemy.sql import column, func
op.create_check_constraint(
"ck_user_name_len",
"user",
func.len(column('name')) > 5
)
CHECK constraints are usually against a SQL expression, so ad-hoc
table metadata is usually needed. The function will convert the given
arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
to an anonymous table in order to emit the CREATE statement.
:param name: Name of the check constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
`NamingConventions <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/NamingConventions>`_,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source: String name of the source table.
:param condition: SQL expression that's the condition of the constraint.
Can be a string or SQLAlchemy expression language structure.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
self.impl.add_constraint(
self._check_constraint(name, source, condition, schema=schema, **kw)
)
def create_table(self, name, *columns, **kw):
"""Issue a "create table" instruction using the current migration context.
This directive receives an argument list similar to that of the
traditional :class:`sqlalchemy.schema.Table` construct, but without the
metadata::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
op.create_table(
'account',
Column('id', INTEGER, primary_key=True),
Column('name', VARCHAR(50), nullable=False),
Column('description', NVARCHAR(200)),
Column('timestamp', TIMESTAMP, server_default=func.now())
)
Note that :meth:`.create_table` accepts :class:`~sqlalchemy.schema.Column`
constructs directly from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the "timestamp" column
op.create_table('account',
Column('id', INTEGER, primary_key=True),
Column('timestamp', TIMESTAMP, server_default=func.now())
)
:param name: Name of the table
:param \*columns: collection of :class:`~sqlalchemy.schema.Column`
objects within
the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
objects
and :class:`~sqlalchemy.schema.Index` objects.
:param schema: Optional schema name to operate within.
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
"""
new_table = self._table(name, *columns, **kw)
self.impl.create_table(new_table)
return new_table
def drop_table(self, name, **kw):
"""Issue a "drop table" instruction using the current
migration context.
e.g.::
drop_table("accounts")
:param name: Name of the table
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
"""
self.impl.drop_table(
self._table(name, **kw)
)
def create_index(self, name, table_name, columns, schema=None, **kw):
"""Issue a "create index" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_index('ik_test', 't1', ['foo', 'bar'])
:param name: name of the index.
:param table_name: name of the owning table.
.. versionchanged:: 0.5.0
The ``tablename`` parameter is now named ``table_name``.
As this is a positional argument, the old name is no
longer present.
:param columns: a list of string column names in the
table.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
self.impl.create_index(
self._index(name, table_name, columns, schema=schema, **kw)
)
@util._with_legacy_names([('tablename', 'table_name')])
def drop_index(self, name, table_name=None, schema=None):
"""Issue a "drop index" instruction using the current
migration context.
e.g.::
drop_index("accounts")
:param name: name of the index.
:param table_name: name of the owning table. Some
backends such as Microsoft SQL Server require this.
.. versionchanged:: 0.5.0
The ``tablename`` parameter is now named ``table_name``.
The old name will continue to function for backwards
compatibility.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
# need a dummy column name here since SQLAlchemy
# 0.7.6 and further raises on Index with no columns
self.impl.drop_index(
self._index(name, table_name, ['x'], schema=schema)
)
@util._with_legacy_names([("type", "type_")])
def drop_constraint(self, name, table_name, type_=None, schema=None):
"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
:param name: name of the constraint.
:param table_name: table name.
.. versionchanged:: 0.5.0
The ``tablename`` parameter is now named ``table_name``.
As this is a positional argument, the old name is no
longer present.
:param ``type_``: optional, required on MySQL. can be
'foreignkey', 'primary', 'unique', or 'check'.
.. versionchanged:: 0.5.0
The ``type`` parameter is now named ``type_``. The old name
``type`` will remain for backwards compatibility.
        .. versionadded:: 0.3.6 'primary' qualifier to enable
dropping of MySQL primary key constraints.
:param schema: Optional schema name to operate within.
.. versionadded:: 0.4.0
"""
t = self._table(table_name, schema=schema)
types = {
'foreignkey': lambda name: sa_schema.ForeignKeyConstraint(
[], [], name=name),
'primary': sa_schema.PrimaryKeyConstraint,
'unique': sa_schema.UniqueConstraint,
'check': lambda name: sa_schema.CheckConstraint("", name=name),
None: sa_schema.Constraint
}
try:
const = types[type_]
except KeyError:
raise TypeError("'type' can be one of %s" %
", ".join(sorted(repr(x) for x in types)))
const = const(name=name)
t.append_constraint(const)
self.impl.drop_constraint(const)
def bulk_insert(self, table, rows):
"""Issue a "bulk insert" operation using the current
migration context.
This provides a means of representing an INSERT of multiple rows
which works equally well in the context of executing on a live
connection as well as that of generating a SQL script. In the
case of a SQL script, the values are rendered inline into the
statement.
e.g.::
from alembic import op
from datetime import date
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Date
# Create an ad-hoc table to use for the insert statement.
accounts_table = table('account',
column('id', Integer),
column('name', String),
column('create_date', Date)
)
op.bulk_insert(accounts_table,
[
{'id':1, 'name':'John Smith',
'create_date':date(2010, 10, 5)},
{'id':2, 'name':'Ed Williams',
'create_date':date(2007, 5, 27)},
{'id':3, 'name':'Wendy Jones',
'create_date':date(2008, 8, 15)},
]
)
"""
self.impl.bulk_insert(table, rows)
def inline_literal(self, value, type_=None):
"""Produce an 'inline literal' expression, suitable for
using in an INSERT, UPDATE, or DELETE statement.
When using Alembic in "offline" mode, CRUD operations
aren't compatible with SQLAlchemy's default behavior surrounding
literal values,
which is that they are converted into bound values and passed
separately into the ``execute()`` method of the DBAPI cursor.
An offline SQL
script needs to have these rendered inline. While it should
always be noted that inline literal values are an **enormous**
security hole in an application that handles untrusted input,
a schema migration is not run in this context, so
literals are safe to render inline, with the caveat that
advanced types like dates may not be supported directly
by SQLAlchemy.
See :meth:`.execute` for an example usage of
:meth:`.inline_literal`.
:param value: The value to render. Strings, integers, and simple
numerics should be supported. Other types like boolean,
dates, etc. may or may not be supported yet by various
backends.
:param ``type_``: optional - a :class:`sqlalchemy.types.TypeEngine`
subclass stating the type of this value. In SQLAlchemy
expressions, this is usually derived automatically
from the Python type of the value itself, as well as
based on the context in which the value is used.
"""
return impl._literal_bindparam(None, value, type_=type_)
def execute(self, sql, execution_options=None):
"""Execute the given SQL using the current migration context.
In a SQL script context, the statement is emitted directly to the
output stream. There is *no* return result, however, as this
function is oriented towards generating a change script
that can run in "offline" mode. For full interaction
with a connected database, use the "bind" available
from the context::
from alembic import op
connection = op.get_bind()
Also note that any parameterized statement here *will not work*
in offline mode - INSERT, UPDATE and DELETE statements which refer
to literal values would need to render
inline expressions. For simple use cases, the
:meth:`.inline_literal` function can be used for **rudimentary**
quoting of string values. For "bulk" inserts, consider using
:meth:`.bulk_insert`.
For example, to emit an UPDATE statement which is equally
compatible with both online and offline mode::
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
account = table('account',
column('name', String)
)
op.execute(
account.update().\\
where(account.c.name==op.inline_literal('account 1')).\\
values({'name':op.inline_literal('account 2')})
)
Note above we also used the SQLAlchemy
:func:`sqlalchemy.sql.expression.table`
and :func:`sqlalchemy.sql.expression.column` constructs to make a brief,
ad-hoc table construct just for our UPDATE statement. A full
:class:`~sqlalchemy.schema.Table` construct of course works perfectly
fine as well, though note it's a recommended practice to at least ensure
the definition of a table is self-contained within the migration script,
rather than imported from a module that may break compatibility with
older migrations.
:param sql: Any legal SQLAlchemy expression, including:
* a string
* a :func:`sqlalchemy.sql.expression.text` construct.
* a :func:`sqlalchemy.sql.expression.insert` construct.
* a :func:`sqlalchemy.sql.expression.update`,
:func:`sqlalchemy.sql.expression.insert`,
or :func:`sqlalchemy.sql.expression.delete` construct.
* Pretty much anything that's "executable" as described
in :ref:`sqlexpression_toplevel`.
:param execution_options: Optional dictionary of
execution options, will be passed to
:meth:`sqlalchemy.engine.Connection.execution_options`.
"""
self.migration_context.impl.execute(sql,
execution_options=execution_options)
def get_bind(self):
"""Return the current 'bind'.
Under normal circumstances, this is the
:class:`~sqlalchemy.engine.Connection` currently being used
to emit SQL to the database.
In a SQL script context, this value is ``None``. [TODO: verify this]
"""
return self.migration_context.impl.bind
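# A minimal sketch of how the directives documented above are typically
# combined from an Alembic revision script (assuming the module-level ``op``
# proxy that revision files use); the table, column and index names here are
# hypothetical and not part of this module.
def _example_upgrade():
    from alembic import op
    from sqlalchemy import Column, INTEGER, VARCHAR, TIMESTAMP, func
    from sqlalchemy.sql import column

    op.create_table(
        'account',
        Column('id', INTEGER, primary_key=True),
        Column('name', VARCHAR(50), nullable=False),
        Column('created', TIMESTAMP, server_default=func.now())
    )
    op.create_index('ik_account_name', 'account', ['name'])
    op.create_check_constraint(
        'ck_account_name_len',
        'account',
        func.len(column('name')) > 5
    )


def _example_downgrade():
    from alembic import op

    op.drop_index('ik_account_name', table_name='account')
    op.drop_table('account')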
|
|
# coding=utf-8
import errno
import re
import socket
import ssl
import time
import signal
from collections import namedtuple
Prefix = namedtuple('Prefix', ['nick', 'user', 'host', 'raw'])
ServerCmd = namedtuple('ServerCmd', ['prefix', 'cmd', 'args'])
class IRCClient(object):
def __init__(self,
server='127.0.0.1',
port=6667,
ssl=True,
encoding='utf-8',
nick='yetanotherbot',
user=None,
password=None,
realname='',
timeout=120):
self.socket = None
self.quitting = False
self.server = server
self.port = port
self.ssl = ssl
self.encoding = encoding
self.nick = nick
self.user = user
self.password = password
self.realname = realname
self.timeout = timeout
self.channel_nicks = {} # channel name -> set of nicks
self.bad_chars_regex = re.compile(r'[\r\n]+')
        self.msg_end_bytes = self.encode('\r\n')  # line terminator every IRC message must end with
self.max_msg_bytes = 512 - len(self.msg_end_bytes)
        self.safe_msg_bytes = 400  # reasonable upper limit of bytes that should be sent via send_privmsg etc.
signal.signal(signal.SIGINT, lambda signum, frame: self.quit('ctrl-C'))
def encode(self, string):
return string.encode(self.encoding, 'ignore')
def decode(self, bytes):
return bytes.decode(self.encoding, 'ignore')
def send_raw(self, msg):
"""Send a raw line to the server"""
try:
# strip newlines
stripped = self.bad_chars_regex.sub(' ', msg)
encoded = stripped.encode(self.encoding)
            # naive truncation to the 512-byte IRC line limit (may split a multibyte character)
self.socket.send(encoded[:self.max_msg_bytes] + self.msg_end_bytes)
except TypeError as ex:
return False # invalid msg
except Exception as ex:
# something else went horribly wrong, disconnect
self.on_log('Exception while sending data: %s' % repr(ex))
self.disconnect()
return True
def joined_channels(self):
return self.channel_nicks.keys()
def nicks_in_channel(self, channel):
if channel not in self.channel_nicks:
return set()
return self.channel_nicks[channel]
def send_privmsg(self, channel, msg):
"""Send a message to a channel/user"""
if not channel or not msg:
return
self.send_raw('PRIVMSG %s :%s' % (channel, msg))
def send_notice(self, channel, msg):
"""Send a notice to a channel/user"""
if not channel or not msg:
return
self.send_raw('NOTICE %s :%s' % (channel, msg))
def set_nick(self, nick):
"""Set the bot's nick"""
self.send_raw('NICK %s' % nick)
def set_mode(self, nick, mode):
self.send_raw('MODE %s %s' % (nick, mode))
def join(self, channel):
"""Join a channel"""
self.send_raw('JOIN %s' % channel)
if channel not in self.channel_nicks.keys():
self.channel_nicks[channel] = set()
def part(self, channel):
"""Leave a channel"""
self.send_raw('PART %s' % channel)
self.channel_nicks.pop(channel, None)
def quit(self, reason):
"""Quit from the server and end the main loop gracefully"""
self.send_raw('QUIT :%s' % reason)
self.quitting = True
def kick(self, channel, nick, reason = None):
"""Kick someone from a channel"""
if reason is None:
self.send_raw('KICK %s %s' % (channel, nick))
else:
            self.send_raw('KICK %s %s :%s' % (channel, nick, reason))  # reason as trailing parameter so it may contain spaces
def parse_server_cmd(self, cmd):
"""Parse a message received from the server and split it into manageable parts.
*inspired by* twisted's irc implementation"""
prefix = ''
trailing = []
if not cmd:
return None
try:
if cmd[0] == ':':
prefix, cmd = cmd[1:].split(' ', 1)
if cmd.find(' :') != -1:
cmd, trailing = cmd.split(' :', 1)
args = cmd.split()
args.append(trailing)
else:
args = cmd.split()
cmd = args.pop(0)
return ServerCmd(prefix, cmd, args)
except Exception as ex:
self.on_log('Received invalid message from server: %s' % cmd)
return None
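    # Worked example of the split performed above, using a hypothetical raw line:
    #   parse_server_cmd(':alice!ali@example.net PRIVMSG #chan :hello there')
    #   -> ServerCmd(prefix='alice!ali@example.net', cmd='PRIVMSG',
    #                args=['#chan', 'hello there'])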
def handle_server_cmd(self, cmd):
"""Handle a received command (that has been parsed by parseServerCmd())"""
handler = getattr(self, 'cmd_%s' % cmd.cmd, None)
if handler:
handler(self.split_prefix(cmd.prefix), cmd.args)
def split_prefix(self, prefix):
"""Extract the nick, user and host from a prefix"""
split = prefix.split('!')
nick = split[0]
user = None
host = None
if len(split) > 1:
split = split[1].split('@')
user = split[0]
if len(split) > 1:
host = split[1]
return Prefix(nick, user, host, prefix)
def cmd_NICK(self, prefix, args):
new_nick = args[0]
# update own nick
if prefix.nick == self.nick:
self.nick = new_nick
# update other people's nicks
for chan, nicks in self.channel_nicks.items():
try:
nicks.discard(prefix.nick)
nicks.add(new_nick)
except KeyError:
pass
self.on_nick(prefix, args[0])
def cmd_PING(self, prefix, args):
self.send_raw('PONG :%s' % args[0])
def cmd_PRIVMSG(self, prefix, args):
self.on_privmsg(prefix, args[0], args[1])
def cmd_NOTICE(self, prefix, args):
self.on_notice(prefix, args[0], args[1])
def cmd_QUIT(self, prefix, args):
for chan, nicks in self.channel_nicks.items():
nicks.discard(prefix.nick)
self.on_quit(prefix)
def cmd_ERROR(self, prefix, args):
self.on_error(args[0])
self.disconnect()
def cmd_JOIN(self, prefix, args):
channel = args[0]
# add newly joined nick
if channel in self.channel_nicks.keys():
self.channel_nicks[channel].add(prefix.nick)
# send a WHO, if we joined somewhere
if prefix.nick == self.nick:
self.send_raw('WHO ' + channel)
self.on_join(prefix, channel)
def cmd_PART(self, prefix, args):
chan = args[0]
if chan in self.channel_nicks.keys():
self.channel_nicks[chan].discard(prefix.nick)
self.on_part(prefix, chan)
# ErrNickNameInUse
def cmd_433(self, prefix, args):
self.nick = args[1] + '_'
self.set_nick(self.nick)
# ErrNoMotd
def cmd_422(self, prefix, args):
self.on_serverready()
# EndOfMotd
def cmd_376(self, prefix, args):
self.on_serverready()
# whoreply
def cmd_352(self, prefix, args):
try:
chan = args[1]
nick = args[5]
if chan in self.channel_nicks.keys():
self.channel_nicks[chan].add(nick)
except:
pass
def on_nick(self, prefix, new):
pass
def on_join(self, prefix, channel):
pass
def on_part(self, prefix, channel):
pass
def on_quit(self, prefix):
pass
def on_connect(self):
pass
def on_disconnect(self):
pass
def on_serverready(self):
pass
def on_privmsg(self, prefix, target, msg):
pass
def on_notice(self, prefix, target, msg):
pass
def on_rawmsg(self, msg):
"""raw data from the server"""
pass
def on_error(self, error):
pass
def on_tick(self):
"""Called once roughly every second"""
pass
def on_log(self, msg):
pass
def connect(self):
"""Try to connect to the server"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# enable ssl
if self.ssl:
self.socket = ssl.wrap_socket(self.socket)
try:
self.socket.connect((self.server, self.port))
self.socket.setblocking(False)
if self.password:
self.send_raw('PASS %s' % self.password)
self.set_nick(self.nick)
if self.user:
self.send_raw('USER %s 0 * :%s' % (self.user, self.realname))
except Exception as ex:
self.on_log('Connecting failed: %s' % repr(ex))
return False
self.on_connect()
return True
def disconnect(self):
"""Disconnect from the server, but don't quit the main loop."""
if self.socket:
try:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.on_disconnect()
return True
except:
pass
return False
def run(self):
"""Main loop"""
while not self.quitting:
# try connecting indefinitely
while not self.connect():
time.sleep(30)
# main recv loop
recv = ''
last_time = time.time() # timestamp for detecting timeouts
last_tick = time.time()
sent_ping = False
while not self.quitting:
try:
now = time.time()
diff = now - last_time
# call on_tick every second
if now - last_tick > 1.0:
self.on_tick()
last_tick = time.time()
# send a ping at half the timeout
if diff > self.timeout / 2.0 and not sent_ping:
self.send_raw('PING :%s' % self.nick)
sent_ping = True
# no messages received after timeout, try to reconnect
if diff > self.timeout:
break
recv += self.socket.recv(4098).decode(self.encoding, 'ignore')
last_time = now
sent_ping = False
except socket.error as e:
err = e.args[0]
# sleep for a short time, if no data was received
if err == errno.EAGAIN or err == errno.EWOULDBLOCK or err == errno.ENOENT:
time.sleep(0.1)
continue
except Exception as ex:
self.on_log('Exception occurred receiving data: %s' % repr(ex))
break # break inner loop, try to reconnect
# split received data into messages and process them
while '\r\n' in recv:
line, recv = recv.split('\r\n', 1)
self.on_rawmsg(line)
cmd = self.parse_server_cmd(line)
if cmd:
self.handle_server_cmd(cmd)
self.disconnect()
return True
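# A minimal usage sketch, assuming the hook-based design above: subclass
# IRCClient, override the on_* callbacks of interest, then call run().  The
# server, channel and nick values below are hypothetical.
class _ExampleEchoBot(IRCClient):
    def on_serverready(self):
        # join a channel once the server signals end of MOTD (or no MOTD)
        self.join('#example')

    def on_privmsg(self, prefix, target, msg):
        # echo channel messages back, prefixed with the sender's nick
        if target.startswith('#'):
            self.send_privmsg(target, '%s said: %s' % (prefix.nick, msg))

    def on_log(self, msg):
        print(msg)

# e.g. _ExampleEchoBot(server='irc.example.net', port=6697, nick='echobot').run()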
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import selectors
import signal
import socket
import sys
from threading import Thread
import time
import traceback
from system_test import Logger
from system_test import TIMEOUT
class GracefulExitSignaler:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def split_chunk_for_display(raw_bytes):
"""
Given some raw bytes, return a display string
Only show the beginning and end of largish (2xMAGIC_SIZE) arrays.
:param raw_bytes:
:return: display string
"""
MAGIC_SIZE = 50 # Content repeats after chunks this big - used by echo client, too
if len(raw_bytes) > 2 * MAGIC_SIZE:
result = repr(raw_bytes[:MAGIC_SIZE]) + " ... " + repr(raw_bytes[-MAGIC_SIZE:])
else:
result = repr(raw_bytes)
return result
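# A small illustration (made-up byte strings) of the truncation rule above:
# payloads longer than 2 * MAGIC_SIZE are displayed as "head ... tail", while
# shorter payloads are shown verbatim via repr().
def _example_split_chunk_for_display():
    short = split_chunk_for_display(b'x' * 20)    # repr of all 20 bytes
    long_ = split_chunk_for_display(b'y' * 300)   # first 50 and last 50 bytes only
    return short, long_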
class TcpEchoClient:
def __init__(self, prefix, host, port, size, count, timeout, logger):
"""
:param host: connect to this host
:param port: connect to this port
:param size: size of individual payload chunks in bytes
:param count: number of payload chunks
:param strategy: "1" Send one payload; # TODO more strategies
Recv one payload
:param logger: Logger() object
:return:
"""
# Start up
self.sock = None
self.prefix = prefix
self.host = host
self.port = int(port)
self.size = size
self.count = count
self.timeout = timeout
self.logger = logger
self.keep_running = True
self.is_running = False
self.exit_status = None
self.error = None
self._thread = Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
def run(self):
self.logger.log("%s Client is starting up" % self.prefix)
try:
start_time = time.time()
self.is_running = True
self.logger.log('%s Connecting to host:%s, port:%d, size:%d, count:%d' %
(self.prefix, self.host, self.port, self.size, self.count))
total_sent = 0
total_rcvd = 0
if self.count > 0 and self.size > 0:
# outbound payload only if count and size both greater than zero
payload_out = []
out_list_idx = 0 # current _out array being sent
out_byte_idx = 0 # next-to-send in current array
out_ready_to_send = True
# Generate unique content for each message so you can tell where the message
# or fragment belongs in the whole stream. Chunks look like:
# b'[localhost:33333:6:0]ggggggggggggggggggggggggggggg'
# host: localhost
# port: 33333
# index: 6
# offset into message: 0
CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo server, too
for idx in range(self.count):
body_msg = ""
padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
while len(body_msg) < self.size:
chunk = "[%s:%d:%d:%d]" % (self.host, self.port, idx, len(body_msg))
padlen = CONTENT_CHUNK_SIZE - len(chunk)
chunk += padchar * padlen
body_msg += chunk
if len(body_msg) > self.size:
body_msg = body_msg[:self.size]
payload_out.append(bytearray(body_msg.encode()))
# incoming payloads
payload_in = []
in_list_idx = 0 # current _in array being received
for i in range(self.count):
payload_in.append(bytearray())
else:
# when count or size .LE. zero then just connect-disconnect
self.keep_running = False
# set up connection
host_address = (self.host, self.port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(host_address)
self.sock.setblocking(False)
# set up selector
sel = selectors.DefaultSelector()
sel.register(self.sock,
selectors.EVENT_READ | selectors.EVENT_WRITE)
# event loop
while self.keep_running:
if self.timeout > 0.0:
elapsed = time.time() - start_time
if elapsed > self.timeout:
self.exit_status = "%s Exiting due to timeout. Total sent= %d, total rcvd= %d" % \
(self.prefix, total_sent, total_rcvd)
break
for key, mask in sel.select(timeout=0.1):
sock = key.fileobj
if mask & selectors.EVENT_READ:
recv_data = sock.recv(1024)
if recv_data:
                            total_rcvd += len(recv_data)
payload_in[in_list_idx].extend(recv_data)
if len(payload_in[in_list_idx]) == self.size:
self.logger.log("%s Rcvd message %d" % (self.prefix, in_list_idx))
in_list_idx += 1
if in_list_idx == self.count:
# Received all bytes of all chunks - done.
self.keep_running = False
# Verify the received data
if payload_in != payload_out:
for idxc in range(self.count):
if not payload_in[idxc] == payload_out[idxc]:
for idxs in range(self.size):
ob = payload_out[idxc][idxs]
ib = payload_in[idxc][idxs]
if ob != ib:
self.error = "%s ERROR Rcvd message verify fail. row:%d, col:%d, " \
"expected:%s, actual:%s" \
% (self.prefix, idxc, idxs, repr(ob), repr(ib))
break
else:
out_ready_to_send = True
sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
elif len(payload_in[in_list_idx]) > self.size:
self.error = "ERROR Received message too big. Expected:%d, actual:%d" % \
(self.size, len(payload_in[in_list_idx]))
break
else:
pass # still accumulating a message
else:
# socket closed
self.keep_running = False
if not in_list_idx == self.count:
self.error = "ERROR server closed. Echoed %d of %d messages." % (in_list_idx, self.count)
if self.keep_running and mask & selectors.EVENT_WRITE:
if out_ready_to_send:
n_sent = self.sock.send(payload_out[out_list_idx][out_byte_idx:])
total_sent += n_sent
out_byte_idx += n_sent
if out_byte_idx == self.size:
self.logger.log("%s Sent message %d" % (self.prefix, out_list_idx))
out_byte_idx = 0
out_list_idx += 1
sel.modify(self.sock, selectors.EVENT_READ) # turn off write events
out_ready_to_send = False # turn on when rcvr receives
else:
pass # logger.log("DEBUG: ignoring EVENT_WRITE")
# shut down
sel.unregister(self.sock)
self.sock.close()
except Exception:
self.error = "ERROR: exception : '%s'" % traceback.format_exc()
self.sock.close()
self.is_running = False
def wait(self, timeout=TIMEOUT):
self.logger.log("%s Client is shutting down" % self.prefix)
self.keep_running = False
self._thread.join(timeout)
def main(argv):
retval = 0
# parse args
p = argparse.ArgumentParser()
p.add_argument('--host', '-b',
help='Required target host')
p.add_argument('--port', '-p', type=int,
help='Required target port number')
p.add_argument('--size', '-s', type=int, default=100, const=1, nargs='?',
help='Size of payload in bytes must be >= 0. Size of zero connects and disconnects with no data traffic.')
p.add_argument('--count', '-c', type=int, default=1, const=1, nargs='?',
help='Number of payloads to process must be >= 0. Count of zero connects and disconnects with no data traffic.')
p.add_argument('--name',
help='Optional logger prefix')
p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?",
help='Timeout in seconds. Default value "0" disables timeouts')
p.add_argument('--log', '-l',
action='store_true',
help='Write activity log to console')
del argv[0]
args = p.parse_args(argv)
# host
if args.host is None:
raise Exception("User must specify a host")
host = args.host
# port
if args.port is None:
raise Exception("User must specify a port number")
port = args.port
# size
if args.size < 0:
raise Exception("Size must be greater than or equal to zero")
size = args.size
# count
if args.count < 0:
raise Exception("Count must be greater than or equal to zero")
count = args.count
# name / prefix
prefix = args.name if args.name is not None else "ECHO_CLIENT (%d_%d_%d)" % \
(port, size, count)
# timeout
if args.timeout < 0.0:
raise Exception("Timeout must be greater than or equal to zero")
signaller = GracefulExitSignaler()
logger = None
try:
# logging
logger = Logger(title="%s host:%s port %d size:%d count:%d" % (prefix, host, port, size, count),
print_to_console=args.log,
save_for_dump=False)
client = TcpEchoClient(prefix, host, port, size, count, args.timeout, logger)
keep_running = True
while keep_running:
time.sleep(0.1)
if client.error is not None:
logger.log("%s Client stopped with error: %s" % (prefix, client.error))
keep_running = False
retval = 1
if client.exit_status is not None:
logger.log("%s Client stopped with status: %s" % (prefix, client.exit_status))
keep_running = False
if signaller.kill_now:
logger.log("%s Process killed with signal" % prefix)
keep_running = False
if keep_running and not client.is_running:
logger.log("%s Client stopped with no error or status" % prefix)
keep_running = False
except Exception:
client.error = "ERROR: exception : '%s'" % traceback.format_exc()
if logger is not None:
logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
retval = 1
if client.error is not None:
# write client errors to stderr
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
elines = client.error.split("\n")
for line in elines:
eprint("ERROR:", prefix, line)
return retval
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
"""Treadmill initialization and server presence daemon.
This service register the node into the Treadmill cell and, as such, is
responsible for publishing the node's capacity to the scheduler.
This service is also responsible for shutting down the node, when necessary or
requested, by disabling all traffic from and to the containers.
"""
import logging
import os
import time
import click
import kazoo
from treadmill import appenv
from treadmill import context
from treadmill import exc
from treadmill import sysinfo
from treadmill import utils
from treadmill import zknamespace as z
from treadmill import zkutils
if os.name == 'posix':
from .. import netdev
from .. import subproc
_LOGGER = logging.getLogger(__name__)
_WATCHDOG_CHECK_INTERVAL = 30
_KERNEL_WATCHDOG = None
def init():
"""Top level command handler."""
@click.command()
@click.option('--exit-on-fail', is_flag=True, default=False)
@click.option('--zkid', help='Zookeeper session ID file.')
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
def top(exit_on_fail, zkid, approot):
"""Run treadmill init process."""
_LOGGER.info('Initializing Treadmill: %s', approot)
tm_env = appenv.AppEnvironment(approot)
zkclient = zkutils.connect(context.GLOBAL.zk.url,
idpath=zkid,
listener=_exit_clear_watchdog_on_lost)
utils.report_ready()
while not zkclient.exists(z.SERVER_PRESENCE):
_LOGGER.warn('namespace not ready.')
time.sleep(30)
hostname = sysinfo.hostname()
zk_blackout_path = z.path.blackedout_server(hostname)
zk_presence_path = z.path.server_presence(hostname)
zk_server_path = z.path.server(hostname)
while not zkclient.exists(zk_server_path):
_LOGGER.warn('server %s not defined in the cell.', hostname)
time.sleep(30)
_LOGGER.info('Checking blackout list.')
blacklisted = bool(zkclient.exists(zk_blackout_path))
if not blacklisted:
# Node startup.
_node_start(tm_env, zkclient, hostname,
zk_server_path, zk_presence_path)
# Cleanup the watchdog directory
tm_env.watchdogs.initialize()
_init_network()
_LOGGER.info('Ready.')
down_reason = _main_loop(tm_env, zkclient, zk_presence_path)
if down_reason is not None:
_LOGGER.warning('Shutting down: %s', down_reason)
# Blackout the server.
zkutils.ensure_exists(
zkclient,
zk_blackout_path,
acl=[zkutils.make_host_acl(hostname, 'rwcda')],
data=down_reason
)
else:
# Node was already blacked out.
_LOGGER.warning('Shutting down blacked out node.')
# This is the shutdown phase.
# Delete the node
zkutils.ensure_deleted(zkclient, zk_presence_path)
zkclient.remove_listener(_exit_clear_watchdog_on_lost)
zkclient.stop()
zkclient.close()
_cleanup_network()
        # terminate all the running apps
_blackout_terminate(tm_env)
if exit_on_fail:
utils.sys_exit(-1)
else:
# Sit forever in a broken state
while True:
time.sleep(1000000)
return top
def _blackout_terminate(tm_env):
"""Blackout by terminating all containers in running dir.
"""
if os.name == 'posix':
        # XXX: This should be replaced with a supervisor module call hiding
# away all s6 related stuff
supervisor_dir = os.path.join(tm_env.init_dir, 'supervisor')
cleanupd_dir = os.path.join(tm_env.init_dir, 'cleanup')
# we first shutdown cleanup so link in /var/tmp/treadmill/cleanup
# will not be recycled before blackout clear
_LOGGER.info('try to shutdown cleanup service')
subproc.check_call(['s6_svc', '-d', cleanupd_dir])
subproc.check_call(['s6_svwait', '-d', cleanupd_dir])
# shutdown all the applications by shutting down supervisor
_LOGGER.info('try to shutdown supervisor')
subproc.check_call(['s6_svc', '-d', supervisor_dir])
else:
# TODO: Implement terminating containers on windows
pass
def _init_network():
"""Initialize network.
"""
if os.name == 'nt':
return
# (Re)Enable IP forwarding
netdev.dev_conf_forwarding_set('tm0', True)
def _cleanup_network():
"""Cleanup network.
"""
if os.name == 'nt':
return
# Disable network traffic from and to the containers.
netdev.dev_conf_forwarding_set('tm0', False)
def _node_start(tm_env, zkclient, hostname,
zk_server_path, zk_presence_path):
"""Node startup. Try to re-establish old session or start fresh.
"""
old_session_ok = False
try:
_data, metadata = zkclient.get(zk_presence_path)
if metadata.owner_session_id == zkclient.client_id[0]:
_LOGGER.info('Reconnecting with previous session: %s',
metadata.owner_session_id)
old_session_ok = True
else:
_LOGGER.info('Session id does not match, new session.')
zkclient.delete(zk_presence_path)
except kazoo.client.NoNodeError:
_LOGGER.info('%s does not exist.', zk_presence_path)
if not old_session_ok:
_node_initialize(tm_env,
zkclient, hostname,
zk_server_path, zk_presence_path)
def _node_initialize(tm_env, zkclient, hostname,
zk_server_path, zk_presence_path):
"""Node initialization. Should only be done on a cold start.
"""
new_node_info = sysinfo.node_info(tm_env)
# Merging scheduler data with node_info data
node_info = zkutils.get(zkclient, zk_server_path)
node_info.update(new_node_info)
_LOGGER.info('Registering node: %s: %s, %r',
zk_server_path, hostname, node_info)
zkutils.update(zkclient, zk_server_path, node_info)
host_acl = zkutils.make_host_acl(hostname, 'rwcda')
_LOGGER.debug('host_acl: %r', host_acl)
zkutils.put(zkclient,
zk_presence_path, {'seen': False},
acl=[host_acl],
ephemeral=True)
# Invoke the local node initialization
tm_env.initialize(node_info)
def _exit_clear_watchdog_on_lost(state):
_LOGGER.debug('ZK connection state: %s', state)
if state == zkutils.states.KazooState.LOST:
_LOGGER.info('Exiting on ZK connection lost.')
utils.sys_exit(-1)
def _main_loop(tm_env, zkclient, zk_presence_path):
"""Main loop.
Wait for zk event and check watchdogs.
"""
down_reason = None
# Now that the server is registered, setup the stop-on-delete
# trigger and the deadman's trigger.
node_deleted_event = zkclient.handler.event_object()
node_deleted_event.clear()
@zkclient.DataWatch(zk_presence_path)
@exc.exit_on_unhandled
def _exit_on_delete(data, _stat, event):
"""Force exit if server node is deleted."""
if (data is None or
(event is not None and event.type == 'DELETED')):
# The node is deleted
node_deleted_event.set()
return False
else:
# Reestablish the watch.
return True
while not node_deleted_event.wait(_WATCHDOG_CHECK_INTERVAL):
# NOTE: The loop time above is tailored to the kernel watchdog time.
# Be very careful before changing it.
# Check our watchdogs
result = tm_env.watchdogs.check()
if result:
# Something is wrong with the node, shut it down
down_reason = 'watchdogs %r failed.' % result
break
return down_reason
|
|
#!/usr/bin/python
#
# Copyright 2016 Ansible by Red Hat
#
# This file is part of ansible-container
#
DOCUMENTATION = '''
module: k8s_route
short_description: Create or remove a route on a Kubernetes or OpenShift cluster.
description:
- Create or remove a route on a Kubernetes or OpenShift cluster by setting the C(state) to I(present) or I(absent).
  - The module is idempotent and will not replace an existing route unless the C(replace) option is passed.
- Supports check mode. Use check mode to view a list of actions the module will take.
options:
'''
EXAMPLES = '''
'''
RETURN = '''
'''
import logging
import logging.config
from ansible.module_utils.basic import *
logger = logging.getLogger('oso_route')
LOGGING = (
{
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'ansible-container.log'
}
},
'loggers': {
'oso_route': {
'handlers': ['file'],
'level': 'DEBUG',
},
'container': {
'handlers': ['file'],
'level': 'DEBUG',
},
'compose': {
'handlers': [],
'level': 'INFO'
},
'docker': {
'handlers': [],
'level': 'INFO'
}
},
}
)
class RouteManager(object):
def __init__(self):
self.arg_spec = dict(
project_name=dict(type='str', aliases=['namespace']),
state=dict(type='str', choices=['present', 'absent'], default='present'),
labels=dict(type='dict'),
route_name=dict(type='str'),
host=dict(type='str'),
service_name=dict(type='str', required=True, aliases=['to']),
service_port=dict(type='str', required=True, aliases=['port']),
replace=dict(type='bool', default=False),
cli=dict(type='str', choices=['kubectl', 'oc'], default='oc'),
debug=dict(type='bool', default=False)
)
self.module = AnsibleModule(self.arg_spec,
supports_check_mode=True)
self.project_name = None
self.state = None
self.labels = None
self.route_name = None
self.host = None
self.service_name = None
self.service_port = None
self.replace = None
self.cli = None
self.api = None
self.debug = None
self.check_mode = self.module.check_mode
def exec_module(self):
for key in self.arg_spec:
setattr(self, key, self.module.params.get(key))
if self.debug:
LOGGING['loggers']['container']['level'] = 'DEBUG'
LOGGING['loggers']['oso_route']['level'] = 'DEBUG'
logging.config.dictConfig(LOGGING)
self.api = OriginAPI(self.module)
actions = []
changed = False
routes = dict()
results = dict()
project_switch = self.api.set_project(self.project_name)
if not project_switch:
actions.append("Create project %s" % self.project_name)
if not self.check_mode:
self.api.create_project(self.project_name)
if self.state == 'present':
route = self.api.get_resource('route', self.route_name)
if not route:
template = self._create_template()
changed = True
actions.append("Create route %s" % self.route_name)
if not self.check_mode:
self.api.create_from_template(template=template)
elif route and self.replace:
template = self._create_template()
changed = True
actions.append("Replace route %s" % self.route_name)
if not self.check_mode:
self.api.replace_from_template(template=template)
routes[self.route_name.replace('-', '_') + '_route'] = self.api.get_resource('route', self.route_name)
elif self.state == 'absent':
if self.api.get_resource('route', self.route_name):
changed = True
actions.append("Delete route %s" % self.route_name)
if not self.check_mode:
self.api.delete_resource('route', self.route_name)
results['changed'] = changed
if self.check_mode:
results['actions'] = actions
if routes:
results['ansible_facts'] = routes
self.module.exit_json(**results)
def _create_template(self):
'''
apiVersion: v1
kind: Route
metadata:
name: wordpress-wordpress
labels:
wordpress: wordpress
spec:
host: wordpress.local
to:
kind: Service
name: wordpress-wordpress
port:
targetPort: main
'''
template = dict(
apiVersion="v1",
kind="Route",
metadata=dict(
name=self.route_name,
),
spec=dict(
to=dict(
kind="Service",
name=self.service_name
),
port=dict(
targetPort=self.service_port
)
)
)
if self.host:
template['spec']['host'] = self.host
if self.labels:
template['metadata']['labels'] = self.labels
return template
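# For reference, a hedged illustration (hypothetical values, mirroring the YAML
# in the _create_template docstring) of the dict that method produces; 'host'
# and 'labels' only appear when those options were supplied.
_EXAMPLE_ROUTE_TEMPLATE = dict(
    apiVersion="v1",
    kind="Route",
    metadata=dict(
        name="wordpress-wordpress",
        labels=dict(wordpress="wordpress")
    ),
    spec=dict(
        host="wordpress.local",
        to=dict(
            kind="Service",
            name="wordpress-wordpress"
        ),
        port=dict(
            targetPort="main"
        )
    )
)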
# The following will be included by `ansible-container shipit` when cloud modules are copied into the role library path.
import re
import json
class OriginAPI(object):
def __init__(self, module, target="oc"):
self.target = target
self.module = module
@staticmethod
def use_multiple_deployments(services):
'''
Inspect services and return True if the app supports multiple replica sets.
:param services: list of docker-compose service dicts
:return: bool
'''
multiple = True
for service in services:
if not service.get('ports'):
multiple = False
if service.get('volumes_from'):
multiple = False
return multiple
def call_api(self, cmd, data=None, check_rc=False, error_msg=None):
rc, stdout, stderr = self.module.run_command(cmd, data=data)
logger.debug("Received rc: %s" % rc)
logger.debug("stdout:")
logger.debug(stdout)
logger.debug("stderr:")
logger.debug(stderr)
if check_rc and rc != 0:
self.module.fail_json(msg=error_msg, stderr=stderr, stdout=stdout)
return rc, stdout, stderr
def create_from_template(self, template=None, template_path=None):
if template_path:
logger.debug("Create from template %s" % template_path)
error_msg = "Error Creating %s" % template_path
cmd = "%s create -f %s" % (self.target, template_path)
rc, stdout, stderr = self.call_api(cmd, check_rc=True, error_msg=error_msg)
return stdout
if template:
logger.debug("Create from template:")
formatted_template = json.dumps(template, sort_keys=False, indent=4, separators=(',', ':'))
logger.debug(formatted_template)
cmd = "%s create -f -" % self.target
rc, stdout, stderr = self.call_api(cmd, data=formatted_template, check_rc=True,
error_msg="Error creating from template.")
return stdout
def replace_from_template(self, template=None, template_path=None):
if template_path:
logger.debug("Replace from template %s" % template_path)
cmd = "%s replace -f %s" % (self.target, template_path)
error_msg = "Error replacing %s" % template_path
rc, stdout, stderr = self.call_api(cmd, check_rc=True, error_msg=error_msg)
return stdout
if template:
logger.debug("Replace from template:")
formatted_template = json.dumps(template, sort_keys=False, indent=4, separators=(',', ':'))
logger.debug(formatted_template)
cmd = "%s replace -f -" % self.target
rc, stdout, stderr = self.call_api(cmd, data=formatted_template, check_rc=True,
error_msg="Error replacing from template")
return stdout
def delete_resource(self, type, name):
cmd = "%s delete %s/%s" % (self.target, type, name)
logger.debug("exec: %s" % cmd)
error_msg = "Error deleting %s/%s" % (type, name)
rc, stdout, stderr = self.call_api(cmd, check_rc=True, error_msg=error_msg)
return stdout
def get_resource(self, type, name):
result = None
cmd = "%s get %s/%s -o json" % (self.target, type, name)
logger.debug("exec: %s" % cmd)
rc, stdout, stderr = self.call_api(cmd)
if rc == 0:
result = json.loads(stdout)
elif rc != 0 and not re.search('not found', stderr):
error_msg = "Error getting %s/%s" % (type, name)
self.module.fail_json(msg=error_msg, stderr=stderr, stdout=stdout)
return result
def set_context(self, context_name):
cmd = "%s user-context %s" % (self.target, context_name)
logger.debug("exec: %s" % cmd)
error_msg = "Error switching to context %s" % context_name
rc, stdout, stderr = self.call_api(cmd, check_rc=True, error_msg=error_msg)
return stdout
def set_project(self, project_name):
result = True
cmd = "%s project %s" % (self.target, project_name)
logger.debug("exec: %s" % cmd)
rc, stdout, stderr = self.call_api(cmd)
if rc != 0:
result = False
if not re.search('does not exist', stderr):
error_msg = "Error switching to project %s" % project_name
self.module.fail_json(msg=error_msg, stderr=stderr, stdout=stdout)
return result
def create_project(self, project_name):
result = True
cmd = "%s new-project %s" % (self.target, project_name)
logger.debug("exec: %s" % cmd)
error_msg = "Error creating project %s" % project_name
self.call_api(cmd, check_rc=True, error_msg=error_msg)
return result
def get_deployment(self, deployment_name):
cmd = "%s deploy %s" % (self.target, deployment_name)
logger.debug("exec: %s" % cmd)
rc, stdout, stderr = self.call_api(cmd)
if rc != 0:
if not re.search('not found', stderr):
error_msg = "Error getting deployment state %s" % deployment_name
self.module.fail_json(msg=error_msg, stderr=stderr, stdout=stdout)
return stdout
def main():
manager = RouteManager()
manager.exec_module()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# When solving a bundle adjustment problem with pair-wise feature
# matches (i.e. no 3+ way matches), the bad matches will often find a
# zero error position when minimizing the mre, but they may be
# nonsensically far away from the other points. We can't catch every
# bad match this way, but we can get some that don't show up in the
# mre test.
# For each image, estimate the depth of each feature (i.e. distance
# from the camera.) Then compute an average depth and standard
# deviation. Leverage this to find depth outliers.
# Notes: this filter will work best for mostly nadir shots
# (vs. oblique angle shots) where the feature depth is more consistent
# throughout the image.  However, for the use cases here, oblique
# shots tend to show up at the fringes of the data set due to turns
# and often have poor connectivity and aren't as useful anyway.
# In essence this script computes, per feature, an error metric based on
# how far the feature's depth is from the owning image's mean depth.  In
# non-interactive mode it then takes statistics over that metric and culls
# features lying more than 'n' standard deviations above the metric's own
# mean -- an admittedly indirect criterion, aimed at the gross depth
# outliers that the mre test misses.
import argparse
import pickle
import cv2
#import json
import math
import numpy as np
import os
import sys
sys.path.append('../lib')
import groups
import project
import match_culling as cull
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('project', help='project directory')
parser.add_argument('--stddev', type=float, default=3, help='how many standard deviations above the mean for auto discarding features')
parser.add_argument('--interactive', action='store_true', help='interactively review reprojection errors from worst to best and select for deletion or keep.')
args = parser.parse_args()
proj = project.ProjectMgr(args.project)
proj.load_images_info()
proj.load_features()
proj.undistort_keypoints()
source = 'matches_direct'
print("Loading matches:", source)
matches_orig = pickle.load( open( os.path.join(args.project, source), "rb" ) )
print('Number of original features:', len(matches_orig))
print("Loading optimized matches: matches_opt")
matches_opt = pickle.load( open( os.path.join(args.project, "matches_opt"), "rb" ) )
print('Number of optimized features:', len(matches_opt))
# load the group connections within the image set
group_list = groups.load(args.project)
print('Main group size:', len(group_list[0]))
# compute the depth of each feature for each image
def compute_feature_depths(image_list, group, matches):
print("Computing depths for all match points...")
# init structures
for image in image_list:
image.z_list = []
# make a list of distances for each feature of each image
for match in matches:
feat_ned = match[0]
count = 0
for m in match[1:]:
if m[0] in group:
count += 1
if count < 2:
continue
for m in match[1:]:
if m[0] in group:
image = image_list[m[0]]
cam_ned, ypr, quat = image.get_camera_pose(opt=True)
dist = np.linalg.norm(np.array(feat_ned) - np.array(cam_ned))
image.z_list.append(dist)
# compute stats
for image in image_list:
if len(image.z_list):
avg = np.mean(np.array(image.z_list))
std = np.std(np.array(image.z_list))
else:
avg = None
std = None
image.z_avg = avg
image.z_std = std
print(image.name, 'features:', len(image.z_list), 'avg:', avg, 'std:', std)
# make a list of relative depth errors corresponding to the
# matches list
error_list = []
for i, match in enumerate(matches):
feat_ned = match[0]
metric_sum = 0
count = 0
for p in match[1:]:
if p[0] in group:
image = image_list[p[0]]
count += 1
cam_ned, ypr, quat = image.get_camera_pose(opt=True)
dist = np.linalg.norm(np.array(feat_ned) - np.array(cam_ned))
dist_error = abs(dist - image.z_avg)
#dist_metric = dist_error / image.z_std
dist_metric = dist_error
metric_sum += dist_metric
if count >= 2:
metric_avg = metric_sum / count
error_list.append( [metric_avg, i, 0] )
# sort by error, worst is first
error_list = sorted(error_list, key=lambda fields: fields[0],
reverse=True)
return error_list
def mark_outliers(error_list, trim_stddev):
print("Marking outliers...")
sum = 0.0
count = len(error_list)
    # numerically it is better to sum up a list of floating point
# numbers from smallest to biggest (error_list is sorted from
# biggest to smallest)
for line in reversed(error_list):
sum += line[0]
# stats on error values
print(" computing stats...")
mean = sum / count
stddev_sum = 0.0
for line in error_list:
error = line[0]
stddev_sum += (mean-error)*(mean-error)
stddev = math.sqrt(stddev_sum / count)
print("mean = %.4f stddev = %.4f" % (mean, stddev))
# mark match items to delete
print(" marking outliers...")
mark_count = 0
for line in error_list:
# print "line:", line
if line[0] > mean + stddev * trim_stddev:
cull.mark_feature(matches_orig, line[1], line[2], line[0])
cull.mark_feature(matches_opt, line[1], line[2], line[0])
mark_count += 1
return mark_count
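# A toy illustration (made-up numbers) of the n-sigma rule mark_outliers()
# applies: features whose depth-error metric exceeds mean + n * stddev get
# flagged for removal.
def _example_sigma_rule(trim_stddev=3):
    errs = np.array([1.0] * 10 + [9.0])
    threshold = np.mean(errs) + trim_stddev * np.std(errs)   # ~8.63 for this data
    return [e for e in errs if e > threshold]                # -> [9.0]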
error_list = compute_feature_depths(proj.image_list, group_list[0], matches_opt)
if args.interactive:
# interactively pick outliers
mark_list = cull.show_outliers(error_list, matches_opt, proj.image_list)
# mark both direct and optimized match lists as requested
cull.mark_using_list(mark_list, matches_orig)
cull.mark_using_list(mark_list, matches_opt)
mark_sum = len(mark_list)
else:
# trim outliers by some # of standard deviations high
mark_sum = mark_outliers(error_list, args.stddev)
# after marking the bad matches, now count how many remaining features
# show up in each image
for image in proj.image_list:
image.feature_count = 0
for i, match in enumerate(matches_orig):
for j, p in enumerate(match[1:]):
if p[1] != [-1, -1]:
image = proj.image_list[ p[0] ]
image.feature_count += 1
# make a dict of all images with less than 25 feature matches
weak_dict = {}
for i, img in enumerate(proj.image_list):
# print img.name, img.feature_count
if img.feature_count > 0 and img.feature_count < 25:
weak_dict[i] = True
print('weak images:', weak_dict)
# mark any features in the weak images list
for i, match in enumerate(matches_orig):
#print 'before:', match
for j, p in enumerate(match[1:]):
if p[0] in weak_dict:
match[j+1] = [-1, -1]
mark_sum += 1
#print 'after:', match
if mark_sum > 0:
print('Outliers removed from match lists:', mark_sum)
result = input('Save these changes? (y/n):')
if result == 'y' or result == 'Y':
cull.delete_marked_features(matches_orig)
cull.delete_marked_features(matches_opt)
# write out the updated match dictionaries
print("Writing original matches...")
pickle.dump(matches_orig, open(os.path.join(args.project, source), "wb"))
print("Writing optimized matches...")
pickle.dump(matches_opt, open(os.path.join(args.project, "matches_opt"), "wb"))
|
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, layer_name, block_index, \
layer_input, layer_kernel, layer_stride, layer_padding, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
layer_kernel['layer%s.%s.conv1' %(layer_name, block_index)] = 3
layer_stride['layer%s.%s.conv1' %(layer_name, block_index)] = stride
layer_padding['layer%s.%s.conv1' %(layer_name, block_index)] = 1
layer_kernel['layer%s.%s.conv2' %(layer_name, block_index)] = 3
        layer_stride['layer%s.%s.conv2' %(layer_name, block_index)] = 1  # conv2 in BasicBlock is always stride 1
layer_padding['layer%s.%s.conv2' %(layer_name, block_index)] = 1
self.layer_input = layer_input
self.layer_name = layer_name
self.block_index = block_index
# self.exist_downsample = False
def forward(self, x):
residual = x
self.layer_input['layer%s.%s.conv1' %(self.layer_name, self.block_index)] = x.data
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
self.layer_input['layer%s.%s.conv2' %(self.layer_name, self.block_index)] = out.data
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
self.layer_input['layer%s.%s.downsample.0' %(self.layer_name, self.block_index)] = x.data
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, layer_name, block_index, \
layer_input, layer_kernel, layer_stride, layer_padding, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
layer_kernel['layer%s.%s.conv1' %(layer_name, block_index)] = 1
layer_stride['layer%s.%s.conv1' %(layer_name, block_index)] = 1
        layer_padding['layer%s.%s.conv1' %(layer_name, block_index)] = 0  # 1x1 conv uses no padding
layer_kernel['layer%s.%s.conv2' %(layer_name, block_index)] = 3
layer_stride['layer%s.%s.conv2' %(layer_name, block_index)] = stride
layer_padding['layer%s.%s.conv2' %(layer_name, block_index)] = 1
layer_kernel['layer%s.%s.conv3' %(layer_name, block_index)] = 1
layer_stride['layer%s.%s.conv3' %(layer_name, block_index)] = 1
        layer_padding['layer%s.%s.conv3' %(layer_name, block_index)] = 0  # 1x1 conv uses no padding
self.layer_input = layer_input
self.layer_name = layer_name
self.block_index = block_index
def forward(self, x):
residual = x
self.layer_input['layer%s.%s.conv1' %(self.layer_name, self.block_index)] = x.data
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
self.layer_input['layer%s.%s.conv2' %(self.layer_name, self.block_index)] = out.data
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
self.layer_input['layer%s.%s.conv3' %(self.layer_name, self.block_index)] = out.data
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
self.layer_input['layer%s.%s.downsample.0' %(self.layer_name, self.block_index)] = x.data
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
# Modified by Chen Shangyu to get layer inputs
self.layer_input = dict()
self.layer_kernel = {'conv1': 7}
self.layer_stride = {'conv1': 2}
self.layer_padding = {'conv1': 3}
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], layer_name='1')
self.layer2 = self._make_layer(block, 128, layers[1], layer_name='2', stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], layer_name='3', stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], layer_name='4', stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, layer_name, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
self.layer_kernel['layer%s.0.downsample.0' %layer_name] = 1
self.layer_stride['layer%s.0.downsample.0' %layer_name] = stride
self.layer_padding['layer%s.0.downsample.0' %layer_name] = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
# def __init__(self, inplanes, planes, layer_name, block_index, \
# layer_input, layer_kernel, layer_stride, layer_padding, stride=1, downsample=None):
layers = []
layers.append(block(self.inplanes, planes, layer_name, block_index = 0,
layer_input = self.layer_input,
layer_kernel = self.layer_kernel,
layer_stride = self.layer_stride,
layer_padding = self.layer_padding,
stride = stride,
downsample = downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, layer_name, block_index = i,
layer_input = self.layer_input,
layer_kernel = self.layer_kernel,
layer_stride = self.layer_stride,
layer_padding = self.layer_padding))
return nn.Sequential(*layers)
def forward(self, x):
self.layer_input['conv1'] = x.data
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
self.layer_input['fc'] = x.data
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
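# A minimal usage sketch, assuming a standard 224x224 ImageNet-sized input:
# the modified ResNet above records the input tensor of every conv/fc layer in
# ``model.layer_input``, keyed by names such as 'conv1' or 'layer1.0.conv2'.
def _example_capture_layer_inputs():
    import torch

    model = resnet18(pretrained=False)
    model.eval()
    with torch.no_grad():
        model(torch.randn(1, 3, 224, 224))
    # e.g. model.layer_input['layer1.0.conv1'].shape == torch.Size([1, 64, 56, 56])
    return {name: t.shape for name, t in model.layer_input.items()}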
|
|
# encoding: utf-8
import hashlib
import falcon
import ldap
import os
import random
import re
import string
from base64 import b64decode
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509.oid import NameOID
from datetime import datetime, date, timedelta
from ldap import modlist
from identidude import config
from identidude.decorators import serialize, login_required, apidoc, ldap_connect
from identidude.forms import validate, required, \
RE_USERNAME, RE_CHECKBOX, RE_DATE, RE_EMAIL, RE_PHONE
def serialize_subject(subj):
return "".join(["/%s=%s" % (j.oid._name, j.value) for j in subj])
def serialize_cert(cert):
common_name, = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
return dict(
common_name = common_name.value,
subject = serialize_subject(cert.subject),
issuer = serialize_subject(cert.issuer),
serial = "%x" % cert.serial,
signed = cert.not_valid_before,
expires = cert.not_valid_after)
def ad_time(b):
i = int(b)
if i == 9223372036854775807 or i == 0:
return None
    return datetime.utcfromtimestamp(-11644473600 + (i / 10000000.0))  # AD stores FILETIME-style values: 100 ns intervals since 1601-01-01 UTC
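# Worked example of the conversion above: Active Directory stores these fields
# as 100 ns ticks since 1601-01-01 UTC (FILETIME), and 116444736000000000 ticks
# is exactly 11644473600 seconds, i.e. the Unix epoch:
#   ad_time(b'116444736000000000') -> datetime(1970, 1, 1, 0, 0)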
class ProfileResource(object):
@serialize
@ldap_connect
def on_get(self, req, resp, conn, username):
search_filter = '(&(objectClass=user)(objectCategory=person)(samaccountname=%s))' % username
attribs = 'mail', "mobile", 'userPrincipalName', \
'sAMAccountName', "sAMAccountType", \
"givenName", "sn", "userAccountControl", \
"memberOf", "primaryGroupID", \
"whenChanged", "whenCreated", "accountExpires", "pwdLastSet", "lastLogon", \
"userCertificate", "sshPublicKey", "otherMailbox"
r = conn.search_s(config.LDAP_BASEDN, ldap.SCOPE_SUBTREE, search_filter, attribs)
for dn, entry in r:
if not dn: continue
user = dict()
user["created"] = datetime.strptime(entry.get("whenCreated").pop().decode("utf-8"), "%Y%m%d%H%M%S.0Z")
user["changed"] = datetime.strptime(entry.get("whenChanged").pop().decode("utf-8"), "%Y%m%d%H%M%S.0Z")
user["expires"] = ad_time(entry.get("accountExpires").pop())
user["last_login"] = ad_time(entry.get("lastLogon").pop())
user["locked"] = bool(int(entry.get("userAccountControl")[0]) & 2)
user["mail"], = entry.get("mail", (None,))
user["mobile"], = entry.get("mobile", (None,))
user["name"], = entry.get("sAMAccountName")
user["normal"] = bool(int(entry.get("sAMAccountType")[0]) & 0x30000000)
user["password_set"] = ad_time(entry.get("pwdLastSet").pop())
user["ssh_keys"] = entry.get("sshPublicKey", ())
user["certificates"] = [
serialize_cert(x509.load_der_x509_certificate(j, default_backend()))
for j in entry.get("userCertificate", ())]
try:
user["recovery_mail"], = entry.get("otherMailbox")
except TypeError:
# No recovery e-mail configured
pass
try:
user["gn"], = entry.get("givenName")
user["sn"], = entry.get("sn")
except TypeError:
pass
if user["mail"]:
user["avatar"] = "https://www.gravatar.com/avatar/%s.jpg?s=32" % hashlib.md5(user["mail"]).hexdigest()
break
return user
@serialize
@login_required(delegate_credentials=True)
@ldap_connect
def on_put(self, req, resp, conn, username):
ssh_public_keys = [t.encode("ascii") for t in [s.strip() for s in req.get_param("ssh_public_keys", default="").split("\n")] if t]
gn = req.get_param("gn", required=True)
sn = req.get_param("sn", default="")
common_name = " ".join([gn, sn]).strip()
search_filter = '(&(objectClass=user)(objectCategory=person)(samaccountname=%s))' % username
attribs = "displayName", "givenName", "sn", "mail", "mobile", "c", "otherMailbox", "userAccountControl", "sshPublicKey"
        for dn, current in conn.search_s(config.LDAP_BASEDN, ldap.SCOPE_SUBTREE, search_filter, attribs):
if not dn: continue
break
else:
raise falcon.HTTPNotFound()
account_control = int(current.get("userAccountControl")[0])
if req.get_param_as_bool("locked"):
account_control |= 2
else:
account_control &= 0xfffffffd
if req.get_param_as_bool("password_expires"):
account_control |= 0x10000
else:
account_control &= 0xfffeffff
attributes = [
("displayName", common_name),
("givenName", gn),
("sn", sn),
("mail", req.get_param("mail", default="")),
("mobile", req.get_param("mobile", default="")),
("c", req.get_param("c", default="")),
("otherMailbox", req.get_param("recovery_mail", default="")),
("userAccountControl", str(account_control))
]
delta = []
# Handle strings
for key, value in attributes:
old_value = current.get(key)
new_value = [value.encode("utf-8")]
if old_value == new_value:
continue
if key in current:
delta += [(1,key,None)]
if value:
delta += [(0,key,new_value)]
# Handle SSH keys
if set(current.get("sshPublicKey", ())) != set(ssh_public_keys):
if "sshPublicKey" in current:
delta += [(1,"sshPublicKey",None)]
if ssh_public_keys:
delta += [(0,"sshPublicKey", ssh_public_keys)]
# Handle password
if req.get_param("password"):
delta += [(1,"unicodePwd",None),(0,"unicodePwd",("\"%s\"" % req.get_param("password")).encode("utf-16-le"))]
if delta:
try:
conn.modify_s(dn, delta)
except ldap.LDAPError as e:
raise falcon.HTTPBadRequest(e.args[0]["desc"], e.args[0]["info"])
return {}
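    # Note on the delta format built in on_put above (a sketch, not original
    # documentation): python-ldap's modify_s() expects (mod_op, attr, values)
    # tuples where ldap.MOD_ADD == 0 and ldap.MOD_DELETE == 1, so e.g.
    #
    #     [(1, "mobile", None), (0, "mobile", [b"+372 5551234"])]
    #
    # first drops every existing "mobile" value and then adds the new one.
    # The phone number is purely illustrative.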
@serialize
@login_required(delegate_credentials=True)
@ldap_connect
def on_delete(self, req, resp, conn, username):
search_filter = '(&(objectClass=user)(objectCategory=person)(samaccountname=%s))' % username
        r = conn.search_s(config.LDAP_BASEDN, ldap.SCOPE_SUBTREE, search_filter, [])
for dn, entry in r:
if not dn: continue
try:
conn.delete_s(dn)
except ldap.LDAPError as e:
                raise falcon.HTTPBadRequest(e.args[0]["desc"], e.args[0]["info"])
@apidoc
class UserListResource:
@serialize
@login_required(delegate_credentials=True)
@ldap_connect
def on_post(self, req, resp, conn):
req._parse_form_urlencoded() # Merge POST-ed stuff to get_param
certificates = req.get_param_as_list("certificates") or ()
username = req.get_param("name", required=True)
gn = req.get_param("gn", required=True)
sn = req.get_param("sn")
common_name = " ".join([gn, sn])
dn = "cn=%s,cn=Users,%s" % (common_name, config.LDAP_BASEDN)
upn = "%s@%s" % (username, config.REALM.lower())
pwd = req.get_param("password")
# Make sure we're not getting hacked
RESERVED_GROUPS = set(["root", "audio", "video", "wheel", "sudo", \
"admin", "daemon", "bin", "lp", "pulse", "lightdm", "dnsmasq", \
"nobody", "nogroup", "shadow", "kvm", "tape", "floppy", "cdrom", \
"nslcd", "proxy", "man", "news", "tty", "adm", "disk"])
        if username in RESERVED_GROUPS:  # TODO: Use better HTTP status code
            raise falcon.HTTPConflict("Error", "Username %s is reserved" % username)
ldif_user = modlist.addModlist({
"displayName": common_name.encode("utf-8"),
"samaccountname": username.encode("utf-8"),
"givenName": gn.encode("utf-8"),
"sn": sn.encode("utf-8"),
"c": req.get_param("c", default="").encode("utf-8"),
#"birthdate": req.get_param("birthday", default="").encode("utf-8"),
#"gender": req.get_param("gender", default="").encode("utf-8"),
"otherMailbox": req.get_param("mail").encode("utf-8"),
"mail": ("%s@%s" % (username, config.MAIL_DOMAIN)).encode("utf-8"),
"unicodePwd": ("\"%s\"" % pwd).encode("utf-16-le") if pwd else b"",
"userAccountControl": b"544",
"userPrincipalName": upn.encode("utf-8"),
"objectclass": [b"top", b"person", b"organizationalPerson", b"user"],
"userCertificate": [b64decode(j) for j in certificates]
if req.get_param_as_bool("import_certificates") else [],
#"altSecurityIdentities": TODO
})
try:
conn.add_s(dn, ldif_user)
except ldap.ALREADY_EXISTS:
raise falcon.HTTPConflict("Error", "User with such full name already exists")
|
|
"""Supporting definitions for the Python regression tests."""
import sys
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestSkipped(Error):
"""Test skipped.
    This can be raised to indicate that a test was deliberately
skipped, but not because a feature wasn't available. For
example, if some resource can't be used, such as the network
appears to be unavailable, this should be raised instead of
TestFailed.
"""
class ResourceDenied(TestSkipped):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
    has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
import os
for dirname in sys.path:
try:
os.unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
except os.error:
pass
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
try:
os.unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
except os.error:
pass
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if type(x) == type(0.0) or type(y) == type(0.0):
try:
x, y = coerce(x, y)
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and type(x) in (type(()), type([])):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return cmp(len(x), len(y))
return cmp(x, y)
try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0
is_jython = sys.platform.startswith('java')
import os
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print ('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN))
if fp is not None:
fp.close()
try:
os.unlink(TESTFN)
except:
pass
del os, fp
from os import unlink
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
import os
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def verify(condition, reason='test failed'):
"""Verify that condition is true. If not, raise TestFailed.
The optional argument reason can be given to provide
a better error text.
"""
if not condition:
raise TestFailed(reason)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def vereq(a, b):
"""Raise TestFailed if a == b is false.
This is better than verify(a == b) because, in case of failure, the
error message incorporates repr(a) and repr(b) so you can see the
inputs.
Note that "not (a == b)" isn't necessarily the same as "a != b"; the
former is tested.
"""
if not (a == b):
raise TestFailed, "%r == %r" % (a, b)
def check_syntax(statement):
try:
compile(statement, '<string>', 'exec')
except SyntaxError:
pass
else:
print 'Missing SyntaxError: "%s"' % statement
#=======================================================================
# Preliminary PyUNIT integration.
import unittest
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def run_suite(suite, testclass=None):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
if testclass is None:
msg = "errors occurred; run in verbose mode for details"
else:
msg = "errors occurred in %s.%s" \
% (testclass.__module__, testclass.__name__)
raise TestFailed(msg)
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, (unittest.TestSuite, unittest.TestCase)):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
if len(classes)==1:
testclass = classes[0]
else:
testclass = None
run_suite(suite, testclass)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
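# Illustrative usage (a sketch, not part of the original module): a regression
# test module would typically drive these helpers like so:
#
#     from test import test_support
#
#     class MyTest(unittest.TestCase):
#         def test_addition(self):
#             test_support.vereq(1 + 1, 2)
#
#     def test_main():
#         test_support.requires('network')   # ResourceDenied unless enabled
#         test_support.run_unittest(MyTest)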
|
|
""" Utilities for dealing with uploading to S3. """
import StringIO
import gzip
import boto
from django.conf import settings
from zope.interface import Interface, implements
from go.errors import VumiGoError
class BucketError(VumiGoError):
""" Raised when an error occurs during an operation on a bucket. """
class KeyAlreadyExistsError(BucketError):
""" Raised when an S3 key unexpectedly already exists. """
class BucketConfig(object):
""" Helper for accessing Django GO_S3_BUCKET settings. """
def __init__(self, config_name):
self.config_name = config_name
def __getattr__(self, name):
bucket_config = settings.GO_S3_BUCKETS.get(self.config_name, {})
# We set defaults for "proxy", "proxy_port", and "is_secure" because we
# override them in tests to use an in-process moto fake instead of
# hitting S3 for real.
defaults = {
"proxy": None,
"proxy_port": None,
"is_secure": True,
}
defaults.update(settings.GO_S3_BUCKETS.get('defaults', {}))
if name in bucket_config:
return bucket_config[name]
if name in defaults:
return defaults[name]
raise AttributeError(
"BucketConfig %r has no attribute %r" % (self.config_name, name))
class IMultipartWriter(Interface):
def push_chunks(chunks):
"""
Push an iterator over chunks of data and yield files for multipart
uploading.
:param iter chunks:
An iterator over chunks of bytes.
:returns iter:
Returns an iterator over file-like objects, each of which is
a file part to upload.
"""
class MultipartWriter(object):
""" Helper for writing pending chunks of data. """
implements(IMultipartWriter)
def __init__(self, minimum_size=5 * 1024 * 1024):
self.minimum_size = minimum_size
self._clear_pending()
def _clear_pending(self):
self._pending = []
self._pending_size = 0
def _ready(self):
return self._pending_size >= self.minimum_size
def _empty(self):
return not bool(self._pending)
def _push_chunk(self, chunk):
self._pending.append(chunk)
self._pending_size += len(chunk)
def _pop_part(self):
fp = StringIO.StringIO("".join(self._pending))
self._clear_pending()
return fp
def push_chunks(self, chunks):
for chunk in chunks:
self._push_chunk(chunk)
if self._ready():
yield self._pop_part()
if not self._empty():
yield self._pop_part()
class GzipMultipartWriter(object):
""" Helper for tracking and compressing pending chunks of data. """
implements(IMultipartWriter)
def __init__(self, minimum_size=5 * 1024 * 1024):
self.minimum_size = minimum_size
self._string_file = StringIO.StringIO()
self._gzip_file = gzip.GzipFile(fileobj=self._string_file, mode='w')
def _clear_pending(self):
self._string_file.seek(0)
self._string_file.truncate()
def _ready(self):
return self._string_file.tell() >= self.minimum_size
def _empty(self):
return not bool(self._string_file.tell())
def _pop_part(self):
fp = StringIO.StringIO(self._string_file.getvalue())
self._clear_pending()
return fp
def push_chunks(self, chunks):
for chunk in chunks:
self._gzip_file.write(chunk)
if self._ready():
yield self._pop_part()
self._gzip_file.close()
if not self._empty():
yield self._pop_part()
class Bucket(object):
""" An S3 bucket.
:param str config_name:
The name of the bucket config.
Bucket configuration is defined via Django settings as follows:
::
GO_S3_BUCKETS = {
'defaults': {
'aws_access_key_id': 'MY-ACCESS-KEY-ID',
'aws_secret_access_key': 'SECRET',
},
'billing.archive': {
's3_bucket_name': 'go.vumi.org.billing.archive',
},
}
"""
def __init__(self, config_name):
self.config = BucketConfig(config_name)
def _s3_conn(self):
return boto.connect_s3(
self.config.aws_access_key_id, self.config.aws_secret_access_key,
proxy=self.config.proxy, proxy_port=self.config.proxy_port,
is_secure=self.config.is_secure)
def get_s3_bucket(self):
""" Return an S3 bucket object. """
conn = self._s3_conn()
return conn.get_bucket(self.config.s3_bucket_name)
def create(self):
""" Create the S3 bucket. """
conn = self._s3_conn()
return conn.create_bucket(self.config.s3_bucket_name)
def upload(self, key_name, chunks, headers=None, metadata=None,
gzip=False, replace=False):
""" Upload chunks of data to S3.
:param str key_name:
Key to upload to.
:param iter chunks:
Iterator over chunks of bytes to upload.
:param dict headers:
Dictionary of HTTP headers to upload with the file.
:param dict metadata:
Dictionary of S3 metadata to upload with the file.
Content-Type and Content-Encoding are copied from ``headers``.
:param bool gzip:
Whether to gzip the data before uploading it. Automatically
sets the Content-Encoding to ``gzip``.
:param bool replace:
Whether to allow an existing file to be replaced.
"""
bucket = self.get_s3_bucket()
if headers is None:
headers = {}
if metadata is None:
metadata = {}
if gzip:
writer = GzipMultipartWriter()
headers['Content-Encoding'] = 'gzip'
else:
writer = MultipartWriter()
for field in ('Content-Type', 'Content-Encoding'):
if field in headers:
metadata[field] = headers[field]
if not replace and bucket.get_key(key_name) is not None:
raise KeyAlreadyExistsError(
"Key %r already exists in bucket %r" % (key_name, bucket.name))
mp = bucket.initiate_multipart_upload(
key_name, headers=headers, metadata=metadata)
try:
for part_num, part in enumerate(writer.push_chunks(chunks)):
mp.upload_part_from_file(part, part_num=part_num + 1)
except:
mp.cancel_upload()
raise
else:
mp.complete_upload()
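        # Usage sketch (assumed configuration, not part of the original
        # module): with a GO_S3_BUCKETS entry named 'billing.archive' as in
        # the Bucket docstring, uploading gzipped chunks might look like
        #
        #     bucket = Bucket('billing.archive')
        #     bucket.upload('archive-2014-01.json.gz',
        #                   iter(['{"row": 1}\n', '{"row": 2}\n']),
        #                   headers={'Content-Type': 'application/json'},
        #                   gzip=True, replace=False)
        #
        # The key name and chunk contents are illustrative only.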
|
|
# This file is a part of OMPC (http://ompc.juricap.com/)
#
# for testing:
# import ompclib_numpy; reload(ompclib_numpy); from ompclib_numpy import *
# TODO
# - remove all references to array, use "ompc_base._init_data" instead
import sys
from itertools import izip as _izip, cycle as _cycle, repeat as _repeat
import numpy as np
import pylab as mpl
OMPCSEMI = Ellipsis
OMPCEND = None
end = OMPCEND
_dtype2numpy = {'double': 'f8', 'single': 'f4',
'int32': 'i4', 'uint32': 'u4',
'int16': 'i2', 'uint16': 'u2',
'int8': 'i1', 'uint8': 'u1',
'bool': 'bool',
}
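# _dsize() and _typegreater() below rely on a _dsize_dict table that is not
# defined in this excerpt; a minimal sketch of the assumed mapping (element
# sizes in bytes, mirroring the numpy codes above):
_dsize_dict = {'double': 8, 'single': 4,
               'int32': 4, 'uint32': 4,
               'int16': 2, 'uint16': 2,
               'int8': 1, 'uint8': 1,
               'bool': 1,
               }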
class mvar(object):
@staticmethod
def _DataObject(dtype, data):
return np.array(data, dtype=_dtype2numpy[dtype])
def __new__(cls, *args, **kwargs):
a = super(mvar, cls).__new__(cls, *args, **kwargs)
a._a = None
a.dtype = 'double'
a.msize = (0, 0)
return a
def _init_data(self, dtype, msize, data):
self.dtype = dtype
self.msize = msize
self._a = self._DataObject(dtype, data)
def __call__(self, *i):
mview = self.__getitem1__(i)
mview.__ompc_view__ = _mview(self, i, False)
return mview
def _ctypes_get(self):
return self._a.ctypes
ctypes = property(_ctypes_get, None, None,
"Ctypes-wrapped data object.")
def _lvalue_set(self, val):
assert hasattr(self, '__ompc_view__')
o = self.__ompc_view__
# FIXME: o.linear
o.viewed.__setitem1__(o.ins, val)
lvalue = property(None, _lvalue_set, None, "")
def __copy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
def __deepcopy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
class _mview(mvar):
def __init__(self, viewed, ins, linear):
self.viewed = viewed
self.ins = ins
self.linear = linear
def __repr__(self):
return "_mview(%r, %r, %r)"%(self.viewed, self.ins, self.linear)
def __str__(self):
return "<view of %r>"%(self.viewed)
class _el:
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def __pow__(self, right):
if self.left is None: return _el(right=right)
return self.left.__elpow__(right)
def __rpow__(self, left):
if self.right is None: return _el(left=left)
return left.__elpow__(self.right)
def __mul__(self, right):
if self.left is None: return _el(right=right)
return self.left.__elmul__(right)
def __rmul__(self, left):
if self.right is None: return _el(left=left)
return left.__elmul__(self.right)
elpow = _el()
elmul = _el()
def _dsize(dtype):
return _dsize_dict[dtype]
def _flatten(seq):
for item in seq:
if _isscalar(item) and not hasattr(item, '__len__'):
yield item
else:
for subitem in _flatten(item):
yield subitem
def _isscalar(A):
if hasattr(A, '__len__') and len(A) > 1:
return False
elif hasattr(A, '__getitem__'):
try: A[1]
except: return True
else: return False
elif hasattr(A, '__iter__'):
return False
# doesn't have length nor multiple elements and doesn't support iteration
return True
def _typegreater_(Adt, Bdt):
"""Returns type with higher precision."""
if isinstance(Adt, _marray): Adt = Adt.dtype
if isinstance(Bdt, _marray): Bdt = Bdt.dtype
return _dsize_dict[Adt] >= _dsize_dict[Bdt] and Adt or Bdt
def _typegreater(Adt, Bdt):
"""Returns type with higher precision."""
return _dsize_dict[Adt] >= _dsize_dict[Bdt] and Adt or Bdt
def _size(X, d=None):
if isinstance(X, _marray):
res = X.msize
else:
from operator import isSequenceType
shp = []
while isSequenceType(X):
shp.append(len(X))
X = X[0]
res = tuple(reversed(shp))
# minimum shape is 2 dimensional
if len(res) == 1:
res = (1, res[0])
if d is None:
return res
else:
return res[d]
def _ndshape(msize, *i):
"""Determine the shape of a view on A with slicing specified in `i`.
"""
shp = []
for idim, x in enumerate(i):
if isinstance(x, slice):
start, stop, step = x.start, x.stop, x.step
if x.start is None: start = 0
if x.stop == sys.maxint or x.stop is None: stop = msize[idim]
if x.step is None: step = 1
shp.append( len(range(start,stop,step)) )
elif _isscalar(x):
shp.append(1)
elif hasattr(x, '__len__'):
shp.append(len(x))
else:
raise NotImplementedError()
if len(shp) == 1: shp[:0] = [1]
return shp
def _ndshape1(msize, *i):
"""Determine shape of a view on size msize with slicing specified in `i`.
"""
shp = []
for idim, x in enumerate(i):
if isinstance(x, _mslice):
if x.hasnoend():
shp.append( len(mslice[x.start:x.step:msize[idim]]) )
else:
shp.append( len(x) )
elif _isscalar(x):
shp.append(1)
elif hasattr(x, '__len__'):
shp.append(len(x))
else:
if isinstance(x, slice):
raise NotImplementedError()
shp.append(mrange(x))
else:
raise NotImplementedError()
#if len(shp) == 1: shp[:0] = [1]
if len(shp) == 1:
if msize[0] == 1: shp[:0] = [1]
else: shp.append(1)
return shp
def isempty(A):
return np.prod(A.msize) == 0
def _dot(A, B):
if not isinstance(A, _marray) or not isinstance(B, _marray):
raise NotImplementedError("arguments must be 'marray's.")
return np.dot(B.reshape(B.msize), A).T
def _squeeze(A):
res = A.__copy__()
res.msize = [ x for x in res.msize if x > 1 ]
return res
def _msize(*args):
    if len(args) == 1 and hasattr(args[0], '__len__'):
        args = args[0]
    if len(args) > 2 and args[-1] == 1: args = args[:-1]
    if len(args) == 1:
        # a single scalar size means a square shape, as in MATLAB's zeros(n)
        args = (args[0], args[0])
return args
def print_marray(A, ans=True):
pre = ''
if ans:
pre = '\nans = \n\n'
if len(A.msize) > 2:
for i in _ndi(*[slice(0,x) for x in A.msize[2:]]):
pre += '(:, :, %s)\n\n'%', '.join([str(x+1) for x in i])
cur = (slice(0,A.msize[0]), slice(0, A.msize[1])) + i
sA = A.__getitem__(cur)
sA.msize = A.msize[:2]
pre += print_marray(sA, False)
return pre
else:
return str(A._a.T) + '\n\n'
class _marray(mvar):
@staticmethod
def empty(shp, dtype):
return _marray(dtype, shp)
@staticmethod
def zeros(shp, dtype):
na = _marray(dtype, shp)
na._a.flat[:] = 0 #np.zeros(na.msize[::-1], _dtype2numpy[dtype])
#na.msize = shp
return na
@staticmethod
def ones(shp, dtype):
na = _marray(dtype, shp)
na._a.flat[:] = 1 #np.ones(na.msize[::-1], _dtype2numpy[dtype])
#na.msize = shp
return na
def __init__(self, dtype, msize, a=None):
from operator import isSequenceType
if not isSequenceType(msize):
msize = (msize, msize)
elif len(msize) == 1:
msize = (msize[0], 1)
if a is None:
self._a = np.empty(msize[::-1], _dtype2numpy[dtype])
elif isinstance(a, np.ndarray):
self._a = a
else:
self._a = np.array(a, _dtype2numpy[dtype]).reshape(msize[::-1])
self.msize = msize
self.dtype = dtype
def __copy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
def __deepcopy__(self):
return _marray(self.dtype, self.msize, self._a.copy())
# operators
def __elpow__(self, him):
if isinstance(him, _marray): him = him._a
return _marray(self.dtype, self.msize, self._a**him)
def __elmul__(self, him):
if isinstance(him, _marray): him = him._a
return _marray(self.dtype, self.msize, self._a*him)
def __mul__(self, right):
if len(self.msize) != 2:
# FIXME
raise OMPCError('??? Error using ==> mtimes\n'
'Input arguments must be 2-D')
# if multiplying with _el object, call the elementwise operation
if isinstance(right, _el): return _el(left=self)
elif _isscalar(right): return self.__elmul__(right)
# matrix multiplication
return _dot(self, right)
def __rmul__(self, left):
# if multiplying with _el object, call the elementwise operation
if isinstance(left, _el): return _el(right=self)
elif _isscalar(left): return self.__elmul__(left)
# matrix multiplication
return _dot(left, self)
def __add__(self, him):
if isinstance(him, _marray): him = him._a
return _marray(self.dtype, self.msize, self._a+him)
__radd__ = __add__
def __sub__(self, him):
if isinstance(him, _marray): him = him._a
return _marray(self.dtype, self.msize, self._a-him)
def __rsub__(self, him):
if isinstance(him, _marray): him = him._a
return _marray(self.dtype, self.msize, him-self._a)
def __neg__(self):
return _marray(self.dtype, self.msize, -self._a)
# comparisons
def __ge__(self, other):
if isinstance(other, _marray):
other = other._a
return _marray('bool', self.msize, self._a >= other)
# element access
def __iter__(self):
return iter(self._a)
def __len__(self):
return max(self.msize)
def __getitem__(self, i):
# determine the size of the new array
nshp = _ndshape(self.msize, *i)
        return _marray(self.dtype, nshp, self._a.__getitem__(tuple(reversed(i))))
# >> a = reshape(1:15,5,3)
# >> a(eye(3)==1)
# ans = [1, 5, 9]
def __getitem1__(self, i):
# determine the size of the new array
nshp = _ndshape1(self.msize, *i)
ri = []
if len(i) == 1:
if self.msize[0] == 1: ri = (i[0]._a.astype('i4').reshape(-1)-1, 0)
elif self.msize[1] == 1: ri = (0, i[0]._a.astype('i4').reshape(-1)-1)
else:
raise NotImplementedError()
else:
di = len(self.msize)-1
for x in reversed(i):
if isinstance(x, _marray): ri.append(x._a.astype('i4').reshape(-1)-1)
elif isinstance(x, _mslice): ri.append(x.__base0__(self.msize[di]))
else: ri.append(x-1)
di -= 1
na = self._a.__getitem__(ri)
return _marray(self.dtype, nshp, na.reshape(nshp[::-1]))
    def __setitem__(self, i, val):
        if isinstance(val, _marray): val = val._a
        self._a.__setitem__(tuple(reversed(i)), val)
def __setitem1__(self, i, val):
# determine the size of the new array
nshp = _ndshape1(self.msize, *i)
if isinstance(val, _marray): val = val._a
ri = []
if len(i) == 1:
            # numpy will not broadcast a (1, n) value into a fancy-indexed row
            # here (a = rand(1,10); b = rand(1,2); a[0,[3,4]] = b fails), so
            # the value is unwrapped with val[0] below
if self.msize[0] == 1:
ri = (i[0]._a.astype('i4').reshape(-1)-1, 0)
val = val[0]
elif self.msize[1] == 1:
ri = (0, i[0]._a.astype('i4').reshape(-1)-1)
val = val[0]
else:
raise NotImplementedError()
else:
di = len(self.msize)-1
for x in reversed(i):
if isinstance(x, _marray): ri.append(x._a.astype('i4').reshape(-1)-1)
elif isinstance(x, _mslice): ri.append(x.__base0__(self.msize[di]))
else: ri.append(x-1)
di -= 1
self._a.__setitem__(ri, val)
# properties
def transposed(self):
assert len(self.msize) == 2
return _marray(self.dtype, self.msize[::-1],
self._a.T.flat.copy())
T = property(transposed, None, None, "Transpose.")
# IO
def __str__(self):
return print_marray(self)
def __repr__(self):
return "marray(%r, %r)"%(self.dtype, self.msize)
# from the end of
# http://code.activestate.com/recipes/52558/
class _MEnd(object):
'''This object serves as an emulator of the "end" statement of MATLAB.
    We want to use the "is" operator, therefore we need a singleton.'''
__instance = None # the unique instance
def __new__(cls):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
object.__init__(cls.__instance)
return cls.__instance
def __init__(self):
# prevent the automatic call of object's __init__, it is init-ed once
# in the __new__ function
pass
def __repr__(self):
return 'end'
def __str__(self):
return '(m-end object)'
def __int__(self):
return sys.maxint
end = _MEnd()
def _mslicelen(start, stop, step):
if stop is end or stop is None:
return sys.maxint
    return int(np.floor((stop-start)/step) + 1)
class _mslice(mvar):
"""m-slice MATLAB style slice object.
You can instantiate this class only by the helper mslice:
>>> mslice[1:10]
"""
def __init__(self, start, stop=None, step=None):
raise NotImplementedError("Direct instantiation is not allowed.")
def init(self, start, stop, step):
if start is None: start = 1
if step is None: step = 1
self.start = start
self.stop = stop
self.step = step
self.dtype = 'double'
self.msize = (1, _mslicelen(self.start, self.stop, self.step))
def init_data(self):
if self._a is None:
self._a = np.array(list(self), dtype='f8')
def evaluate_end(self, i):
start = self.start
step = self.step
stop = self.stop
if stop is end:
return mslice[start:step:i]
else:
return self
def _ctypes_get(self):
# Create and initialize a real data buffer, then let the default
# function to return the ctypes pointer
if self.stop is end:
raise RuntimeError("Infinite slice can be only used as an index.")
# return None
self.init_data()
return self._a.ctypes
ctypes = property(_ctypes_get, None, None,
"Ctypes-wrapped data object.")
def __iter__(self):
value = self.start
while value <= self.stop:
yield float(value)
value += self.step
def __getitem__(self, i):
if isinstance(i, slice):
            raise NotImplementedError
retval = self.start + i*self.step
if retval > self.stop:
raise IndexError
return retval
def __len__(self):
if self.stop is end:
# FIXME: how should this be done
# raise AssertionError("This is impossible for a code translated "
# "from a functional MATLAB code.")
# Python allows returning of positive integers only!
return sys.maxint
return _mslicelen(self.start, self.stop, self.step)
def __repr__(self):
return 'mslice[%r:%r:%r]'%\
(self.start, self.step, self.stop)
def __str__(self):
if self.stop is None:
it = iter(self)
return ', '.join( str(it.next()) for i in xrange(3) ) + ' ...'
elif len(self) > 10:
it = iter(self)
retval = self.__repr__() + '\n'
retval += ', '.join( str(it.next()) for i in xrange(3) ) + ' ... '
lastval = self.start + (len(self)-1)*self.step
return retval + str(lastval)
return ', '.join( map(str, self) )
def hasnoend(self):
'Returns true if "self.stop is end".'
return self.stop is end
def __copy__(self):
self.init_data()
return _marray(self.dtype, self.msize, self._a.copy())
def __deepcopy__(self):
self.init_data()
return _marray(self.dtype, self.msize, self._a.copy())
def __base0__(self,shp=None):
if self.hasnoend():
assert shp is not None
return slice(self.start-1, shp, self.step)
return slice(self.start-1, self.stop, self.step)
class _mslice_helper:
def __getitem__(self, i):
s = _mslice.__new__(_mslice)
# FIXME: there is no way of differentiating between mslice[:]
# and mslice[0:], the second will never appear in a code written for
# MATLAB.
# !!! actually, maybe possible by look-back in the stack ?!!
start, stop, step = i.start, end, 1
if i.step is None:
# there are only 2 arguments, stop is i.stop
if i.start == 0 and i.stop == sys.maxint:
# a special case
start = 1
elif i.stop == sys.maxint:
# this is what happens when syntax [start:] is used
raise IndexError(
'Use 2- and 3-slices only. Use "end" instead of "None".')
else: stop = i.stop
else:
# there are all 3 arguments, stop is actually i.step
# 1:2:10 -> slice(1,2,10) -> mslice(1,10,2)
stop = i.step
step = i.stop
s.init(start, stop, step)
return s
mslice = _mslice_helper()
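# Illustrative examples (sketch): mslice mimics MATLAB colon expressions with
# 1-based, inclusive endpoints:
#
#     mslice[1:10]     -> MATLAB 1:10   (start 1, step 1, stop 10)
#     mslice[1:2:10]   -> MATLAB 1:2:10 (start 1, step 2, stop 10)
#     mslice[3:end]    -> open-ended slice; usable only as an index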
class mstring(mvar):
def __init__(self, s):
mvar.__init__(self)
self.dtype = 'char'
self.msize = (1, len(s))
self._a = s
def __len__(self):
return len(self._a)
def __str__(self):
return self._a
def __repr__(self):
return 'mstring(%r)'%self._a
def doublestr(x,prec=4):
try:
float(x)
except:
return x
else:
return str(round(x,4))
def _m_constructor_args(*X):
from operator import isSequenceType
dtype = 'double'
if type(X[-1]) is str:
dtype = X[-1]
X = X[:-1]
    if len(X) == 1 and isSequenceType(X[0]):
X = X[0]
return X, dtype
def empty(*X):
# check for class
X, dt = _m_constructor_args(*X)
return _marray.empty(X, dt)
def zeros(*X):
# check for class
X, dt = _m_constructor_args(*X)
return _marray.zeros(X, dt)
def ones(*X):
# check for class
X, dt = _m_constructor_args(*X)
return _marray.ones(X, dt)
def mcat(i):
"""Concatenate a list of matrices into a single matrix using separators
',' and ';'. The ',' means horizontal concatenation and the ';' means
vertical concatenation.
"""
if i is None:
        return _marray('double', (0, 0))  # empty matrix
# calculate the shape
rows = [[]]
final_rows = 0
final_cols = 0
crows = ccols = 0
pos = []
pos2 = []
for x in i:
#if x == ';':
if x is Ellipsis:
rows.append([])
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
ccols = 0
pos.append(Ellipsis)
else:
shp = x.msize
if len(shp) < 1: shp = [0]
if len(shp) < 2: shp += [0]
rows[-1].append(shp[0])
pos.append( (slice(final_rows, final_rows+shp[0]),
slice(ccols, ccols+shp[1])) )
crows = shp[0]
ccols += shp[1]
if final_cols > 0 and final_cols != ccols:
error("Incompatible shapes!")
else:
final_cols = ccols
final_rows += crows
out = empty((final_rows, final_cols), 'double')
for sl, x in _izip(pos, i):
if x is not Ellipsis:
if isinstance(x, _marray): x = x._a.T
out._a.reshape(final_cols, final_rows).T.__setitem__(sl, x)
return out
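# Illustrative example (sketch): OMPCSEMI (the Ellipsis alias defined above)
# plays the role of MATLAB's ';' row separator, so
#
#     mcat([a, b, OMPCSEMI, c, d])
#
# lays a and b side by side on the first row and c and d on the second,
# provided both rows end up with the same number of columns.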
def size(X):
return X.msize
def rand(*args):
if isinstance(args[0], str):
        raise NotImplementedError
if len(args) == 1:
args = (args[0], args[0])
return _marray('double', args, np.random.rand(*args[::-1]))
def randn(*args):
if isinstance(args[0], str):
        raise NotImplementedError
if len(args) == 1:
args = (args[0], args[0])
return _marray('double', args, np.random.randn(*args[::-1]))
def reshape(A, *newsize):
if len(newsize) == 0:
raise OMPCError('??? Error using ==> reshape\n'
'Not enough input arguments.')
    if len(newsize) == 1 and hasattr(newsize[0], '__len__'):
newsize = newsize[0]
out = A.__copy__()
if not np.prod(A.msize) == np.prod(newsize):
raise OMPCError('??? Error using ==> reshape\n'
'To RESHAPE the number of elements must not change.')
out.msize = newsize
return out
def sum(A, *dimtype):
restype = 'double'
dim = 1
if len(dimtype) == 2:
dim = dimtype[0]
dimtype = dimtype[1]
elif len(dimtype) == 1:
dimtype = dimtype[0]
if isinstance(dimtype, str):
if dimtype == 'native':
restype = A.dtype
else:
restype = dimtype
else:
dim = dimtype
msize = A.msize
if A.msize[dim-1] == 1:
return A.__copy__()
nshp = list(msize)
nshp[dim-1] = 1
if len(nshp) > 2 and nshp[-1] == 1: nshp = nshp[:-1]
# use numpy's sum
a = np.sum(A._a, len(msize)-dim)
return _marray(A.dtype, nshp, a)
def find(cond):
a = mpl.find(cond._a.reshape(-1)) + 1
msize = (len(a), 1)
if len(cond.msize) == 2 and cond.msize[0] == 1:
msize = (1, len(a))
return _marray('double', msize, a.astype('f8').reshape(msize[::-1]))
def plot(*args):
#print [ x.msize for x in args ]
nargs = []
for x in args:
if isinstance(x, _marray): nargs.append(x._a.T)
elif isinstance(x, mstring): nargs.append(str(x))
else: nargs.append(x)
mpl.plot(*nargs)
mpl.show()
|
|
#!/usr/bin/python3
#
# Copyright (c) 2013 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import gzip
import math
import random
import sys
from typing import Dict, Iterator, List, Optional, Tuple, cast
from paleomix.common.formats.fasta import FASTA
from paleomix.common.sampling import weighted_sampling
from paleomix.common.sequences import reverse_complement
from paleomix.common.utilities import fragment
def _dexp(lambda_value: float, position: int) -> float:
return lambda_value * math.exp(-lambda_value * position)
def _rexp(lambda_value: float, rng: random.Random) -> float:
return -math.log(rng.random()) / lambda_value
def toint(value: float) -> int:
return int(round(value))
# Adapter added to the 5' end of the forward strand (read from 5' ...)
PCR1 = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC%sATCTCGTATGCCGTCTTCTGCTTG"
# Adapter added to the 5' end of the reverse strand (read from 3' ...):
# rev. compl of the forward
PCR2 = "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT"
def _get_indel_length(indel_lambda: float, rng: random.Random) -> int:
return 1 + toint(_rexp(indel_lambda, rng))
def _get_weighted_choices(rng: random.Random, sub_rate: float, indel_rate: float):
choices_by_nt: Dict[str, Iterator[str]] = {}
for src_nt in "ACGT":
choices = "ACGTID"
probs = [sub_rate / 4] * 4 # ACGT
probs += [indel_rate / 2] * 2 # ID
probs[choices.index(src_nt)] = 1 - sum(probs) + sub_rate / 4
choices_by_nt[src_nt] = weighted_sampling(choices, probs, rng)
return choices_by_nt
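# For a given source nucleotide the generators above yield, per position: the
# same base with probability 1 - 3/4 * sub_rate - indel_rate, each of the
# other three bases with probability sub_rate / 4, and "I" (insertion) or "D"
# (deletion) with probability indel_rate / 2 each.  For example, with the
# default sub_rate=0.005 and indel_rate=0.0005 a reference "A" is kept with
# probability ~0.9958.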
def _mutate_sequence(
rng: random.Random,
choices: Dict[str, Iterator[str]],
refseq: str,
indel_lambda: float = 0,
) -> Tuple[str, List[int]]:
position = 0
sequence: List[str] = []
positions: List[int] = []
while position < len(refseq):
ref_nt = refseq[position]
if ref_nt not in "ACGT":
read_nt = rng.choice("ACGT")
else:
read_nt = next(choices[ref_nt])
if read_nt == "D":
for _ in range(_get_indel_length(indel_lambda, rng)):
position += 1
elif read_nt == "I":
for _ in range(_get_indel_length(indel_lambda, rng)):
sequence.append(rng.choice("ACGT"))
positions.append(position)
else:
sequence.append(read_nt)
positions.append(position)
position += 1
return "".join(sequence), positions
class Args: # (argparse.Namespace):
fasta: str
output_prefix: str
barcode: str
specimen_seed: Optional[int]
specimen_sub_rate: float
specimen_indel_rate: float
specimen_indel_lambda: float
sample_seed: int
sample_frag_len_mu: int
sample_frag_len_sigma: int
sample_frag_len_min: int
sample_frag_len_max: int
sample_endog_mu: float
sample_endog_sigma: float
damage: bool
damage_seed: Optional[int]
damage_lambda: float
library_seed: Optional[int]
library_pcr_lambda: float
library_barcode: Optional[str]
lanes_num: int
lanes_reads_mu: int
lanes_reads_sigma: int
lanes_per_file: int
reads_sub_rate: float
reads_indel_rate: float
reads_indel_lambda: float
reads_len: int
class Specimen:
"""Represents a specimen, from which samples are derived.
These are mutated by the addition of changes to the sequence
"""
def __init__(self, options: Args, filename: str):
genome = list(FASTA.from_file(filename))
assert len(genome) == 1, len(genome)
self._genome = genome[0].sequence.upper()
rng = random.Random(options.specimen_seed)
choices = _get_weighted_choices(
rng, options.specimen_sub_rate, options.specimen_indel_rate
)
self._sequence, self._positions = _mutate_sequence(
rng, choices, self._genome, options.specimen_indel_lambda
)
@property
def sequence(self):
return self._sequence
@property
def positions(self):
return self._positions
class Sample:
def __init__(self, options: Args, specimen: Specimen):
self._specimen = specimen
self._random = random.Random(options.sample_seed)
self._options = options
frac_endog = self._random.gauss(
options.sample_endog_mu, options.sample_endog_sigma
)
self._frac_endog = min(1, max(0.01, frac_endog))
self._endog_id = 0
self._contam_id = 0
def get_fragment(self) -> Tuple[bool, str, str]:
"""Returns either a DNA fragmnet, representing either a fragment of
the sample genome, or a randomly generated DNA sequence representing
contaminant DNA that is not related to the species."""
if self._random.random() <= self._frac_endog:
return self._get_endogenous_sequence()
return self._get_contaminant_sequence()
def _get_contaminant_sequence(self) -> Tuple[bool, str, str]:
length = self._get_frag_len()
sequence = [self._random.choice("ACGT") for _ in range(length)]
self._contam_id += 1
name = "Seq_junk_%i" % (self._contam_id,)
return (False, name, "".join(sequence))
def _get_endogenous_sequence(self) -> Tuple[bool, str, str]:
length = self._get_frag_len()
max_position = len(self._specimen.sequence) - length
position = self._random.randint(0, max_position)
strand = self._random.choice(("fw", "rv"))
sequence = self._specimen.sequence[position : position + length]
real_pos = self._specimen.positions[position]
if strand == "rv":
sequence = reverse_complement("".join(sequence))
self._endog_id += 1
name = "Seq_%i_%i_%i_%s" % (self._endog_id, real_pos, length, strand)
return (True, name, sequence)
def _get_frag_len(self):
length = toint(
self._random.gauss(
self._options.sample_frag_len_mu, self._options.sample_frag_len_sigma
)
)
return max(
self._options.sample_frag_len_min,
min(self._options.sample_frag_len_max, length),
)
class Damage:
def __init__(self, options: Args, sample: Sample):
self._options = options
self._sample = sample
self._random = random.Random(options.damage_seed)
self._rates = self._calc_damage_rates(options)
def get_fragment(self) -> Tuple[str, str]:
is_endogenous, name, sequence = self._sample.get_fragment()
if is_endogenous and self._options.damage:
sequence = self._damage_sequence(sequence)
return (name, sequence)
def _damage_sequence(self, sequence: str) -> str:
result: List[str] = []
length = len(sequence)
for (position, nucleotide) in enumerate(sequence):
if nucleotide == "C":
if self._random.random() < self._rates[position]:
nucleotide = "T"
elif nucleotide == "G":
rv_position = length - position - 1
if self._random.random() < self._rates[rv_position]:
nucleotide = "A"
result.append(nucleotide)
return "".join(result)
@classmethod
def _calc_damage_rates(cls, options: Args) -> List[float]:
return [
_dexp(options.damage_lambda, position)
for position in range(options.sample_frag_len_max)
]
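# The per-position C->T (and mirrored G->A) damage rates above follow an
# exponential decay from the fragment ends, rate(i) = lambda * exp(-lambda * i);
# e.g. with the default --damage-lambda of 0.25 the rate is 0.25 at position 0
# and roughly 0.25 * exp(-1.25) ~= 0.072 at position 5.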
class Library:
def __init__(self, options: Args, damaged_sample: Damage):
self._options = options
self._damaged_sample = damaged_sample
self._cache = []
self._rng = random.Random(options.library_seed)
self.barcode = options.library_barcode
if self.barcode is None:
self.barcode = "".join(self._rng.choice("ACGT") for _ in range(6))
assert len(self.barcode) == 6, options.barcode
pcr1 = PCR1 % (self.barcode,)
self.lanes = self._generate_lanes(options, self._rng, damaged_sample, pcr1)
@classmethod
def _generate_lanes(
cls,
options: Args,
rng: random.Random,
sample: Damage,
pcr1: str,
):
lane_counts: List[int] = []
for _ in range(options.lanes_num):
lane_counts.append(
                toint(rng.gauss(options.lanes_reads_mu, options.lanes_reads_sigma))
)
reads = cls._generate_reads(options, rng, sample, sum(lane_counts), pcr1)
lanes: List[Lane] = []
for count in lane_counts:
lanes.append(Lane(options, reads[:count]))
reads = reads[count:]
return lanes
@classmethod
def _generate_reads(
cls,
options: Args,
rng: random.Random,
sample: Damage,
minimum: int,
pcr1: str,
) -> List[Tuple[str, str, str]]:
reads: List[Tuple[str, str, str]] = []
while len(reads) < minimum:
name, sequence = sample.get_fragment()
cur_forward = sequence + pcr1
cur_reverse = reverse_complement(sequence) + PCR2
# Number of PCR copies -- minimum 1
num_dupes = toint(_rexp(options.library_pcr_lambda, rng)) + 1
for dupe_id in range(num_dupes):
cur_name = "%s_%s" % (name, dupe_id)
reads.append((cur_name, cur_forward, cur_reverse))
        rng.shuffle(reads)
return reads
class Lane:
def __init__(self, options: Args, reads: List[Tuple[str, str, str]]):
rng = random.Random()
choices = _get_weighted_choices(
rng, options.reads_sub_rate, options.reads_indel_rate
)
self._sequences: List[Tuple[str, str, str]] = []
for (name, forward, reverse) in reads:
forward, _ = _mutate_sequence(
rng, choices, forward, options.reads_indel_lambda
)
if len(forward) < options.reads_len:
forward += "A" * (options.reads_len - len(forward))
elif len(forward) > options.reads_len:
forward = forward[: options.reads_len]
reverse, _ = _mutate_sequence(
rng, choices, reverse, options.reads_indel_lambda
)
if len(reverse) < options.reads_len:
reverse += "T" * (options.reads_len - len(reverse))
elif len(reverse) > options.reads_len:
reverse = reverse[: options.reads_len]
self._sequences.append((name, "".join(forward), "".join(reverse)))
@property
def sequences(self):
return self._sequences
def parse_args(argv: List[str]) -> Args:
parser = argparse.ArgumentParser()
parser.add_argument("fasta", help="Input FASTA file")
parser.add_argument("output_prefix", help="Prefix for output filenames")
group = parser.add_argument_group("Specimen")
group.add_argument(
"--specimen-seed",
default=None,
type=int,
help="Seed used to initialize the 'speciment', for the "
"creation of a random genotype. Set to a specific "
"values if runs are to be done for the same "
"genotype.",
)
group.add_argument("--specimen-sub-rate", default=0.005, type=float)
group.add_argument("--specimen-indel-rate", default=0.0005, type=float)
group.add_argument("--specimen-indel-lambda", default=0.9, type=float)
group = parser.add_argument_group("Samples from specimens")
group.add_argument("--sample-seed", default=None)
group.add_argument(
"--sample-frag-length-mu", dest="sample_frag_len_mu", default=100, type=int
)
group.add_argument(
"--sample-frag-length-sigma", dest="sample_frag_len_sigma", default=30, type=int
)
group.add_argument(
"--sample-frag-length-min", dest="sample_frag_len_min", default=0, type=int
)
group.add_argument(
"--sample-frag-length-max", dest="sample_frag_len_max", default=500, type=int
)
group.add_argument(
"--sample-endogenous_mu", dest="sample_endog_mu", default=0.75, type=float
)
group.add_argument(
"--sample-endogenous_sigma", dest="sample_endog_sigma", default=0.10, type=float
)
group = parser.add_argument_group("Post mortem damage of samples")
group.add_argument("--damage", dest="damage", default=False, action="store_true")
group.add_argument("--damage-seed", dest="damage_seed", default=None)
group.add_argument(
"--damage-lambda", dest="damage_lambda", default=0.25, type=float
)
group = parser.add_argument_group("Libraries from samples")
group.add_argument("--library-seed", dest="library_seed", default=None)
group.add_argument(
"--library-pcr-lambda", dest="library_pcr_lambda", default=3, type=float
)
group.add_argument("--library-barcode", dest="library_barcode", default=None)
group = parser.add_argument_group("Lanes from libraries")
group.add_argument("--lanes", dest="lanes_num", default=3, type=int)
group.add_argument(
"--lanes-reads-mu", dest="lanes_reads_mu", default=10000, type=int
)
group.add_argument(
"--lanes-reads-sigma", dest="lanes_reads_sigma", default=2500, type=int
)
group.add_argument(
"--lanes-reads-per-file", dest="lanes_per_file", default=2500, type=int
)
group = parser.add_argument_group("Reads from lanes")
group.add_argument(
"--reads-sub-rate", dest="reads_sub_rate", default=0.005, type=float
)
group.add_argument(
"--reads-indel-rate", dest="reads_indel_rate", default=0.0005, type=float
)
group.add_argument(
"--reads-indel-lambda", dest="reads_indel_lambda", default=0.9, type=float
)
group.add_argument("--reads-length", dest="reads_len", default=100, type=int)
return cast(Args, parser.parse_args(argv))
def main(argv: List[str]) -> int:
options = parse_args(argv)
print("Generating %i lane(s) of synthetic reads" % (options.lanes_num,))
specimen = Specimen(options, options.fasta)
sample = Sample(options, specimen)
damage = Damage(options, sample)
library = Library(options, damage)
for (lnum, lane) in enumerate(library.lanes, start=1):
fragments = fragment(options.lanes_per_file, lane.sequences)
for (readsnum, reads) in enumerate(fragments, start=1):
templ = "%s%s_L%i_R%%s_%02i.fastq.gz" % (
options.output_prefix,
library.barcode,
lnum,
readsnum,
)
print(" Writing %s" % (templ % "{Pair}",))
with gzip.open(templ % 1, "wt") as out_1:
with gzip.open(templ % 2, "wt") as out_2:
for (name, seq_1, seq_2) in reads:
out_1.write("@%s%s/1\n%s\n" % (library.barcode, name, seq_1))
out_1.write("+\n%s\n" % ("I" * len(seq_1),))
out_2.write("@%s%s/2\n%s\n" % (library.barcode, name, seq_2))
out_2.write("+\n%s\n" % ("H" * len(seq_2),))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
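# Example invocation (a sketch; the script name is whatever this module is
# saved as, shown here as synthesize_reads.py for illustration):
#
#     python3 synthesize_reads.py reference.fasta output/prefix_ \
#         --lanes 2 --damage --reads-length 100
#
# which writes gzipped paired FASTQ files named
# <prefix><barcode>_L<lane>_R<1|2>_<part>.fastq.gz.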
|
|
from django.db.models import Q
from django.shortcuts import render
from django.utils.safestring import mark_safe
from django.views.decorators.cache import cache_page
import jingo
import jinja2
from tower import ugettext_lazy as _lazy
import amo
from amo.helpers import urlparams
from addons.models import Addon
from translations.models import Translation
def _install_button(context, addon, version=None, show_contrib=True,
show_warning=True, src='', collection=None, size='',
detailed=False, mobile=False, impala=False):
"""If version isn't given, we use the latest version."""
request = context['request']
app, lang = context['APP'], context['LANG']
src = src or context.get('src') or request.GET.get('src', '')
collection = ((collection.uuid if hasattr(collection, 'uuid') else None)
or collection
or context.get('collection')
or request.GET.get('collection')
or request.GET.get('collection_id')
or request.GET.get('collection_uuid'))
button = install_button_factory(addon, app, lang, version, show_contrib,
show_warning, src, collection, size,
detailed, impala)
installed = (request.user.is_authenticated() and
addon.id in request.amo_user.mobile_addons)
c = {'button': button, 'addon': addon, 'version': button.version,
'installed': installed}
if impala:
template = 'addons/impala/button.html'
elif mobile:
template = 'addons/mobile/button.html'
else:
template = 'addons/button.html'
t = jingo.render_to_string(request, template, c)
return jinja2.Markup(t)
@jinja2.contextfunction
def install_button(context, addon, **kwargs):
backup = kwargs.pop('show_backup', True)
base = _install_button(context, addon, **kwargs)
if backup and addon.backup_version:
kwargs['version'] = addon.backup_version
backup = _install_button(context, addon, **kwargs)
return mark_safe('%s\n<div class="backup-button hidden '
'install-wrapper">%s</div>' % (base, backup))
return base
@jinja2.contextfunction
def big_install_button(context, addon, **kwargs):
from addons.helpers import statusflags
backup = kwargs.pop('show_backup', True)
flags = jinja2.escape(statusflags(context, addon))
base = _install_button(context, addon, detailed=True, size='prominent',
**kwargs)
params = [flags, base]
wrap = u'<div class="install-wrapper %s">%s</div>'
if backup and addon.backup_version:
params.append(flags)
params.append(_install_button(context, addon,
version=addon.backup_version,
detailed=True, size='prominent',
**kwargs))
wrap += '<div class="backup-button hidden install-wrapper %s">%s</div>'
return jinja2.Markup(wrap % (tuple(params)))
@jinja2.contextfunction
def mobile_install_button(context, addon, **kwargs):
from addons.helpers import statusflags
b = _install_button(context, addon, detailed=True, size='prominent',
mobile=True, **kwargs)
flags = jinja2.escape(statusflags(context, addon))
s = u'<div class="install-wrapper %s">%s</div>'
return jinja2.Markup(s % (flags, b))
def install_button_factory(*args, **kwargs):
button = InstallButton(*args, **kwargs)
# Order matters. We want to highlight unreviewed before featured. They
# should be mutually exclusive, but you never know.
classes = (('lite', LiteInstallButton),
('unreviewed', UnreviewedInstallButton),
('featured', FeaturedInstallButton))
for pred, cls in classes:
if getattr(button, pred, False):
button.__class__ = cls
break
button.prepare()
return button
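# For example, an add-on that is both unreviewed and featured ends up with
# UnreviewedInstallButton, because 'unreviewed' precedes 'featured' in the
# tuple above ('lite' is checked first of all).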
class InstallButton(object):
button_class = ['download']
install_class = []
install_text = ''
def __init__(self, addon, app, lang, version=None, show_contrib=True,
show_warning=True, src='', collection=None, size='',
detailed=False, impala=False):
self.addon, self.app, self.lang = addon, app, lang
self.latest = version is None
self.version = version or addon.current_version
self.src = src
self.collection = collection
self.size = size
self.detailed = detailed
self.impala = impala
self.unreviewed = addon.is_unreviewed()
self.featured = (not self.unreviewed
and addon.is_featured(app, lang))
self.is_premium = addon.is_premium()
self.is_webapp = addon.is_webapp()
self._show_contrib = show_contrib
self.show_contrib = (show_contrib and addon.takes_contributions
and addon.annoying == amo.CONTRIB_ROADBLOCK)
self.show_warning = show_warning and self.unreviewed
def prepare(self):
"""Called after the class is set to manage contributions."""
# Get a copy for this instance.
self.button_class = list(self.__class__.button_class)
self.install_class = list(self.__class__.install_class)
if self.show_contrib:
try:
self.button_class.remove('download')
except ValueError:
pass
self.button_class += ['contrib', 'go']
self.install_class.append('contrib')
if self.size:
self.button_class.append(self.size)
if self.is_webapp:
self.install_class.append('webapp')
def attrs(self):
rv = {}
addon = self.addon
if (self._show_contrib and addon.takes_contributions
and addon.annoying == amo.CONTRIB_AFTER):
rv['data-after'] = 'contrib'
if addon.type == amo.ADDON_SEARCH:
rv['data-search'] = 'true'
return rv
def links(self):
return []
def fix_link(self, url):
if self.src:
url = urlparams(url, src=self.src)
if self.collection:
url = urlparams(url, collection_id=self.collection)
return url
class FeaturedInstallButton(InstallButton):
install_class = ['featuredaddon']
install_text = _lazy(u'Featured', 'install_button')
class UnreviewedInstallButton(InstallButton):
install_class = ['unreviewed']
install_text = _lazy(u'Not Reviewed', 'install_button')
button_class = 'download caution'.split()
class LiteInstallButton(InstallButton):
install_class = ['lite']
button_class = ['caution']
install_text = _lazy(u'Experimental', 'install_button')
class Link(object):
def __init__(self, text, url, os=None, file=None):
self.text, self.url, self.os, self.file = text, url, os, file
# Cache it for a year.
@cache_page(60 * 60 * 24 * 365)
def js(request):
return render(request, 'addons/popups.html',
content_type='text/javascript')
def smorgasbord(request):
"""
Gather many different kinds of tasty add-ons together.
Great for testing install buttons.
"""
def _compat(min, max):
# Helper for faking compatible_apps.
return {'min': {'version': min}, 'max': {'version': max}}
addons = []
normal_version = _compat('1.0', '10.0')
older_version = _compat('1.0', '2.0')
newer_version = _compat('9.0', '10.0')
def all_versions(addon, base_tag):
x = (('', normal_version),
(' + older version', older_version),
(' + newer version', newer_version))
for extra, version in x:
a = addon()
a.tag = base_tag + extra
a.compatible_apps[request.APP] = version
addons.append(a)
# Featured.
featured = Addon.objects.featured(request.APP)
addons.append(featured[0])
addons[-1].tag = 'featured'
normal = Addon.objects.listed(request.APP).exclude(id__in=featured)
# Normal, Older Version, Newer Version.
all_versions(lambda: normal[0], 'normal')
# Unreviewed.
exp = Addon.objects.unreviewed()
all_versions(lambda: exp[0], 'unreviewed')
# Multiple Platforms.
addons.append(Addon.objects.get(id=2313))
addons[-1].tag = 'platformer'
# Multiple Platforms + EULA.
addons.append(Addon.objects.get(id=2313))
addons[-1].eula = Translation(localized_string='xxx')
addons[-1].tag = 'platformer + eula'
    # Incompatible Platform + EULA.
addons.append(Addon.objects.get(id=5308))
addons[-1].eula = Translation(localized_string='xxx')
addons[-1].tag = 'windows/linux-only + eula'
# Incompatible Platform.
all_versions(lambda: Addon.objects.get(id=5308), 'windows/linux-only')
# EULA.
eula = (Q(eula__isnull=False, eula__localized_string__isnull=False)
& ~Q(eula__localized_string=''))
addons.append(normal.filter(eula)[0])
addons[-1].tag = 'eula'
addons.append(exp.filter(eula)[0])
addons[-1].tag = 'eula + unreviewed'
# Contributions.
addons.append(normal.filter(annoying=1)[0])
addons[-1].tag = 'contrib: passive'
addons.append(normal.filter(annoying=2)[0])
addons[-1].tag = 'contrib: after'
addons.append(normal.filter(annoying=3)[0])
addons[-1].tag = 'contrib: roadblock'
addons.append(Addon.objects.get(id=2608))
addons[-1].tag = 'after + eula'
addons.append(Addon.objects.get(id=8442))
addons[-1].tag = 'roadblock + eula'
# Other App.
addons.append(Addon.objects.get(id=5326))
addons[-1].tag = 'tbird'
# Mobile.
addons.append(Addon.objects.get(id=53476))
addons[-1].tag = 'mobile'
# Search Engine.
addons.append(Addon.objects.filter(type=amo.ADDON_SEARCH)[0])
addons[-1].tag = 'search engine'
# Beta Version
beta = normal.filter(versions__files__status=amo.STATUS_BETA)[0]
beta.tag = 'beta version'
# Future Version.
# No versions.
return render(request, 'addons/smorgasbord.html',
{'addons': addons, 'beta': beta})
|
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import re
import subprocess
import sys
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
# parse command line
argp = argparse.ArgumentParser(description='copyright checker')
argp.add_argument('-o',
'--output',
default='details',
choices=['list', 'details'])
argp.add_argument('-s', '--skips', default=0, action='store_const', const=1)
argp.add_argument('-a', '--ancient', default=0, action='store_const', const=1)
argp.add_argument('--precommit', action='store_true')
argp.add_argument('--fix', action='store_true')
args = argp.parse_args()
# open the license text
with open('NOTICE.txt') as f:
LICENSE_NOTICE = f.read().splitlines()
# License comment prefix, by file extension.
# The key is the file extension (or basename); the value is a regex matching
# the comment prefix that should precede each line of the license text in
# that kind of file.
LICENSE_PREFIX_RE = {
'.bat': r'@rem\s*',
'.c': r'\s*(?://|\*)\s*',
'.cc': r'\s*(?://|\*)\s*',
'.h': r'\s*(?://|\*)\s*',
'.m': r'\s*\*\s*',
'.mm': r'\s*\*\s*',
'.php': r'\s*\*\s*',
'.js': r'\s*\*\s*',
'.py': r'#\s*',
'.pyx': r'#\s*',
'.pxd': r'#\s*',
'.pxi': r'#\s*',
'.rb': r'#\s*',
'.sh': r'#\s*',
'.proto': r'//\s*',
'.cs': r'//\s*',
'.mak': r'#\s*',
'.bazel': r'#\s*',
'.bzl': r'#\s*',
'Makefile': r'#\s*',
'Dockerfile': r'#\s*',
'BUILD': r'#\s*',
}
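# Illustrative example (not used by the checker itself): with the '.py'
# prefix r'#\s*' above, RE_LICENSE (built below) requires every line of
# LICENSE_NOTICE to appear prefixed by '#', e.g.
#     # Licensed under the Apache License, Version 2.0 (the "License");
# with the copyright line matched loosely through RE_YEAR.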
# The key is the file extension, while the value is a tuple of fields
# (header, prefix, footer).
# For example, for javascript multi-line comments, the header will be '/**',
# the prefix will be ' *' and the footer will be ' */'.
# If header and footer are irrelevant for a specific file extension, they are
# set to None.
LICENSE_PREFIX_TEXT = {
'.bat': (None, '@rem', None),
'.c': (None, '//', None),
'.cc': (None, '//', None),
'.h': (None, '//', None),
'.m': ('/**', ' *', ' */'),
'.mm': ('/**', ' *', ' */'),
'.php': ('/**', ' *', ' */'),
'.js': ('/**', ' *', ' */'),
'.py': (None, '#', None),
'.pyx': (None, '#', None),
'.pxd': (None, '#', None),
'.pxi': (None, '#', None),
'.rb': (None, '#', None),
'.sh': (None, '#', None),
'.proto': (None, '//', None),
'.cs': (None, '//', None),
'.mak': (None, '#', None),
'.bazel': (None, '#', None),
'.bzl': (None, '#', None),
'Makefile': (None, '#', None),
'Dockerfile': (None, '#', None),
'BUILD': (None, '#', None),
}
_EXEMPT = frozenset((
# Generated protocol compiler output.
'examples/python/helloworld/helloworld_pb2.py',
'examples/python/helloworld/helloworld_pb2_grpc.py',
'examples/python/multiplex/helloworld_pb2.py',
'examples/python/multiplex/helloworld_pb2_grpc.py',
'examples/python/multiplex/route_guide_pb2.py',
'examples/python/multiplex/route_guide_pb2_grpc.py',
'examples/python/route_guide/route_guide_pb2.py',
'examples/python/route_guide/route_guide_pb2_grpc.py',
# Generated doxygen config file
'tools/doxygen/Doxyfile.php',
# An older file originally from outside gRPC.
'src/php/tests/bootstrap.php',
# census.proto copied from github
'tools/grpcz/census.proto',
# status.proto copied from googleapis
'src/proto/grpc/status/status.proto',
# Gradle wrappers used to build for Android
'examples/android/helloworld/gradlew.bat',
'src/android/test/interop/gradlew.bat',
# Designer-generated source
'examples/csharp/HelloworldXamarin/Droid/Resources/Resource.designer.cs',
'examples/csharp/HelloworldXamarin/iOS/ViewController.designer.cs',
# BoringSSL generated header. It has commit version information at the head
# of the file so we cannot check the license info.
'src/boringssl/boringssl_prefix_symbols.h',
))
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) ([Tt]he )?gRPC [Aa]uthors(\.|)'
RE_LICENSE = dict(
(k, r'\n'.join(LICENSE_PREFIX_RE[k] +
(RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
for line in LICENSE_NOTICE))
for k, v in list(LICENSE_PREFIX_RE.items()))
YEAR = datetime.datetime.now().year
LICENSE_YEAR = f'Copyright {YEAR} gRPC authors.'
def join_license_text(header, prefix, footer, notice):
text = (header + '\n') if header else ""
def add_prefix(prefix, line):
# Don't put whitespace between prefix and empty line to avoid having
# trailing whitespaces.
return prefix + ('' if len(line) == 0 else ' ') + line
text += '\n'.join(
add_prefix(prefix, (LICENSE_YEAR if re.search(RE_YEAR, line) else line))
for line in LICENSE_NOTICE)
text += '\n'
if footer:
text += footer + '\n'
return text
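# Illustrative example (not part of the checker's logic; assumes NOTICE.txt
# matches the license header at the top of this file): for the '.py' entry
# (None, '#', None) above, join_license_text renders the notice as
#
#     # Copyright <current year> gRPC authors.
#     #
#     # Licensed under the Apache License, Version 2.0 (the "License");
#     ...
#
# i.e. every line prefixed with '# ' (a bare '#' for blank lines, to avoid
# trailing whitespace), no header or footer, and the copyright line rewritten
# to the current LICENSE_YEAR.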
LICENSE_TEXT = dict(
(k,
join_license_text(LICENSE_PREFIX_TEXT[k][0], LICENSE_PREFIX_TEXT[k][1],
LICENSE_PREFIX_TEXT[k][2], LICENSE_NOTICE))
for k, v in list(LICENSE_PREFIX_TEXT.items()))
if args.precommit:
FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
else:
FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | ' \
'grep -v ^third_party/ |' \
'grep -v "\(ares_config.h\|ares_build.h\)"'
def load(name):
with open(name) as f:
return f.read()
def save(name, text):
with open(name, 'w') as f:
f.write(text)
assert (re.search(RE_LICENSE['Makefile'], load('Makefile')))
def log(cond, why, filename):
if not cond:
return
if args.output == 'details':
print(('%s: %s' % (why, filename)))
else:
print(filename)
# scan files, validate the text
ok = True
filename_list = []
try:
filename_list = subprocess.check_output(FILE_LIST_COMMAND,
shell=True).decode().splitlines()
except subprocess.CalledProcessError:
sys.exit(0)
for filename in filename_list:
if filename in _EXEMPT:
continue
# Skip check for upb generated code.
if (filename.endswith('.upb.h') or filename.endswith('.upb.c') or
filename.endswith('.upbdefs.h') or filename.endswith('.upbdefs.c')):
continue
ext = os.path.splitext(filename)[1]
base = os.path.basename(filename)
if ext in RE_LICENSE:
re_license = RE_LICENSE[ext]
license_text = LICENSE_TEXT[ext]
elif base in RE_LICENSE:
re_license = RE_LICENSE[base]
license_text = LICENSE_TEXT[base]
else:
log(args.skips, 'skip', filename)
continue
try:
text = load(filename)
except:
continue
m = re.search(re_license, text)
if m:
pass
elif 'DO NOT EDIT' not in text:
if args.fix:
text = license_text + '\n' + text
            save(filename, text)
log(1, 'copyright missing (fixed)', filename)
else:
log(1, 'copyright missing', filename)
ok = False
if not ok and not args.fix:
print(
        'You may use the following command to automatically fix copyright headers:')
print(' tools/distrib/check_copyright.py --fix')
sys.exit(0 if ok else 1)
|
|
#
# Copyright (c) 2012 Patrice Munger
# This file is part of pynetdicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pynetdicom.googlecode.com
#
# Implementation of the OSI Upper Layer Services
# DICOM, Part 8, Section 7
import socket
import PDU
import time
import DULparameters
# Finite State machine action definitions
import logging
logger = logging.getLogger(__name__)
def AE_1(provider):
# Issue TRANSPORT CONNECT request primitive to local transport service
provider.RemoteClientSocket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
try:
timeout_original = provider.RemoteClientSocket.gettimeout()
if provider.ConnectTimeoutSeconds is not None:
provider.RemoteClientSocket.settimeout(provider.ConnectTimeoutSeconds)
provider.RemoteClientSocket.connect(
provider.primitive.CalledPresentationAddress)
provider.RemoteClientSocket.settimeout(timeout_original)
except socket.error:
# cannot connect
provider.ToServiceUser.put(None)
def AE_2(provider):
# Send A-ASSOCIATE-RQ PDU
provider.pdu = PDU.A_ASSOCIATE_RQ_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def AE_3(provider):
# Issue A-ASSOCIATE confirmation (accept) primitive
provider.ToServiceUser.put(provider.primitive)
def AE_4(provider):
# Issue A-ASSOCIATE confirmation (reject) primitive and close transport
# connection
provider.ToServiceUser.put(provider.primitive)
provider.RemoteClientSocket.close()
provider.RemoteClientSocket = None
def AE_5(provider):
    # Issue transport connection response primitive; start ARTIM timer
# Don't need to send this primitive.
provider.Timer.Start()
def AE_6(provider):
# Stop ARTIM timer and if A-ASSOCIATE-RQ acceptable by service provider
# - Issue A-ASSOCIATE indication primitive
provider.Timer.Stop()
# Accept
provider.SM.NextState('Sta3')
provider.ToServiceUser.put(provider.primitive)
    # TODO: handle the case where the A-ASSOCIATE-RQ is not acceptable
    # (the 'Sta13' branch of AE-6).
def AE_7(provider):
# Send A-ASSOCIATE-AC PDU
provider.pdu = PDU.A_ASSOCIATE_AC_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def AE_8(provider):
# Send A-ASSOCIATE-RJ PDU and start ARTIM timer
provider.pdu = PDU.A_ASSOCIATE_RJ_PDU()
# not sure about this ...
if provider.primitive.Diagnostic is not None:
provider.primitive.ResultSource = provider.primitive.Diagnostic.source
#else:
# provider.primitive.Diagnostic = 1
# provider.primitive.ResultSource = 2
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def DT_1(provider):
# Send P-DATA-TF PDU
provider.pdu = PDU.P_DATA_TF_PDU()
provider.pdu.FromParams(provider.primitive)
provider.primitive = None
provider.RemoteClientSocket.send(provider.pdu.Encode())
def DT_2(provider):
# Send P-DATA indication primitive
provider.ToServiceUser.put(provider.primitive)
def AR_1(provider):
# Send A-RELEASE-RQ PDU
provider.pdu = PDU.A_RELEASE_RQ_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def AR_2(provider):
# Send A-RELEASE indication primitive
provider.ToServiceUser.put(provider.primitive)
def AR_3(provider):
# Issue A-RELEASE confirmation primitive and close transport connection
provider.ToServiceUser.put(provider.primitive)
provider.RemoteClientSocket.close()
provider.RemoteClientSocket = None
def AR_4(provider):
# Issue A-RELEASE-RP PDU and start ARTIM timer
provider.pdu = PDU.A_RELEASE_RP_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
provider.Timer.Start()
def AR_5(provider):
# Stop ARTIM timer
provider.Timer.Stop()
def AR_6(provider):
# Issue P-DATA indication
provider.ToServiceUser.put(provider.primitive)
def AR_7(provider):
# Issue P-DATA-TF PDU
provider.pdu = PDU.P_DATA_TF_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def AR_8(provider):
# Issue A-RELEASE indication (release collision)
provider.ToServiceUser.put(provider.primitive)
if provider.requestor == 1:
provider.SM.NextState('Sta9')
else:
provider.SM.NextState('Sta10')
def AR_9(provider):
# Send A-RELEASE-RP PDU
provider.pdu = PDU.A_RELEASE_RP_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def AR_10(provider):
# Issue A-RELEASE confirmation primitive
provider.ToServiceUser.put(provider.primitive)
def AA_1(provider):
# Send A-ABORT PDU (service-user source) and start (or restart
# if already started) ARTIM timer.
provider.pdu = PDU.A_ABORT_PDU()
# CHECK THIS ...
provider.pdu.AbortSource = 1
provider.pdu.ReasonDiag = 0
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
provider.Timer.Restart()
def AA_2(provider):
# Stop ARTIM timer if running. Close transport connection.
provider.Timer.Stop()
provider.RemoteClientSocket.close()
provider.RemoteClientSocket = None
def AA_3(provider):
# If (service-user initiated abort):
# - Issue A-ABORT indication and close transport connection.
# Otherwise (service-provider initiated abort):
# - Issue A-P-ABORT indication and close transport connection.
# This action is triggered by the reception of an A-ABORT PDU
provider.ToServiceUser.put(provider.primitive)
provider.RemoteClientSocket.close()
provider.RemoteClientSocket = None
def AA_4(provider):
# Issue A-P-ABORT indication primitive.
provider.primitive = DULparameters.A_ABORT_ServiceParameters()
provider.ToServiceUser.put(provider.primitive)
def AA_5(provider):
# Stop ARTIM timer.
provider.Timer.Stop()
def AA_6(provider):
# Ignore PDU.
provider.primitive = None
def AA_7(provider):
# Send A-ABORT PDU.
provider.pdu = PDU.A_ABORT_PDU()
provider.pdu.FromParams(provider.primitive)
provider.RemoteClientSocket.send(provider.pdu.Encode())
def AA_8(provider):
    # Send A-ABORT PDU (service-provider source), issue an A-P-ABORT
# indication, and start ARTIM timer.
# Send A-ABORT PDU
provider.pdu = PDU.A_ABORT_PDU()
provider.pdu.Source = 2
provider.pdu.ReasonDiag = 0 # No reason given
if provider.RemoteClientSocket:
provider.RemoteClientSocket.send(provider.pdu.Encode())
# Issue A-P-ABORT indication
provider.ToServiceUser.put(provider.primitive)
provider.Timer.Start()
# Finite State Machine
# states
states = {
# No association
'Sta1': 'Idle',
# Association establishment
'Sta2': 'Transport Connection Open (Awaiting A-ASSOCIATE-RQ PDU)',
'Sta3': 'Awaiting Local A-ASSOCIATE response primitive (from local user)',
'Sta4': 'Awaiting transport connection opening to complete (from local '
            'transport service)',
'Sta5': 'Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU',
# Data transfer
'Sta6': 'Association established and ready for data transfer',
# Association release
'Sta7': 'Awaiting A-RELEASE-RP PDU',
'Sta8': 'Awaiting local A-RELEASE response primitive (from local user)',
'Sta9': 'Release collision requestor side; awaiting A-RELEASE response '
            'primitive (from local user)',
'Sta10': 'Release collision acceptor side; awaiting A-RELEASE-RP PDU',
'Sta11': 'Release collision requestor side; awaiting A-RELEASE-RP PDU',
'Sta12': 'Release collision acceptor side; awaiting A-RELEASE response '
'primitive (from local user)',
'Sta13': 'Awaiting Transport Connection Close Indication (Association no '
'longer exists)'
}
# actions
actions = {
# Association establishment actions
'AE-1': ('Issue TransportConnect request primitive to local transport '
'service', AE_1, 'Sta4'),
'AE-2': ('Send A_ASSOCIATE-RQ PDU', AE_2, 'Sta5'),
'AE-3': ('Issue A-ASSOCIATE confirmation (accept) primitive', AE_3,
'Sta6'),
'AE-4': ('Issue A-ASSOCIATE confirmation (reject) primitive and close '
'transport connection', AE_4, 'Sta1'),
'AE-5': ('Issue transport connection response primitive; start ARTIM '
'timer', AE_5, 'Sta2'),
'AE-6': ('Check A-ASSOCIATE-RQ', AE_6, ('Sta3', 'Sta13')),
'AE-7': ('Send A-ASSOCIATE-AC PDU', AE_7, 'Sta6'),
'AE-8': ('Send A-ASSOCIATE-RJ PDU', AE_8, 'Sta13'),
# Data transfer related actions
'DT-1': ('Send P-DATA-TF PDU', DT_1, 'Sta6'),
'DT-2': ('Send P-DATA indication primitive', DT_2, 'Sta6'),
    # Association release related actions
'AR-1': ('Send A-RELEASE-RQ PDU', AR_1, 'Sta7'),
'AR-2': ('Send A-RELEASE indication primitive', AR_2, 'Sta8'),
'AR-3': ('Issue A-RELEASE confirmation primitive and close transport '
'connection', AR_3, 'Sta1'),
'AR-4': ('Issue A-RELEASE-RP PDU and start ARTIM timer', AR_4, 'Sta13'),
'AR-5': ('Stop ARTIM timer', AR_5, 'Sta1'),
'AR-6': ('Issue P-DATA indication', AR_6, 'Sta7'),
'AR-7': ('Issue P-DATA-TF PDU', AR_7, 'Sta8'),
'AR-8': ('Issue A-RELEASE indication (release collision)', AR_8,
('Sta9', 'Sta10')),
'AR-9': ('Send A-RELEASE-RP PDU', AR_9, 'Sta11'),
    'AR-10': ('Issue A-RELEASE confirmation primitive', AR_10, 'Sta12'),
# Association abort related actions
'AA-1': ('Send A-ABORT PDU (service-user source) and start (or restart) '
'ARTIM timer', AA_1, 'Sta13'),
'AA-2': ('Stop ARTIM timer if running. Close transport connection', AA_2,
'Sta1'),
'AA-3': ('Issue A-ABORT or A-P-ABORT indication and close transport '
'connection', AA_3, 'Sta1'),
'AA-4': ('Issue A-P-ABORT indication primitive', AA_4, 'Sta1'),
'AA-5': ('Stop ARTIM timer', AA_5, 'Sta1'),
'AA-6': ('Ignore PDU', AA_6, 'Sta13'),
    'AA-7': ('Send A-ABORT PDU', AA_7, 'Sta13'),
'AA-8': ('Send A-ABORT PDU, issue an A-P-ABORT indication and start '
'ARTIM timer', AA_8, 'Sta13')}
# events
events = {
'Evt1': "A-ASSOCIATE request (local user)",
'Evt2': "Transport connect confirmation (local transport service)",
'Evt3': "A-ASSOCIATE-AC PDU (received on transport connection)",
'Evt4': "A-ASSOCIATE-RJ PDU (received on transport connection)",
'Evt5': "Transport connection indication (local transport service)",
'Evt6': "A-ASSOCIATE-RQ PDU (on tranport connection)",
'Evt7': "A-ASSOCIATE response primitive (accept)",
'Evt8': "A-ASSOCIATE response primitive (reject)",
'Evt9': "P-DATA request primitive",
'Evt10': "P-DATA-TF PDU (on transport connection)",
'Evt11': "A-RELEASE request primitive",
'Evt12': "A-RELEASE-RQ PDU (on transport)",
'Evt13': "A-RELEASE-RP PDU (on transport)",
'Evt14': "A-RELEASE response primitive",
'Evt15': "A-ABORT request primitive",
'Evt16': "A-ABORT PDU (on transport)",
'Evt17': "Transport connection closed",
'Evt18': "ARTIM timer expired (rej/rel)",
'Evt19': "Unrecognized/invalid PDU"}
TransitionTable = {
('Evt1', 'Sta1'): 'AE-1',
('Evt2', 'Sta4'): 'AE-2',
('Evt3', 'Sta2'): 'AA-1',
('Evt3', 'Sta3'): 'AA-8',
('Evt3', 'Sta5'): 'AE-3',
('Evt3', 'Sta6'): 'AA-8',
('Evt3', 'Sta7'): 'AA-8',
('Evt3', 'Sta8'): 'AA-8',
('Evt3', 'Sta9'): 'AA-8',
('Evt3', 'Sta10'): 'AA-8',
('Evt3', 'Sta11'): 'AA-8',
('Evt3', 'Sta12'): 'AA-8',
('Evt3', 'Sta13'): 'AA-6',
('Evt4', 'Sta2'): 'AA-1',
('Evt4', 'Sta3'): 'AA-8',
('Evt4', 'Sta5'): 'AE-4',
('Evt4', 'Sta6'): 'AA-8',
('Evt4', 'Sta7'): 'AA-8',
('Evt4', 'Sta8'): 'AA-8',
('Evt4', 'Sta9'): 'AA-8',
('Evt4', 'Sta10'): 'AA-8',
('Evt4', 'Sta11'): 'AA-8',
('Evt4', 'Sta12'): 'AA-8',
('Evt4', 'Sta13'): 'AA-6',
('Evt5', 'Sta1'): 'AE-5',
('Evt6', 'Sta2'): 'AE-6',
('Evt6', 'Sta3'): 'AA-8',
('Evt6', 'Sta5'): 'AA-8',
('Evt6', 'Sta6'): 'AA-8',
('Evt6', 'Sta7'): 'AA-8',
('Evt6', 'Sta8'): 'AA-8',
('Evt6', 'Sta9'): 'AA-8',
('Evt6', 'Sta10'): 'AA-8',
('Evt6', 'Sta11'): 'AA-8',
('Evt6', 'Sta12'): 'AA-8',
('Evt6', 'Sta13'): 'AA-7',
('Evt7', 'Sta3'): 'AE-7',
('Evt8', 'Sta3'): 'AE-8',
('Evt9', 'Sta6'): 'DT-1',
('Evt9', 'Sta8'): 'AR-7',
('Evt10', 'Sta2'): 'AA-1',
('Evt10', 'Sta3'): 'AA-8',
('Evt10', 'Sta5'): 'AA-8',
('Evt10', 'Sta6'): 'DT-2',
('Evt10', 'Sta7'): 'AR-6',
('Evt10', 'Sta8'): 'AA-8',
('Evt10', 'Sta9'): 'AA-8',
('Evt10', 'Sta10'): 'AA-8',
('Evt10', 'Sta11'): 'AA-8',
('Evt10', 'Sta12'): 'AA-8',
('Evt10', 'Sta13'): 'AA-6',
('Evt11', 'Sta6'): 'AR-1',
('Evt12', 'Sta2'): 'AA-1',
('Evt12', 'Sta3'): 'AA-8',
('Evt12', 'Sta5'): 'AA-8',
('Evt12', 'Sta6'): 'AR-2',
('Evt12', 'Sta7'): 'AR-8',
('Evt12', 'Sta8'): 'AA-8',
('Evt12', 'Sta9'): 'AA-8',
('Evt12', 'Sta10'): 'AA-8',
('Evt12', 'Sta11'): 'AA-8',
('Evt12', 'Sta12'): 'AA-8',
('Evt12', 'Sta13'): 'AA-6',
('Evt13', 'Sta2'): 'AA-1',
('Evt13', 'Sta3'): 'AA-8',
('Evt13', 'Sta5'): 'AA-8',
('Evt13', 'Sta6'): 'AA-8',
('Evt13', 'Sta7'): 'AR-3',
('Evt13', 'Sta8'): 'AA-8',
('Evt13', 'Sta9'): 'AA-8',
('Evt13', 'Sta10'): 'AR-10',
('Evt13', 'Sta11'): 'AR-3',
('Evt13', 'Sta12'): 'AA-8',
('Evt13', 'Sta13'): 'AA-6',
('Evt14', 'Sta8'): 'AR-4',
('Evt14', 'Sta9'): 'AR-9',
('Evt14', 'Sta12'): 'AR-4',
('Evt15', 'Sta3'): 'AA-1',
('Evt15', 'Sta4'): 'AA-2',
('Evt15', 'Sta5'): 'AA-1',
('Evt15', 'Sta6'): 'AA-1',
('Evt15', 'Sta7'): 'AA-1',
('Evt15', 'Sta8'): 'AA-1',
('Evt15', 'Sta9'): 'AA-1',
('Evt15', 'Sta10'): 'AA-1',
('Evt15', 'Sta11'): 'AA-1',
('Evt15', 'Sta12'): 'AA-1',
('Evt16', 'Sta2'): 'AA-2',
('Evt16', 'Sta3'): 'AA-3',
('Evt16', 'Sta5'): 'AA-3',
('Evt16', 'Sta6'): 'AA-3',
('Evt16', 'Sta7'): 'AA-3',
('Evt16', 'Sta8'): 'AA-3',
('Evt16', 'Sta9'): 'AA-3',
('Evt16', 'Sta10'): 'AA-3',
('Evt16', 'Sta11'): 'AA-3',
('Evt16', 'Sta12'): 'AA-3',
('Evt16', 'Sta13'): 'AA-2',
('Evt17', 'Sta2'): 'AA-5',
('Evt17', 'Sta3'): 'AA-4',
('Evt17', 'Sta4'): 'AA-4',
('Evt17', 'Sta5'): 'AA-4',
('Evt17', 'Sta6'): 'AA-4',
('Evt17', 'Sta7'): 'AA-4',
('Evt17', 'Sta8'): 'AA-4',
('Evt17', 'Sta9'): 'AA-4',
('Evt17', 'Sta10'): 'AA-4',
('Evt17', 'Sta11'): 'AA-4',
('Evt17', 'Sta12'): 'AA-4',
('Evt17', 'Sta13'): 'AR-5',
('Evt18', 'Sta2'): 'AA-2',
('Evt18', 'Sta13'): 'AA-2',
('Evt19', 'Sta2'): 'AA-1',
('Evt19', 'Sta3'): 'AA-8',
('Evt19', 'Sta5'): 'AA-8',
('Evt19', 'Sta6'): 'AA-8',
('Evt19', 'Sta7'): 'AA-8',
('Evt19', 'Sta8'): 'AA-8',
('Evt19', 'Sta9'): 'AA-8',
('Evt19', 'Sta10'): 'AA-8',
('Evt19', 'Sta11'): 'AA-8',
('Evt19', 'Sta12'): 'AA-8',
('Evt19', 'Sta13'): 'AA-7'}
class StateMachine:
def __init__(self, provider):
self.CurrentState = 'Sta1'
self.provider = provider
def Action(self, event, c):
""" Execute the action triggered by event """
try:
action_name = TransitionTable[(event, self.CurrentState)]
        except KeyError:
logger.debug('%s: current state is: %s %s' %
(self.provider.name, self.CurrentState,
states[self.CurrentState]))
logger.debug('%s: event: %s %s' %
(self.provider.name, event, events[event]))
            raise
action = actions[action_name]
try:
logger.debug('')
logger.debug('%s: current state is: %s %s' %
(self.provider.name, self.CurrentState,
states[self.CurrentState]))
logger.debug('%s: event: %s %s' %
(self.provider.name, event, events[event]))
logger.debug('%s: entering action: (%s, %s) %s %s' %
(self.provider.name, event, self.CurrentState,
action_name, actions[action_name][0]))
action[1](c)
#if type(action[2]) != type(()):
if not isinstance(action[2], tuple):
# only one next state possible
self.CurrentState = action[2]
logger.debug('%s: action complete. State is now %s %s' %
(self.provider.name, self.CurrentState,
states[self.CurrentState]))
        except Exception:
            # Shut the provider down before propagating the error raised by
            # the action.
            self.provider.Kill()
            raise
def NextState(self, state):
self.CurrentState = state
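# Minimal usage sketch (illustrative only; 'provider' is assumed to be a DUL
# service provider object exposing the attributes the actions above rely on:
# primitive, pdu, RemoteClientSocket, ToServiceUser, Timer and name):
#
#     sm = StateMachine(provider)      # starts in 'Sta1' (Idle)
#     sm.Action('Evt1', provider)      # A-ASSOCIATE request -> AE-1 -> 'Sta4'
#     sm.Action('Evt2', provider)      # transport connected -> AE-2 -> 'Sta5'
#     sm.Action('Evt3', provider)      # A-ASSOCIATE-AC PDU  -> AE-3 -> 'Sta6'
#
# Each call looks up (event, current state) in TransitionTable, runs the
# bound action function and, for single-valued entries, moves to the next
# state declared in 'actions'.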
|
|
import sublime
import sublime_plugin
import os
import re
import time
import subprocess
import json
import datetime, calendar
PLUGIN_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
HELPER_DIRECTORY = PLUGIN_DIRECTORY + '/'
FLAT_YAML_KEYS = HELPER_DIRECTORY + 'flat_yaml_keys.rb'
# stolen from https://github.com/alienhard/SublimeAllAutocomplete
# limits to prevent bogging down the system
MIN_WORD_SIZE = 3
MAX_WORD_SIZE = 50
MAX_VIEWS = 20
MAX_WORDS_PER_VIEW = 100
MAX_FIX_TIME_SECS_PER_VIEW = 0.01
class YamlChecker:
def __init__(self, locale_path):
self.locale_path = locale_path
self.keys = []
def reload(self):
self.timestamp = datetime.datetime.now(datetime.timezone.utc).timestamp()
self.keys = self.yaml_keys()
def yaml_keys(self):
command = ['ruby', FLAT_YAML_KEYS, self.locale_path]
json_keys = subprocess.check_output(command).decode("utf-8")
return json.loads(json_keys)
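    # Sketch of the expected helper output (an assumption about the Ruby
    # script, not verified here): flat_yaml_keys.rb is expected to print a
    # JSON array of flattened, dot-separated translation keys, e.g.
    #     ["home.title", "activerecord.errors.blank", ...]
    # which is what the startswith() matching in on_query_completions below
    # relies on.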
class CorrectAutoCompletionCommand(sublime_plugin.TextCommand):
def run(self, edit, col=None):
"""
        Replaces the content of a quoted string with the chosen completion, identified using the start column.
        e.g. "keykey.foo" can be replaced with "key.foo".
        This is necessary because Sublime's autocompletion inserts the value rather than replacing what's already there.
"""
view = sublime.active_window().active_view()
sel = view.sel()[0].a
(row, _) = view.rowcol(sel)
line = view.substr(view.line(sel))
end_index = line.find("'", col)
if end_index == -1:
end_index = line.find('"', col)
start_index = line.rfind("'", 0, col)
if start_index == -1:
start_index = line.rfind('"', 0, col)
completion = line[col:end_index]
        start_point = view.text_point(row, start_index + 1)
        end_point = view.text_point(row, end_index)
region = sublime.Region(start_point, end_point)
view.replace(edit, region, completion)
class RubyI18nAutocomplete(sublime_plugin.EventListener):
def on_activated(self, view):
self.key_loader = YamlChecker(self.locale_path())
self.key_loader.reload()
self.completion_start_col = None
def on_commit_completion(self):
print("foo")
def on_post_text_command(self, view, command_name, args):
if command_name != "commit_completion":
return None
valid_scopes = self.get_setting('ri18n_valid_scopes',view)
sel = view.sel()[0].a
if not any(s in view.scope_name(sel) for s in valid_scopes):
return None
if self.completion_start_col:
view.run_command("correct_auto_completion", { 'col': self.completion_start_col })
self.completion_start_col = None
def get_setting(self, string, view=None):
if view and view.settings().get(string):
return view.settings().get(string)
else:
return sublime.load_settings('i18n.sublime-settings').get(string)
def locale_path(self):
locales_directory = ''
for path in sublime.active_window().folders():
locales_directory = path + '/config/locales'
break
return locales_directory
def quoted_string_region(self, view):
sel = view.sel()[0].a
line = view.substr(view.line(sel))
(row, col) = view.rowcol(sel)
end_index = line.find("'", col)
if end_index == -1:
end_index = line.find('"', col)
start_index = line.rfind("'", 0, col)
if start_index == -1:
start_index = line.rfind('"', 0, col)
return (start_index, end_index, col)
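    # Illustrative example: for a buffer line like  t('home.ti|tle')  with the
    # cursor at '|', this returns the index of the opening quote, the index of
    # the closing quote and the cursor column, so the enclosed text
    # ('home.title') can be matched against the YAML keys.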
def on_query_completions(self, view, prefix, locations):
# don't do anything unless we are inside ruby strings
valid_scopes = self.get_setting('ri18n_valid_scopes',view)
sel = view.sel()[0].a
# don't do anything if we have nothing
if len(self.key_loader.keys) == 0:
return []
if not any(s in view.scope_name(sel) for s in valid_scopes):
return []
line = view.substr(view.line(sel))
(start_index, end_index, col) = self.quoted_string_region(view)
quoted_string = line[start_index+1:end_index]
self.completion_start_col = col - len(prefix)
words = self.word_completion(view, prefix, locations)
for key in self.key_loader.keys:
if key.startswith(quoted_string) or len(quoted_string) == 0:
words.append(key)
matches = [(w, w) for w in words]
return matches
    # All-word completion (adapted from SublimeAllAutocomplete, per the note
    # at the top of this file); this part might be removed later on.
def word_completion(self, view, prefix, locations):
words = []
# Limit number of views but always include the active view. This
# view goes first to prioritize matches close to cursor position.
other_views = [v for v in sublime.active_window().views() if v.id != view.id]
views = [view] + other_views
views = views[0:MAX_VIEWS]
for v in views:
if len(locations) > 0 and v.id == view.id:
view_words = v.extract_completions(prefix, locations[0])
else:
view_words = v.extract_completions(prefix)
view_words = self.filter_words(view_words)
view_words = self.fix_truncation(v, view_words)
words += view_words
words = self.without_duplicates(words)
return words
def filter_words(self, words):
words = words[0:MAX_WORDS_PER_VIEW]
return [w for w in words if MIN_WORD_SIZE <= len(w) <= MAX_WORD_SIZE]
# keeps first instance of every word and retains the original order
# (n^2 but should not be a problem as len(words) <= MAX_VIEWS*MAX_WORDS_PER_VIEW)
def without_duplicates(self, words):
result = []
for w in words:
if w not in result:
result.append(w)
return result
# Ugly workaround for truncation bug in Sublime when using view.extract_completions()
# in some types of files.
def fix_truncation(self, view, words):
fixed_words = []
start_time = time.time()
for i, w in enumerate(words):
            # The word is truncated if and only if it cannot be found with a
            # word boundary before and after. This fails to match strings with
            # trailing non-alpha chars, like 'foo?' or 'bar!', which are
            # common for instance in Ruby.
match = view.find(r'\b' + re.escape(w) + r'\b', 0)
truncated = match.empty()
if truncated:
                # Truncation is always by a single character, so we extend the word by one word character before a word boundary.
extended_words = []
view.find_all(r'\b' + re.escape(w) + r'\w\b', 0, "$0", extended_words)
if len(extended_words) > 0:
fixed_words += extended_words
else:
# to compensate for the missing match problem mentioned above, just
# use the old word if we didn't find any extended matches
fixed_words.append(w)
else:
                # Pass through non-truncated words.
fixed_words.append(w)
# if too much time is spent in here, bail out,
# and don't bother fixing the remaining words
if time.time() - start_time > MAX_FIX_TIME_SECS_PER_VIEW:
return fixed_words + words[i+1:]
return fixed_words
|
|
#! /usr/bin/env python
#
# example2_tk.py -- Simple, configurable FITS viewer.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, os
import logging
import Tkinter
from tkFileDialog import askopenfilename
from ginga import AstroImage
from ginga.tkw.ImageViewTk import ImageViewCanvas
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class FitsViewer(object):
def __init__(self, logger):
self.logger = logger
self.drawcolors = ['white', 'black', 'red', 'yellow', 'blue', 'green']
root = Tkinter.Tk()
root.title("ImageViewTk Example")
#root.set_border_width(2)
#root.connect("delete_event", lambda w, e: self.quit(w))
self.root = root
#self.select = FileSelection.FileSelection()
vbox = Tkinter.Frame(root, relief=Tkinter.RAISED, borderwidth=1)
vbox.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=1)
canvas = Tkinter.Canvas(vbox, bg="grey", height=512, width=512)
canvas.pack(side=Tkinter.TOP, fill=Tkinter.BOTH, expand=1)
fi = ImageViewCanvas(logger)
fi.set_widget(canvas)
#fi.set_redraw_lag(0.0)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.enable_draw(False)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
fi.show_pan_mark(True)
self.fitsimage = fi
bd = fi.get_bindings()
bd.enable_all(True)
# canvas that we will draw on
DrawingCanvas = fi.getDrawClass('drawingcanvas')
canvas = DrawingCanvas()
canvas.enable_draw(True)
#canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='blue')
canvas.setSurface(fi)
self.canvas = canvas
# add canvas to view
fi.add(canvas)
canvas.ui_setActive(True)
fi.configure(512, 512)
hbox = Tkinter.Frame(root)
hbox.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=0)
self.readout = Tkinter.Label(root, text='')
self.readout.pack(side=Tkinter.BOTTOM, fill=Tkinter.X, expand=0)
self.drawtypes = fi.get_drawtypes()
## wdrawtype = ttk.Combobox(root, values=self.drawtypes,
## command=self.set_drawparams)
## index = self.drawtypes.index('ruler')
## wdrawtype.current(index)
wdrawtype = Tkinter.Entry(hbox, width=12)
wdrawtype.insert(0, 'rectangle')
wdrawtype.bind("<Return>", self.set_drawparams)
self.wdrawtype = wdrawtype
# wdrawcolor = ttk.Combobox(root, values=self.drawcolors,
# command=self.set_drawparams)
# index = self.drawcolors.index('blue')
# wdrawcolor.current(index)
wdrawcolor = Tkinter.Entry(hbox, width=12)
wdrawcolor.insert(0, 'blue')
wdrawcolor.bind("<Return>", self.set_drawparams)
self.wdrawcolor = wdrawcolor
self.vfill = Tkinter.IntVar()
wfill = Tkinter.Checkbutton(hbox, text="Fill", variable=self.vfill)
self.wfill = wfill
walpha = Tkinter.Entry(hbox, width=12)
walpha.insert(0, '1.0')
walpha.bind("<Return>", self.set_drawparams)
self.walpha = walpha
wclear = Tkinter.Button(hbox, text="Clear Canvas",
command=self.clear_canvas)
wopen = Tkinter.Button(hbox, text="Open File",
command=self.open_file)
wquit = Tkinter.Button(hbox, text="Quit",
command=lambda: self.quit(root))
for w in (wquit, wclear, walpha, Tkinter.Label(hbox, text='Alpha:'),
wfill, wdrawcolor, wdrawtype, wopen):
w.pack(side=Tkinter.RIGHT)
def get_widget(self):
return self.root
def set_drawparams(self, evt):
kind = self.wdrawtype.get()
color = self.wdrawcolor.get()
alpha = float(self.walpha.get())
fill = self.vfill.get() != 0
params = { 'color': color,
'alpha': alpha,
#'cap': 'ball',
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.deleteAllObjects()
def load_file(self, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.fitsimage.set_image(image)
self.root.title(filepath)
def open_file(self):
filename = askopenfilename(filetypes=[("allfiles","*"),
("fitsfiles","*.fits")])
self.load_file(filename)
def motion(self, fitsimage, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = fitsimage.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = fitsimage.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
        # Calculate WCS RA/Dec
try:
# NOTE: image function operates on DATA space coords
image = fitsimage.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warn("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.config(text=text)
def quit(self, root):
root.destroy()
return True
def main(options, args):
logger = logging.getLogger("example1")
logger.setLevel(logging.INFO)
fmt = logging.Formatter(STD_FORMAT)
stderrHdlr = logging.StreamHandler()
stderrHdlr.setFormatter(fmt)
logger.addHandler(stderrHdlr)
fv = FitsViewer(logger)
top = fv.get_widget()
if len(args) > 0:
fv.load_file(args[0])
top.mainloop()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
optprs = OptionParser(usage=usage, version=('%%prog'))
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=logging.INFO,
help="Set logging level to LEVEL")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print("%s profile:" % sys.argv[0])
profile.run('main(options, args)')
else:
main(options, args)
# END
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/tlvs/tlv/ero-path/segments/segment/ipv4-segment/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the IPv4 segment of the ERO
"""
__slots__ = ("_path_helper", "_extmethods", "__address")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"sid-label-binding",
"tlvs",
"tlv",
"ero-path",
"segments",
"segment",
"ipv4-segment",
"state",
]
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment/state/address (inet:ipv4-address-no-zone)
YANG Description: The IPv4 address of the hop within the ERO
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment/state/address (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
YANG Description: The IPv4 address of the hop within the ERO
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """address must be of a type compatible with inet:ipv4-address-no-zone""",
"defined-type": "inet:ipv4-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
}
)
self.__address = t
if hasattr(self, "_set"):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
address = __builtin__.property(_get_address)
_pyangbind_elements = OrderedDict([("address", address)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/tlvs/tlv/ero-path/segments/segment/ipv4-segment/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the IPv4 segment of the ERO
"""
__slots__ = ("_path_helper", "_extmethods", "__address")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-prefix",
"tlvs",
"tlv",
"sid-label-binding",
"tlvs",
"tlv",
"ero-path",
"segments",
"segment",
"ipv4-segment",
"state",
]
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment/state/address (inet:ipv4-address-no-zone)
YANG Description: The IPv4 address of the hop within the ERO
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_path/segments/segment/ipv4_segment/state/address (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
YANG Description: The IPv4 address of the hop within the ERO
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """address must be of a type compatible with inet:ipv4-address-no-zone""",
"defined-type": "inet:ipv4-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
}
)
self.__address = t
if hasattr(self, "_set"):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
address = __builtin__.property(_get_address)
_pyangbind_elements = OrderedDict([("address", address)])
|
|
from __future__ import unicode_literals
import os
import fcntl
import select
import signal
import errno
import threading
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.utils import DummyContext, in_main_thread
from .base import EventLoop, INPUT_TIMEOUT
from .callbacks import EventLoopCallbacks
from .inputhook import InputHookContext
from .posix_utils import PosixStdinReader
from .utils import TimeIt
__all__ = (
'PosixEventLoop',
)
class PosixEventLoop(EventLoop):
"""
    Event loop for posix systems (Linux, Mac OS X).
"""
def __init__(self, inputhook=None):
assert inputhook is None or callable(inputhook)
self.running = False
self.closed = False
self._running = False
self._callbacks = None
self._calls_from_executor = []
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
# Create inputhook context.
self._inputhook_context = InputHookContext(inputhook) if inputhook else None
def run(self, stdin, callbacks):
"""
The input 'event loop'.
"""
assert isinstance(callbacks, EventLoopCallbacks)
assert not self._running
if self.closed:
raise Exception('Event loop already closed.')
self._running = True
self._callbacks = callbacks
inputstream = InputStream(callbacks.feed_key)
current_timeout = INPUT_TIMEOUT
# Create reader class.
stdin_reader = PosixStdinReader(stdin)
# Only attach SIGWINCH signal handler in main thread.
# (It's not possible to attach signal handlers in other threads. In
        # that case we should rely on the main thread to call this manually
# instead.)
if in_main_thread():
ctx = call_on_sigwinch(self.received_winch)
else:
ctx = DummyContext()
with ctx:
while self._running:
# Call inputhook.
with TimeIt() as inputhook_timer:
if self._inputhook_context:
def ready(wait):
" True when there is input ready. The inputhook should return control. "
return self._ready_for_reading(stdin, current_timeout if wait else 0) != []
self._inputhook_context.call_inputhook(ready)
# Calculate remaining timeout. (The inputhook consumed some of the time.)
if current_timeout is None:
remaining_timeout = None
else:
remaining_timeout = max(0, current_timeout - inputhook_timer.duration)
# Wait until input is ready.
r = self._ready_for_reading(stdin, remaining_timeout)
# If we got a character, feed it to the input stream. If we got
# none, it means we got a repaint request.
if stdin in r:
# Feed input text.
data = stdin_reader.read()
inputstream.feed(data)
callbacks.redraw()
# Set timeout again.
current_timeout = INPUT_TIMEOUT
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif self._schedule_pipe[0] in r:
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c()
else:
# Flush all pending keys on a timeout and redraw. (This is
# most important to flush the vt100 escape key early when
# nothing else follows.)
inputstream.flush()
callbacks.redraw()
# Fire input timeout event.
callbacks.input_timeout()
current_timeout = None
self._callbacks = None
def _ready_for_reading(self, stdin, timeout=None):
"""
Return the file descriptors that are ready for reading.
"""
        r, _, _ = _select([stdin, self._schedule_pipe[0]], [], [], timeout)
return r
def received_winch(self):
"""
Notify the event loop that SIGWINCH has been received
"""
# Process signal asynchronously, because this handler can write to the
        # output, and doing this inside the signal handler can easily cause
        # reentrant calls, giving runtime errors.
        # Further, this has to be thread safe. When the CommandLineInterface
# runs not in the main thread, this function still has to be called
# from the main thread. (The only place where we can install signal
# handlers.)
def process_winch():
if self._callbacks:
self._callbacks.terminal_size_changed()
self.call_from_executor(process_winch)
def run_in_executor(self, callback):
"""
Run a long running function in a background thread.
(This is recommended for code that could block the event loop.)
Similar to Twisted's ``deferToThread``.
"""
# Wait until the main thread is idle.
# We start the thread by using `call_from_executor`. The event loop
# favours processing input over `calls_from_executor`, so the thread
# will not start until there is no more input to process and the main
# thread becomes idle for an instant. This is good, because Python
# threading favours CPU over I/O -- an autocompletion thread in the
        # background would cause a significant slowdown of the main thread.
        # It is mostly noticeable when pasting large portions of text while
        # real-time autocompletion is running as you type.
def start_executor():
threading.Thread(target=callback).start()
self.call_from_executor(start_executor)
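    # Usage sketch (illustrative; 'heavy_work' and 'apply_result' are
    # hypothetical helpers, not part of this module):
    #
    #     def job():
    #         result = heavy_work()  # runs in the background thread
    #         loop.call_from_executor(lambda: apply_result(result))
    #     loop.run_in_executor(job)
    #
    # Heavy work happens off the event loop; updates are routed back through
    # call_from_executor, which wakes the loop via the schedule pipe.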
def call_from_executor(self, callback):
"""
Call this function in the main event loop.
Similar to Twisted's ``callFromThread``.
"""
self._calls_from_executor.append(callback)
if self._schedule_pipe:
os.write(self._schedule_pipe[1], b'x')
def stop(self):
"""
Stop the event loop.
"""
self._running = False
def close(self):
self.closed = True
# Close pipes.
schedule_pipe = self._schedule_pipe
self._schedule_pipe = None
if schedule_pipe:
os.close(schedule_pipe[0])
os.close(schedule_pipe[1])
if self._inputhook_context:
self._inputhook_context.close()
def _select(*args, **kwargs):
"""
Wrapper around select.select.
When the SIGWINCH signal is handled, other system calls, like select
are aborted in Python. This wrapper will retry the system call.
"""
while True:
try:
return select.select(*args, **kwargs)
except select.error as e:
# Retry select call when EINTR
if e.args and e.args[0] == errno.EINTR:
continue
else:
raise
class call_on_sigwinch(object):
"""
    Context manager which installs a SIGWINCH callback.
(This signal occurs when the terminal size changes.)
"""
def __init__(self, callback):
self.callback = callback
self.previous_callback = None
def __enter__(self):
self.previous_callback = signal.signal(signal.SIGWINCH, lambda *a: self.callback())
def __exit__(self, *a, **kw):
signal.signal(signal.SIGWINCH, self.previous_callback)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProductDownloadableContent'
db.create_table(u'shop_productdownloadablecontent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('_order', self.gf('django.db.models.fields.IntegerField')(null=True)),
('file', self.gf('mezzanine.core.fields.FileField')(max_length=255, null=True, blank=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='downloadables', to=orm['shop.Product'])),
))
db.send_create_signal(u'shop', ['ProductDownloadableContent'])
def backwards(self, orm):
# Deleting model 'ProductDownloadableContent'
db.delete_table(u'shop_productdownloadablecontent')
models = {
u'bccf.pagemarquee': {
'Meta': {'object_name': 'PageMarquee'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'pages.page': {
'Meta': {'ordering': "(u'titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'shop.cart': {
'Meta': {'object_name': 'Cart'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
u'shop.category': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Category', '_ormbases': [u'pages.Page']},
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': u"orm['shop.ProductOption']"}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
u'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': u"orm['shop.Category']"}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'paypal'", 'max_length': '6'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tax_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'tax_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'shop.product': {
'Meta': {'object_name': 'Product'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': u"orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': u"orm['shop.Product']"})
},
u'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': u"orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'shop.productdownloadablecontent': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'ProductDownloadableContent'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'file': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'downloadables'", 'to': u"orm['shop.Product']"})
},
u'shop.productimage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'ProductImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['shop.Product']"})
},
u'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'shop.productvariation': {
'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option3': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option4': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option5': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': u"orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
u'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': u"orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['shop']
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) The python-semanticversion project
# This code is distributed under the two-clause BSD License.
"""Test the various functions from 'base'."""
import unittest
import sys
from semantic_version import base
class TopLevelTestCase(unittest.TestCase):
"""Test module-level functions."""
if sys.version_info[0] <= 2:
import contextlib
@contextlib.contextmanager
def subTest(self, **kwargs):
yield
versions = (
('0.1.0', '0.1.1', -1),
('0.1.1', '0.1.1', 0),
('0.1.1', '0.1.0', 1),
('0.1.0-alpha', '0.1.0', -1),
('0.1.0-alpha+2', '0.1.0-alpha', NotImplemented),
)
def test_compare(self):
for a, b, expected in self.versions:
with self.subTest(a=a, b=b):
result = base.compare(a, b)
self.assertEqual(
expected, result,
"compare(%r, %r) should be %r instead of %r" % (a, b, expected, result))
matches = (
('>=0.1.1', '0.1.2'),
('>=0.1.1', '0.1.1'),
('>=0.1.1', '0.1.2-alpha'),
('>=0.1.1,!=0.2.0', '0.2.1'),
)
def test_match(self):
for spec, version in self.matches:
with self.subTest(spec=spec, version=version):
self.assertTrue(
base.match(spec, version),
"%r should accept %r" % (spec, version))
valid_strings = (
'1.0.0-alpha',
'1.0.0-alpha.1',
'1.0.0-beta.2',
'1.0.0-beta.11',
'1.0.0-rc.1',
'1.0.0-rc.1+build.1',
'1.0.0',
'1.0.0+0.3.7',
'1.3.7+build',
'1.3.7+build.2.b8f12d7',
'1.3.7+build.11.e0f985a',
'1.1.1',
'1.1.2',
'1.1.3-rc4.5',
'1.1.3-rc42.3-14-15.24+build.2012-04-13.223',
'1.1.3+build.2012-04-13.HUY.alpha-12.1',
)
def test_validate_valid(self):
for version in self.valid_strings:
with self.subTest(version=version):
self.assertTrue(
base.validate(version),
"%r should be a valid version" % (version,))
invalid_strings = (
'1',
'v1',
'1.2.3.4',
'1.2',
'1.2a3',
'1.2.3a4',
'v12.34.5',
'1.2.3+4+5',
)
def test_validate_invalid(self):
for version in self.invalid_strings:
with self.subTest(version=version):
self.assertFalse(
base.validate(version),
"%r should not be a valid version" % (version,))
class VersionTestCase(unittest.TestCase):
if sys.version_info[0] <= 2:
import contextlib
@contextlib.contextmanager
def subTest(self, **kwargs):
yield
versions = {
'1.0.0-alpha': (1, 0, 0, ('alpha',), ()),
'1.0.0-alpha.1': (1, 0, 0, ('alpha', '1'), ()),
'1.0.0-beta.2': (1, 0, 0, ('beta', '2'), ()),
'1.0.0-beta.11': (1, 0, 0, ('beta', '11'), ()),
'1.0.0-rc.1': (1, 0, 0, ('rc', '1'), ()),
'1.0.0-rc.1+build.1': (1, 0, 0, ('rc', '1'), ('build', '1')),
'1.0.0': (1, 0, 0, (), ()),
'1.0.0+0.3.7': (1, 0, 0, (), ('0', '3', '7')),
'1.3.7+build': (1, 3, 7, (), ('build',)),
'1.3.7+build.2.b8f12d7': (1, 3, 7, (), ('build', '2', 'b8f12d7')),
'1.3.7+build.11.e0f985a': (1, 3, 7, (), ('build', '11', 'e0f985a')),
'1.1.1': (1, 1, 1, (), ()),
'1.1.2': (1, 1, 2, (), ()),
'1.1.3-rc4.5': (1, 1, 3, ('rc4', '5'), ()),
'1.1.3-rc42.3-14-15.24+build.2012-04-13.223':
(1, 1, 3, ('rc42', '3-14-15', '24'), ('build', '2012-04-13', '223')),
'1.1.3+build.2012-04-13.HUY.alpha-12.1':
(1, 1, 3, (), ('build', '2012-04-13', 'HUY', 'alpha-12', '1')),
}
def test_parsing(self):
for text, expected_fields in self.versions.items():
with self.subTest(text=text):
version = base.Version(text)
actual_fields = (
version.major, version.minor, version.patch,
version.prerelease, version.build)
self.assertEqual(expected_fields, actual_fields)
def test_str(self):
for text in self.versions:
with self.subTest(text=text):
version = base.Version(text)
self.assertEqual(text, str(version))
self.assertEqual("Version('%s')" % text, repr(version))
def test_compare_to_self(self):
for text in self.versions:
with self.subTest(text=text):
self.assertEqual(base.Version(text), base.Version(text))
self.assertNotEqual(text, base.Version(text))
partial_versions = {
'1.1': (1, 1, None, None, None),
'2': (2, None, None, None, None),
'1.0.0-alpha': (1, 0, 0, ('alpha',), None),
'1.0.0-alpha.1': (1, 0, 0, ('alpha', '1'), None),
'1.0.0-beta.2': (1, 0, 0, ('beta', '2'), None),
'1.0.0-beta.11': (1, 0, 0, ('beta', '11'), None),
'1.0.0-rc.1': (1, 0, 0, ('rc', '1'), None),
'1.0.0': (1, 0, 0, None, None),
'1.1.1': (1, 1, 1, None, None),
'1.1.2': (1, 1, 2, None, None),
'1.1.3-rc4.5': (1, 1, 3, ('rc4', '5'), None),
'1.0.0-': (1, 0, 0, (), None),
'1.0.0-rc.1+build.1': (1, 0, 0, ('rc', '1'), ('build', '1')),
'1.0.0+0.3.7': (1, 0, 0, (), ('0', '3', '7')),
'1.3.7+build': (1, 3, 7, (), ('build',)),
'1.3.7+build.2.b8f12d7': (1, 3, 7, (), ('build', '2', 'b8f12d7')),
'1.3.7+build.11.e0f985a': (1, 3, 7, (), ('build', '11', 'e0f985a')),
'1.1.3-rc42.3-14-15.24+build.2012-04-13.223':
(1, 1, 3, ('rc42', '3-14-15', '24'), ('build', '2012-04-13', '223')),
'1.1.3+build.2012-04-13.HUY.alpha-12.1':
(1, 1, 3, (), ('build', '2012-04-13', 'HUY', 'alpha-12', '1')),
}
def test_parsing_partials(self):
for text, expected_fields in self.partial_versions.items():
with self.subTest(text=text):
version = base.Version(text, partial=True)
actual_fields = (
version.major, version.minor, version.patch,
version.prerelease, version.build)
self.assertEqual(expected_fields, actual_fields)
self.assertTrue(version.partial, "%r should have partial=True" % version)
def test_str_partials(self):
for text in self.partial_versions:
with self.subTest(text=text):
version = base.Version(text, partial=True)
self.assertEqual(text, str(version))
self.assertEqual("Version('%s', partial=True)" % text, repr(version))
def test_compare_partial_to_self(self):
for text in self.partial_versions:
with self.subTest(text=text):
self.assertEqual(
base.Version(text, partial=True),
base.Version(text, partial=True))
self.assertNotEqual(text, base.Version(text, partial=True))
def test_hash(self):
self.assertEqual(
1,
len(set([base.Version('0.1.0'), base.Version('0.1.0')])))
self.assertEqual(
2,
len(set([base.Version('0.1.0'), base.Version('0.1.0', partial=True)])))
# A fully-defined 'partial' version isn't actually partial.
self.assertEqual(
1,
len(set([
base.Version('0.1.0-a1+34'),
base.Version('0.1.0-a1+34', partial=True)
]))
)
@unittest.skipIf(sys.version_info[0] <= 2, "Comparisons don't raise TypeError in Python 2")
def test_invalid_comparisons(self):
v = base.Version('0.1.0')
with self.assertRaises(TypeError):
v < '0.1.0'
with self.assertRaises(TypeError):
v <= '0.1.0'
with self.assertRaises(TypeError):
v > '0.1.0'
with self.assertRaises(TypeError):
v >= '0.1.0'
self.assertTrue(v != '0.1.0')
self.assertFalse(v == '0.1.0')
def test_bump_clean_versions(self):
# We test each property explicitly, as the == comparator for versions
# does not distinguish between prereleases or builds for equality.
v = base.Version('1.0.0+build')
v = v.next_major()
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.0+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.0+build')
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 1)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.1.0+build')
v = v.next_major()
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.1.0+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 2)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.1.0+build')
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
self.assertEqual(v.patch, 1)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.1+build')
v = v.next_major()
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.1+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.1+build')
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 2)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
def test_bump_prerelease_versions(self):
# We test each property explicitly, as the == comparator for versions
# does not distinguish between prereleases or builds for equality.
v = base.Version('1.0.0-pre+build')
v = v.next_major()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.0-pre+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.0-pre+build')
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.1.0-pre+build')
v = v.next_major()
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.1.0-pre+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.1.0-pre+build')
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.1-pre+build')
v = v.next_major()
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.1-pre+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
v = base.Version('1.0.1-pre+build')
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 0)
self.assertEqual(v.patch, 1)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
class SpecItemTestCase(unittest.TestCase):
if sys.version_info[0] <= 2:
import contextlib
@contextlib.contextmanager
def subTest(self, **kwargs):
yield
invalids = [
'<=0.1.1+build3',
'<=0.1.1+',
'>0.2.3-rc2+',
]
def test_invalids(self):
for invalid in self.invalids:
with self.subTest(invalid=invalid):
with self.assertRaises(ValueError, msg="SpecItem(%r) should be invalid" % invalid):
base.SpecItem(invalid)
components = {
'==0.1.0': (base.SpecItem.KIND_EQUAL, 0, 1, 0, None, None),
'==0.1.2-rc3': (base.SpecItem.KIND_EQUAL, 0, 1, 2, ('rc3',), None),
'==0.1.2+build3.14': (base.SpecItem.KIND_EQUAL, 0, 1, 2, (), ('build3', '14')),
'<=0.1.1': (base.SpecItem.KIND_LTE, 0, 1, 1, None, None),
'<0.1.1': (base.SpecItem.KIND_LT, 0, 1, 1, None, None),
'!=0.1.1+': (base.SpecItem.KIND_NEQ, 0, 1, 1, (), ()),
'<=0.1.1-': (base.SpecItem.KIND_LTE, 0, 1, 1, (), None),
'>=0.2.3-rc2': (base.SpecItem.KIND_GTE, 0, 2, 3, ('rc2',), None),
'>=2.0.0': (base.SpecItem.KIND_GTE, 2, 0, 0, None, None),
'!=0.1.1+rc3': (base.SpecItem.KIND_NEQ, 0, 1, 1, (), ('rc3',)),
'!=0.3.0': (base.SpecItem.KIND_NEQ, 0, 3, 0, None, None),
'=0.3.0': (base.SpecItem.KIND_EQUAL, 0, 3, 0, None, None),
'0.3.0': (base.SpecItem.KIND_EQUAL, 0, 3, 0, None, None),
'~0.1.2': (base.SpecItem.KIND_TILDE, 0, 1, 2, None, None),
'^0.1.3': (base.SpecItem.KIND_CARET, 0, 1, 3, None, None),
}
def test_components(self):
for spec_text, components in self.components.items():
with self.subTest(spec_text=spec_text):
kind, major, minor, patch, prerelease, build = components
spec = base.SpecItem(spec_text)
self.assertEqual(kind, spec.kind)
self.assertEqual(major, spec.spec.major)
self.assertEqual(minor, spec.spec.minor)
self.assertEqual(patch, spec.spec.patch)
self.assertEqual(prerelease, spec.spec.prerelease)
self.assertEqual(build, spec.spec.build)
matches = {
'==0.1.0': (
['0.1.0', '0.1.0+build1'],
['0.0.1', '0.1.0-rc1', '0.2.0', '0.1.1', '0.1.0-rc1+build2'],
),
'=0.1.0': (
['0.1.0', '0.1.0+build1'],
['0.0.1', '0.1.0-rc1', '0.2.0', '0.1.1', '0.1.0-rc1+build2'],
),
'0.1.0': (
['0.1.0', '0.1.0+build1'],
['0.0.1', '0.1.0-rc1', '0.2.0', '0.1.1', '0.1.0-rc1+build2'],
),
'==0.1.2-rc3': (
['0.1.2-rc3+build1', '0.1.2-rc3+build4.5'],
['0.1.2-rc4', '0.1.2', '0.1.3'],
),
'==0.1.2+build3.14': (
['0.1.2+build3.14'],
['0.1.2-rc+build3.14', '0.1.2+build3.15'],
),
'<=0.1.1': (
['0.0.0', '0.1.1-alpha1', '0.1.1', '0.1.1+build2'],
['0.1.2'],
),
'<0.1.1': (
['0.1.0', '0.0.0'],
['0.1.1', '0.1.1-zzz+999', '1.2.0', '0.1.1+build3'],
),
'<=0.1.2': (
['0.1.2+build4', '0.1.2-alpha', '0.1.0'],
['0.2.3', '1.1.1', '0.1.3'],
),
'<0.1.1-': (
['0.1.0', '0.1.1-alpha', '0.1.1-alpha+4'],
['0.2.0', '1.0.0', '0.1.1', '0.1.1+build1'],
),
'>=0.2.3-rc2': (
['0.2.3-rc3', '0.2.3', '0.2.3+1', '0.2.3-rc2', '0.2.3-rc2+1'],
['0.2.3-rc1', '0.2.2'],
),
'>=0.2.3': (
['0.2.3', '0.2.3+1'],
['0.2.3-rc3', '0.2.3-rc2', '0.2.3-rc2+1', '0.2.3-rc1', '0.2.2'],
),
'==0.2.3+': (
['0.2.3'],
['0.2.3+rc1', '0.2.4', '0.2.3-rc2'],
),
'!=0.2.3-rc2+12': (
['0.2.3-rc3', '0.2.3', '0.2.3-rc2+1', '0.2.4', '0.2.3-rc3+12'],
['0.2.3-rc2+12'],
),
'==2.0.0+b1': (
['2.0.0+b1'],
['2.1.1', '1.9.9', '1.9.9999', '2.0.0', '2.0.0-rc4'],
),
'!=0.1.1': (
['0.1.2', '0.1.0', '1.4.2'],
['0.1.1', '0.1.1-alpha', '0.1.1+b1'],
),
'!=0.3.4-': (
['0.4.0', '1.3.0', '0.3.4-alpha', '0.3.4-alpha+b1'],
['0.3.4', '0.3.4+b1'],
),
'~1.1.2': (
['1.1.3', '1.1.2+b1'],
['1.1.1', '1.1.2-alpha', '1.1.2-alpha+b1', '1.2.1', '2.1.0'],
),
'^1.1.2': (
['1.1.3', '1.1.2+b1', '1.2.1'],
['1.1.1', '1.1.2-alpha', '1.1.2-alpha+b1', '2.1.0'],
),
'^0.1.2': (
['0.1.2', '0.1.2+b1', '0.1.3'],
['0.1.2-alpha', '0.2.0', '1.1.2', '0.1.1'],
),
'^0.0.2': (
['0.0.2', '0.0.2+abb'],
['0.0.2-alpha', '0.1.0', '0.0.3', '1.0.0'],
),
'~=1.4.5': (
['1.4.5', '1.4.10-alpha', '1.4.10'],
['1.3.6', '1.4.4', '1.4.5-alpha', '1.5.0'],
),
'~=1.4.0': (
['1.4.0', '1.4.10-alpha', '1.4.10'],
['1.3.6', '1.3.9', '1.4.0-alpha', '1.5.0'],
),
'~=1.4': (
['1.4.0', '1.6.10-alpha', '1.6.10'],
['1.3.0', '1.4.0-alpha', '2.0.0'],
),
}
def test_matches(self):
for spec_text, versions in self.matches.items():
spec = base.SpecItem(spec_text)
matching, failing = versions
for version_text in matching:
with self.subTest(spec=spec_text, version=version_text):
version = base.Version(version_text)
self.assertTrue(spec.match(version), "%r should match %r" % (version, spec))
for version_text in failing:
with self.subTest(spec=spec_text, excluded=version_text):
version = base.Version(version_text)
self.assertFalse(
spec.match(version),
"%r should not match %r" % (version, spec))
def test_equality(self):
spec1 = base.SpecItem('==0.1.0')
spec2 = base.SpecItem('==0.1.0')
self.assertEqual(spec1, spec2)
self.assertFalse(spec1 == '==0.1.0')
def test_to_string(self):
spec = base.SpecItem('==0.1.0')
self.assertEqual('==0.1.0', str(spec))
self.assertEqual(base.SpecItem.KIND_EQUAL, spec.kind)
def test_hash(self):
self.assertEqual(
1,
len(set([base.SpecItem('==0.1.0'), base.SpecItem('==0.1.0')])))
class CoerceTestCase(unittest.TestCase):
if sys.version_info[0] <= 2:
import contextlib
@contextlib.contextmanager
def subTest(self, **kwargs):
yield
examples = {
# Dict of target: [list of equivalents]
'0.0.0': ('0', '0.0', '0.0.0', '0.0.0+', '0-'),
'0.1.0': ('0.1', '0.1+', '0.1-', '0.1.0', '0.01.0', '000.0001.0000000000'),
'0.1.0+2': ('0.1.0+2', '0.1.0.2'),
'0.1.0+2.3.4': ('0.1.0+2.3.4', '0.1.0+2+3+4', '0.1.0.2+3+4'),
'0.1.0+2-3.4': ('0.1.0+2-3.4', '0.1.0+2-3+4', '0.1.0.2-3+4', '0.1.0.2_3+4'),
'0.1.0-a2.3': ('0.1.0-a2.3', '0.1.0a2.3', '0.1.0_a2.3'),
'0.1.0-a2.3+4.5-6': ('0.1.0-a2.3+4.5-6', '0.1.0a2.3+4.5-6', '0.1.0a2.3+4.5_6', '0.1.0a2.3+4+5/6'),
}
def test_coerce(self):
for equivalent, samples in self.examples.items():
target = base.Version(equivalent)
for sample in samples:
with self.subTest(target=equivalent, sample=sample):
v_sample = base.Version.coerce(sample)
self.assertEqual(target, v_sample)
def test_invalid(self):
self.assertRaises(ValueError, base.Version.coerce, 'v1')
class SpecTestCase(unittest.TestCase):
if sys.version_info[0] <= 2:
import contextlib
@contextlib.contextmanager
def subTest(self, **kwargs):
yield
def assertCountEqual(self, a, b):
import collections
self.assertEqual(
collections.Counter(a),
collections.Counter(b),
)
examples = {
'>=0.1.1,<0.1.2': ['>=0.1.1', '<0.1.2'],
'>=0.1.0,!=0.1.3-rc1,<0.1.3': ['>=0.1.0', '!=0.1.3-rc1', '<0.1.3'],
'=0.1.2': ['==0.1.2'],
'>=0.1.2': ['>=0.1.2'],
'^1.2.3': ['>=1.2.3', '<2.0.0'],
'~=1.2.3': ['>=1.2.3', '<1.3.0'],
}
def test_parsing(self):
for spec_list_text, specs in self.examples.items():
with self.subTest(spec=spec_list_text):
spec_list = base.Spec(spec_list_text)
self.assertEqual(spec_list_text, str(spec_list))
self.assertNotEqual(spec_list_text, spec_list)
self.assertCountEqual(specs, [str(spec) for spec in spec_list])
split_examples = {
('>=0.1.1', '<0.1.2', '!=0.1.1+build1'): ['>=0.1.1', '<0.1.2', '!=0.1.1+build1'],
('>=0.1.0', '!=0.1.3-rc1,<0.1.3'): ['>=0.1.0', '!=0.1.3-rc1', '<0.1.3'],
}
def test_parsing_split(self):
for spec_list_texts, specs in self.split_examples.items():
with self.subTest(spec=spec_list_texts):
spec_list = base.Spec(*spec_list_texts)
self.assertEqual(','.join(spec_list_texts), str(spec_list))
self.assertCountEqual(specs, [str(spec) for spec in spec_list])
self.assertEqual(spec_list, base.Spec(','.join(spec_list_texts)))
for spec_text in specs:
self.assertIn(str(base.SpecItem(spec_text)), repr(spec_list))
matches = {
# At least 0.1.1 excluding pre-releases, less than 0.1.2 excluding pre-releases
'>=0.1.1,<0.1.2': (
['0.1.1', '0.1.1+4'],
['0.1.1-alpha', '0.1.2-alpha', '0.1.2', '1.3.4'],
),
# 0.1.x
'==0.1.*': (
['0.1.1', '0.1.1+4', '0.1.0', '0.1.99'],
['0.1.0-alpha', '0.0.1', '0.2.0'],
),
# 1.x.x
'==1.*': (
['1.1.1', '1.1.0+4', '1.1.0', '1.99.99'],
['1.0.0-alpha', '0.1.0', '2.0.0'],
),
# At least 0.1.0 with pre-releases, less than 0.1.4 excluding pre-releases,
# neither 0.1.3-rc1 nor any build of that version,
# not 0.1.0+b3 precisely
'>=0.1.0-,!=0.1.3-rc1,!=0.1.0+b3,<0.1.4': (
['0.1.1', '0.1.0+b4', '0.1.2', '0.1.3-rc2'],
['0.0.1', '0.1.0+b3', '0.1.4', '0.1.4-alpha', '0.1.3-rc1+4',
'0.1.0-alpha', '0.2.2', '0.1.4-rc1'],
),
}
def test_matches(self):
for spec_list_text, versions in self.matches.items():
spec_list = base.Spec(spec_list_text)
matching, failing = versions
for version_text in matching:
with self.subTest(spec=spec_list_text, matching=version_text):
version = base.Version(version_text)
self.assertTrue(
version in spec_list,
"%r should be in %r" % (version, spec_list))
self.assertTrue(
spec_list.match(version),
"%r should match %r" % (version, spec_list))
for version_text in failing:
with self.subTest(spec=spec_list_text, excluded=version_text):
version = base.Version(version_text)
self.assertFalse(
version in spec_list,
"%r should not be in %r" % (version, spec_list))
self.assertFalse(
spec_list.match(version),
"%r should not match %r" % (version, spec_list))
def test_equality(self):
for spec_list_text in self.examples:
with self.subTest(spec=spec_list_text):
slist1 = base.Spec(spec_list_text)
slist2 = base.Spec(spec_list_text)
self.assertEqual(slist1, slist2)
self.assertFalse(slist1 == spec_list_text)
def test_filter_empty(self):
s = base.Spec('>=0.1.1')
res = tuple(s.filter(()))
self.assertEqual((), res)
def test_filter_incompatible(self):
s = base.Spec('>=0.1.1,!=0.1.4')
res = tuple(s.filter([
base.Version('0.1.0'),
base.Version('0.1.4'),
base.Version('0.1.4-alpha'),
]))
self.assertEqual((), res)
def test_filter_compatible(self):
s = base.Spec('>=0.1.1,!=0.1.4,<0.2.0')
res = tuple(s.filter([
base.Version('0.1.0'),
base.Version('0.1.1'),
base.Version('0.1.5'),
base.Version('0.1.4-alpha'),
base.Version('0.1.2'),
base.Version('0.2.0-rc1'),
base.Version('3.14.15'),
]))
expected = (
base.Version('0.1.1'),
base.Version('0.1.5'),
base.Version('0.1.2'),
)
self.assertEqual(expected, res)
def test_select_empty(self):
s = base.Spec('>=0.1.1')
self.assertIsNone(s.select(()))
def test_select_incompatible(self):
s = base.Spec('>=0.1.1,!=0.1.4')
res = s.select([
base.Version('0.1.0'),
base.Version('0.1.4'),
base.Version('0.1.4-alpha'),
])
self.assertIsNone(res)
def test_select_compatible(self):
s = base.Spec('>=0.1.1,!=0.1.4,<0.2.0')
res = s.select([
base.Version('0.1.0'),
base.Version('0.1.1'),
base.Version('0.1.5'),
base.Version('0.1.4-alpha'),
base.Version('0.1.2'),
base.Version('0.2.0-rc1'),
base.Version('3.14.15'),
])
self.assertEqual(base.Version('0.1.5'), res)
def test_contains(self):
self.assertFalse('ii' in base.Spec('>=0.1.1'))
def test_hash(self):
self.assertEqual(
1,
len(set([base.Spec('>=0.1.1'), base.Spec('>=0.1.1')])))
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
|
#Plots the Av magnitude derived from the Balmer decrement
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#The location to store the scale and its stddev of each line
qualdatapath = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
#USIG ERRORS
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/errs.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
'''
#BIWT DUP ERRORS
#File with the error array
errdatapath = '/Users/blorenz/COSMOS/COSMOSData/biwt_dup.txt'
#Read in the scale of the lines
err_df = ascii.read(errdatapath,data_start=1,header_start=0,format='csv').to_pandas()
'''
#File to write the Av array to
dataout = '/Users/blorenz/COSMOS/COSMOSData/balmer_avs.txt'
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'
#Read in the muzzin data
mdata = ascii.read(mdatapath).to_pandas()
mdata = mdata.rename(columns={'ID':'OBJID'})
fluxdata = pd.merge(fluxdata,mdata)
#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 24
legendfont = 16
textfont = 16
#Division function: element-wise X/Y that returns 0 wherever Y == 0
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
lines = ['6563_fix','4861','4340']
#Create the figure
fig,axarr = plt.subplots(3,3,figsize=(25,22))
#Plotting parameters
ms = 3
lw=0.5
mark='o'
#Set the Rv value - this one is taken from Calzetti
Rv = 4.05 #+-0.80
#Reddening law from Calzetti et al (2000)
def Calzetti_k(wave):
waveum = wave*.0001
if ((waveum >= 0.63) and (waveum <= 2.2)):
k = 2.659*(-1.857+divz(1.040,waveum))+Rv
if ((waveum < 0.63) and (waveum >= 0.12)):
k = 2.659*(-2.156+divz(1.509,waveum)-divz(0.198,waveum**2)+divz(0.011,waveum**3))+Rv
return k
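#Derivation sketch: with the Calzetti law, F_obs = F_int * 10**(-0.4*E(B-V)*k(lambda)),
#so the observed-to-intrinsic ratio R_obs/R_int of two lines gives
#  E(B-V) = 2.5/(k(L2)-k(L1)) * log10(R_obs/R_int)
#and therefore Av = Rv*E(B-V) = log10(R_obs/R_int) / (0.4*(k(L2)-k(L1))/Rv),
#which is the expression evaluated in getAv below.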
#Finds the ratio of any two lines, the error in that ratio, and the implied Av
def getAv(pd_df,err_df,L1,L2,bdec):
#Calibrate the fluxes by dividing by scale
calL1 = divz(pd_df[L1+'_flux'],pd_df[L1+'_scale'])
calL2 = divz(pd_df[L2+'_flux'],pd_df[L2+'_scale'])
#Find the ratio
rat = divz(calL1,calL2)
#Find the error in the ratio by standard propagation: sqrt((err_L1/calL2)**2 + (calL1*err_L2/calL2**2)**2)
erat = np.sqrt((divz(1,calL2) * err_df[L1])**2 + (divz(-calL1,(calL2**2)) * err_df[L2])**2)
#Get the integer of the line
if len(L1)==8: iL1=int(L1[0:4])
else: iL1 = int(L1)
if len(L2)==8: iL2=int(L2[0:4])
else: iL2 = int(L2)
#Get the k value for each line
L1k = Calzetti_k(iL1)
L2k = Calzetti_k(iL2)
#Compute the Av
Av = divz(np.log10(rat/bdec),(0.4*((L2k/Rv)-(L1k/Rv))))
#And its error
eAv = divz((1/np.log(10))*divz((erat/bdec),rat),(0.4*((L2k/Rv)-(L1k/Rv))))
return Av,eAv
d = {'True': True, 'False': False}
lines0 = ['6563_fix','4861']
lines1 = ['4861','4340']
lines2 = ['6563_fix','4340']
c = 0
Av_df = pd.DataFrame()
#Add the fluxfile so that this can later be merged with the main frame
Av_df['fluxfile'] = fluxdata['fluxfile']
Av_df['LMASS'] = fluxdata['LMASS']
for lines in [lines0,lines1,lines2]:
#Filter the data
goodlines = [dataqual[line+'_good'].map(d) for line in lines]
#Needs to be good in all lines to be good
allgood = np.logical_and.reduce(goodlines)
#Needs to be bad in any line to be bad
badlines = [dataqual[line+'_bad'].map(d) for line in lines]
baddata = np.logical_or.reduce(badlines)
lowlines = [dataqual[line+'_low'].map(d) for line in lines]
#Needs to be low in any line to be low, and also not bad in any line
somelow = np.logical_and(np.logical_or.reduce(lowlines),np.logical_not(baddata))
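#Intrinsic (Case B) Balmer decrements: Ha/Hb = 2.86 and Hb/Hg ~ 2.137, so Ha/Hg ~ 2.86*2.137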
if c==0:
bdec=2.86
elif c==1:
bdec=2.137
else:
bdec=2.86*2.137
Av,eAv = getAv(fluxdata,err_df,lines[0],lines[1],bdec)
if c==0:
Av_df['AvHaHb'] = Av
Av_df['AvHaHberr'] = eAv
elif c==1:
Av_df['AvHbHg'] = Av
Av_df['AvHbHgerr'] = eAv
elif c==2:
Av_df['AvHaHg'] = Av
Av_df['AvHaHgerr'] = eAv
c=c+1
#Get the average between the two good Avs and its error
Av_df['AvHa_avg'] = (Av_df['AvHaHb']+Av_df['AvHaHg'])/2
Av_df['AvHa_avgerr'] = (Av_df['AvHaHberr']+Av_df['AvHaHgerr'])/2
#Find the mass_weighted medians
mr1 = (fluxdata['LMASS']<9.25)
mr2 = np.logical_and(fluxdata['LMASS']>=9.25,fluxdata['LMASS']<9.5)
mr3 = np.logical_and(fluxdata['LMASS']>=9.5,fluxdata['LMASS']<9.75)
mr4 = (fluxdata['LMASS']>=9.75)
med1 = np.median(Av_df[np.logical_and(allgood,mr1)]['AvHaHb'])
med2 = np.median(Av_df[np.logical_and(allgood,mr2)]['AvHaHb'])
med3 = np.median(Av_df[np.logical_and(allgood,mr3)]['AvHaHb'])
med4 = np.median(Av_df[np.logical_and(allgood,mr4)]['AvHaHb'])
'''
#Linear fit for the medians
coeff = np.polyfit(fluxdata[goodidx]['LMASS'],AvHaHg[goodidx],1)
'''
Av_df = Av_df.replace(-np.inf,-99.99999999999)
d = {'True': True, 'False': False}
ulim=4 #Upper Av limit to consider good
#Number of stddevs away from median to be good
sig = 2
for i in range(0,len(fluxdata)):
'''
use 0 - 'AvHaHb'
use 1 - 'AvHbHg'
use 2 - 'AvHaHg'
use 3 - 'AvMedian'
use 4 - 'AvHa_avg'
'''
row = fluxdata.iloc[i]
#Mass-weighted medians
if (row['LMASS'] < 9.25): Av_df.at[i,'AvMedian'] = med1
elif np.logical_and(row['LMASS'] >= 9.25,row['LMASS'] < 9.5,): Av_df.at[i,'AvMedian'] = med2
elif np.logical_and(row['LMASS'] >= 9.5,row['LMASS'] < 9.75): Av_df.at[i,'AvMedian'] = med3
elif (row['LMASS'] >= 9.75): Av_df.at[i,'AvMedian'] = med4
'''
#Linear fit for medians
Av_df.at[i,'AvMedian'] = coeff[0]*row['LMASS']+coeff[1]
'''
Avrow = Av_df.iloc[i]
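#Out-of-range Avs (negative or above ulim) get huge sentinel values so they never win the comparisons below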
if np.logical_or((Avrow['AvHaHb'] < 0),((Avrow['AvHaHb'] > ulim))): cHaHb = 10**80
else: cHaHb = Avrow['AvHaHb']
if np.logical_or((Avrow['AvHbHg'] < 0),((Avrow['AvHbHg'] > ulim))): cHbHg = 10**90
else: cHbHg = Avrow['AvHbHg']
if np.logical_or((Avrow['AvHaHg'] < 0),((Avrow['AvHaHg'] > ulim))): cHaHg = 10**100
else: cHaHg = Avrow['AvHaHg']
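#Default to the mass-binned median (use=3); the checks below override this when a direct Balmer-decrement Av is reliable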
use = 3
#Find out which lines are good. (Ha,Hb,Hg)
l1g = dataqual['6563_fix_good'].map(d).iloc[i]
l2g = dataqual['4861_good'].map(d).iloc[i]
l3g = dataqual['4340_good'].map(d).iloc[i]
goodlines = (l1g,l2g,l3g)
#If only Ha and Hb are good, check those
if goodlines == (1,1,0):
Av_df.at[i,'AvHaHbok'] = Avrow['AvHaHb']
if (cHaHb < 10): use=0
#If only Ha and Hg are good, check those
if goodlines == (1,0,1):
if (cHaHg < 10): use=1
#If all lines are good,
if goodlines == (1,1,1):
#Compare HaHb and HaHg. If they are within each other's errors, average them
diff = np.abs(cHaHb-cHaHg)
err = Avrow['AvHaHberr']+Avrow['AvHaHgerr']
#If they are within each other's errors, check if the error bars are large on one of the measurements
if (diff < err):
if (divz(Avrow['AvHaHberr'],Avrow['AvHaHgerr']) < 0.5):
if (cHaHb < 10): use=0
elif (divz(Avrow['AvHaHberr'],Avrow['AvHaHgerr']) > 2):
if (cHaHg < 10): use=1
else:
if (Avrow['AvHa_avg'] > 0): use = 4
#If they are not close, use whichever is closest to the median
else:
diffHaHb = np.abs(cHaHb-Avrow['AvMedian'])
diffHaHg = np.abs(cHaHg-Avrow['AvMedian'])
arr = np.array([diffHaHb,diffHaHg])
if (5 > arr[np.argmin(arr)]):
use = np.argmin(arr)
if use == 0: usestr = 'AvHaHb'
elif use == 1: usestr = 'AvHaHg'
elif use == 3: usestr = 'AvMedian'
elif use == 4: usestr = 'AvHa_avg'
Av_df.at[i,'useAv'] = usestr
#Write to csv
Av_df = Av_df.reindex(sorted(Av_df.columns), axis=1)
Av_df.to_csv(dataout,index=False)
|
|
"""Support for the Tuya lights."""
from __future__ import annotations
from dataclasses import dataclass
import json
from typing import Any, cast
from tuya_iot import TuyaDevice, TuyaDeviceManager
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
LightEntity,
LightEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import HomeAssistantTuyaData
from .base import IntegerTypeData, TuyaEntity
from .const import DOMAIN, TUYA_DISCOVERY_NEW, DPCode, DPType, WorkMode
from .util import remap_value
@dataclass
class ColorTypeData:
"""Color Type Data."""
h_type: IntegerTypeData
s_type: IntegerTypeData
v_type: IntegerTypeData
DEFAULT_COLOR_TYPE_DATA = ColorTypeData(
h_type=IntegerTypeData(DPCode.COLOUR_DATA_HSV, min=1, scale=0, max=360, step=1),
s_type=IntegerTypeData(DPCode.COLOUR_DATA_HSV, min=1, scale=0, max=255, step=1),
v_type=IntegerTypeData(DPCode.COLOUR_DATA_HSV, min=1, scale=0, max=255, step=1),
)
DEFAULT_COLOR_TYPE_DATA_V2 = ColorTypeData(
h_type=IntegerTypeData(DPCode.COLOUR_DATA_HSV, min=1, scale=0, max=360, step=1),
s_type=IntegerTypeData(DPCode.COLOUR_DATA_HSV, min=1, scale=0, max=1000, step=1),
v_type=IntegerTypeData(DPCode.COLOUR_DATA_HSV, min=1, scale=0, max=1000, step=1),
)
@dataclass
class TuyaLightEntityDescription(LightEntityDescription):
"""Describe an Tuya light entity."""
brightness_max: DPCode | None = None
brightness_min: DPCode | None = None
brightness: DPCode | tuple[DPCode, ...] | None = None
color_data: DPCode | tuple[DPCode, ...] | None = None
color_mode: DPCode | None = None
color_temp: DPCode | tuple[DPCode, ...] | None = None
default_color_type: ColorTypeData = DEFAULT_COLOR_TYPE_DATA
LIGHTS: dict[str, tuple[TuyaLightEntityDescription, ...]] = {
# Curtain Switch
# https://developer.tuya.com/en/docs/iot/category-clkg?id=Kaiuz0gitil39
"clkg": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_BACKLIGHT,
name="Backlight",
entity_category=EntityCategory.CONFIG,
),
),
# String Lights
# https://developer.tuya.com/en/docs/iot/dc?id=Kaof7taxmvadu
"dc": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Strip Lights
# https://developer.tuya.com/en/docs/iot/dd?id=Kaof804aibg2l
"dd": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
default_color_type=DEFAULT_COLOR_TYPE_DATA_V2,
),
),
# Light
# https://developer.tuya.com/en/docs/iot/categorydj?id=Kaiuyzy3eheyy
"dj": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=(DPCode.BRIGHT_VALUE_V2, DPCode.BRIGHT_VALUE),
color_temp=(DPCode.TEMP_VALUE_V2, DPCode.TEMP_VALUE),
color_data=(DPCode.COLOUR_DATA_V2, DPCode.COLOUR_DATA),
),
# Not documented
# Based on multiple reports: manufacturer customized Dimmer 2 switches
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED_1,
name="Light",
brightness=DPCode.BRIGHT_VALUE_1,
),
),
# Ceiling Fan Light
# https://developer.tuya.com/en/docs/iot/fsd?id=Kaof8eiei4c2v
"fsd": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Ambient Light
# https://developer.tuya.com/en/docs/iot/ambient-light?id=Kaiuz06amhe6g
"fwd": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Motion Sensor Light
# https://developer.tuya.com/en/docs/iot/gyd?id=Kaof8a8hycfmy
"gyd": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Humidifier Light
# https://developer.tuya.com/en/docs/iot/categoryjsq?id=Kaiuz1smr440b
"jsq": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_data=DPCode.COLOUR_DATA_HSV,
),
),
# Switch
# https://developer.tuya.com/en/docs/iot/s?id=K9gf7o5prgf7s
"kg": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_BACKLIGHT,
name="Backlight",
entity_category=EntityCategory.CONFIG,
),
),
# Air conditioner
# https://developer.tuya.com/en/docs/iot/categorykt?id=Kaiuz0z71ov2n
"kt": (
TuyaLightEntityDescription(
key=DPCode.LIGHT,
name="Backlight",
entity_category=EntityCategory.CONFIG,
),
),
# Unknown light product
# Found as VECINO RGBW as provided by diagnostics
# Not documented
"mbd": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Unknown product with light capabilities
# Found in some diffusers, plugs and PIR flood lights
# Not documented
"qjdcz": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Heater
# https://developer.tuya.com/en/docs/iot/categoryqn?id=Kaiuz18kih0sm
"qn": (
TuyaLightEntityDescription(
key=DPCode.LIGHT,
name="Backlight",
entity_category=EntityCategory.CONFIG,
),
),
# Smart Camera
# https://developer.tuya.com/en/docs/iot/categorysp?id=Kaiuz35leyo12
"sp": (
TuyaLightEntityDescription(
key=DPCode.FLOODLIGHT_SWITCH,
brightness=DPCode.FLOODLIGHT_LIGHTNESS,
name="Floodlight",
),
TuyaLightEntityDescription(
key=DPCode.BASIC_INDICATOR,
name="Indicator Light",
entity_category=EntityCategory.CONFIG,
),
),
# Dimmer Switch
# https://developer.tuya.com/en/docs/iot/categorytgkg?id=Kaiuz0ktx7m0o
"tgkg": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED_1,
name="Light",
brightness=DPCode.BRIGHT_VALUE_1,
brightness_max=DPCode.BRIGHTNESS_MAX_1,
brightness_min=DPCode.BRIGHTNESS_MIN_1,
),
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED_2,
name="Light 2",
brightness=DPCode.BRIGHT_VALUE_2,
brightness_max=DPCode.BRIGHTNESS_MAX_2,
brightness_min=DPCode.BRIGHTNESS_MIN_2,
),
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED_3,
name="Light 3",
brightness=DPCode.BRIGHT_VALUE_3,
brightness_max=DPCode.BRIGHTNESS_MAX_3,
brightness_min=DPCode.BRIGHTNESS_MIN_3,
),
),
# Dimmer
# https://developer.tuya.com/en/docs/iot/tgq?id=Kaof8ke9il4k4
"tgq": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
name="Light",
brightness=(DPCode.BRIGHT_VALUE_V2, DPCode.BRIGHT_VALUE),
brightness_max=DPCode.BRIGHTNESS_MAX_1,
brightness_min=DPCode.BRIGHTNESS_MIN_1,
),
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED_1,
name="Light",
brightness=DPCode.BRIGHT_VALUE_1,
),
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED_2,
name="Light 2",
brightness=DPCode.BRIGHT_VALUE_2,
),
),
# Solar Light
# https://developer.tuya.com/en/docs/iot/tynd?id=Kaof8j02e1t98
"tyndj": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
),
),
# Ceiling Light
# https://developer.tuya.com/en/docs/iot/ceiling-light?id=Kaiuz03xxfc4r
"xdd": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_LED,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
color_data=DPCode.COLOUR_DATA,
),
TuyaLightEntityDescription(
key=DPCode.SWITCH_NIGHT_LIGHT,
name="Night Light",
),
),
# Remote Control
# https://developer.tuya.com/en/docs/iot/ykq?id=Kaof8ljn81aov
"ykq": (
TuyaLightEntityDescription(
key=DPCode.SWITCH_CONTROLLER,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_CONTROLLER,
color_temp=DPCode.TEMP_CONTROLLER,
),
),
# Fan
# https://developer.tuya.com/en/docs/iot/categoryfs?id=Kaiuz1xweel1c
"fs": (
TuyaLightEntityDescription(
key=DPCode.LIGHT,
color_mode=DPCode.WORK_MODE,
brightness=DPCode.BRIGHT_VALUE,
color_temp=DPCode.TEMP_VALUE,
),
),
}
# Socket (duplicate of `kg`)
# https://developer.tuya.com/en/docs/iot/s?id=K9gf7o5prgf7s
LIGHTS["cz"] = LIGHTS["kg"]
# Power Socket (duplicate of `kg`)
# https://developer.tuya.com/en/docs/iot/s?id=K9gf7o5prgf7s
LIGHTS["pc"] = LIGHTS["kg"]
@dataclass
class ColorData:
"""Color Data."""
type_data: ColorTypeData
h_value: int
s_value: int
v_value: int
@property
def hs_color(self) -> tuple[float, float]:
"""Get the HS value from this color data."""
return (
self.type_data.h_type.remap_value_to(self.h_value, 0, 360),
self.type_data.s_type.remap_value_to(self.s_value, 0, 100),
)
@property
def brightness(self) -> int:
"""Get the brightness value from this color data."""
return round(self.type_data.v_type.remap_value_to(self.v_value, 0, 255))
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up tuya light dynamically through tuya discovery."""
hass_data: HomeAssistantTuyaData = hass.data[DOMAIN][entry.entry_id]
@callback
def async_discover_device(device_ids: list[str]):
"""Discover and add a discovered tuya light."""
entities: list[TuyaLightEntity] = []
for device_id in device_ids:
device = hass_data.device_manager.device_map[device_id]
if descriptions := LIGHTS.get(device.category):
for description in descriptions:
if description.key in device.status:
entities.append(
TuyaLightEntity(
device, hass_data.device_manager, description
)
)
async_add_entities(entities)
async_discover_device([*hass_data.device_manager.device_map])
entry.async_on_unload(
async_dispatcher_connect(hass, TUYA_DISCOVERY_NEW, async_discover_device)
)
class TuyaLightEntity(TuyaEntity, LightEntity):
"""Tuya light device."""
entity_description: TuyaLightEntityDescription
_brightness_max: IntegerTypeData | None = None
_brightness_min: IntegerTypeData | None = None
_brightness: IntegerTypeData | None = None
_color_data_dpcode: DPCode | None = None
_color_data_type: ColorTypeData | None = None
_color_mode: DPCode | None = None
_color_temp: IntegerTypeData | None = None
def __init__(
self,
device: TuyaDevice,
device_manager: TuyaDeviceManager,
description: TuyaLightEntityDescription,
) -> None:
"""Init TuyaHaLight."""
super().__init__(device, device_manager)
self.entity_description = description
self._attr_unique_id = f"{super().unique_id}{description.key}"
self._attr_supported_color_modes = {COLOR_MODE_ONOFF}
# Determine DPCodes
self._color_mode_dpcode = self.find_dpcode(
description.color_mode, prefer_function=True
)
if int_type := self.find_dpcode(
description.brightness, dptype=DPType.INTEGER, prefer_function=True
):
self._brightness = int_type
self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
self._brightness_max = self.find_dpcode(
description.brightness_max, dptype=DPType.INTEGER
)
self._brightness_min = self.find_dpcode(
description.brightness_min, dptype=DPType.INTEGER
)
if int_type := self.find_dpcode(
description.color_temp, dptype=DPType.INTEGER, prefer_function=True
):
self._color_temp = int_type
self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if (
dpcode := self.find_dpcode(description.color_data, prefer_function=True)
) and self.get_dptype(dpcode) == DPType.JSON:
self._color_data_dpcode = dpcode
self._attr_supported_color_modes.add(COLOR_MODE_HS)
if dpcode in self.device.function:
values = cast(str, self.device.function[dpcode].values)
else:
values = self.device.status_range[dpcode].values
# Fetch color data type information
if function_data := json.loads(values):
self._color_data_type = ColorTypeData(
h_type=IntegerTypeData(dpcode, **function_data["h"]),
s_type=IntegerTypeData(dpcode, **function_data["s"]),
v_type=IntegerTypeData(dpcode, **function_data["v"]),
)
else:
# If no type is found, use a default one
self._color_data_type = self.entity_description.default_color_type
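                # Fall back to the extended (V2) default colour type for devices
                # that use COLOUR_DATA_V2 or report a brightness scale above 255.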
if self._color_data_dpcode == DPCode.COLOUR_DATA_V2 or (
self._brightness and self._brightness.max > 255
):
self._color_data_type = DEFAULT_COLOR_TYPE_DATA_V2
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self.device.status.get(self.entity_description.key, False)
def turn_on(self, **kwargs: Any) -> None:
"""Turn on or control the light."""
commands = [{"code": self.entity_description.key, "value": True}]
if self._color_temp and ATTR_COLOR_TEMP in kwargs:
if self._color_mode_dpcode:
commands += [
{
"code": self._color_mode_dpcode,
"value": WorkMode.WHITE,
},
]
commands += [
{
"code": self._color_temp.dpcode,
"value": round(
self._color_temp.remap_value_from(
kwargs[ATTR_COLOR_TEMP],
self.min_mireds,
self.max_mireds,
reverse=True,
)
),
},
]
elif self._color_data_type and (
ATTR_HS_COLOR in kwargs
or (ATTR_BRIGHTNESS in kwargs and self.color_mode == COLOR_MODE_HS)
):
if self._color_mode_dpcode:
commands += [
{
"code": self._color_mode_dpcode,
"value": WorkMode.COLOUR,
},
]
if not (brightness := kwargs.get(ATTR_BRIGHTNESS)):
brightness = self.brightness or 0
if not (color := kwargs.get(ATTR_HS_COLOR)):
color = self.hs_color or (0, 0)
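            # Encode hue, saturation and brightness into the device's JSON
            # colour data value, remapped to the device's own value ranges.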
commands += [
{
"code": self._color_data_dpcode,
"value": json.dumps(
{
"h": round(
self._color_data_type.h_type.remap_value_from(
color[0], 0, 360
)
),
"s": round(
self._color_data_type.s_type.remap_value_from(
color[1], 0, 100
)
),
"v": round(
self._color_data_type.v_type.remap_value_from(
brightness
)
),
}
),
},
]
if (
ATTR_BRIGHTNESS in kwargs
and self.color_mode != COLOR_MODE_HS
and self._brightness
):
brightness = kwargs[ATTR_BRIGHTNESS]
# If there is a min/max value, the brightness is actually limited.
# Meaning it is actually not on a 0-255 scale.
if (
self._brightness_max is not None
and self._brightness_min is not None
and (
brightness_max := self.device.status.get(
self._brightness_max.dpcode
)
)
is not None
and (
brightness_min := self.device.status.get(
self._brightness_min.dpcode
)
)
is not None
):
# Remap values onto our scale
brightness_max = self._brightness_max.remap_value_to(brightness_max)
brightness_min = self._brightness_min.remap_value_to(brightness_min)
# Remap the brightness value from their min-max to our 0-255 scale
brightness = remap_value(
brightness,
to_min=brightness_min,
to_max=brightness_max,
)
commands += [
{
"code": self._brightness.dpcode,
"value": round(self._brightness.remap_value_from(brightness)),
},
]
self._send_command(commands)
def turn_off(self, **kwargs: Any) -> None:
"""Instruct the light to turn off."""
self._send_command([{"code": self.entity_description.key, "value": False}])
@property
def brightness(self) -> int | None:
"""Return the brightness of the light."""
# If the light is currently in color mode, extract the brightness from the color data
if self.color_mode == COLOR_MODE_HS and (color_data := self._get_color_data()):
return color_data.brightness
if not self._brightness:
return None
brightness = self.device.status.get(self._brightness.dpcode)
if brightness is None:
return None
# Remap value to our scale
brightness = self._brightness.remap_value_to(brightness)
# If there is a min/max value, the brightness is actually limited.
# Meaning it is actually not on a 0-255 scale.
if (
self._brightness_max is not None
and self._brightness_min is not None
and (brightness_max := self.device.status.get(self._brightness_max.dpcode))
is not None
and (brightness_min := self.device.status.get(self._brightness_min.dpcode))
is not None
):
# Remap values onto our scale
brightness_max = self._brightness_max.remap_value_to(brightness_max)
brightness_min = self._brightness_min.remap_value_to(brightness_min)
# Remap the brightness value from their min-max to our 0-255 scale
brightness = remap_value(
brightness,
from_min=brightness_min,
from_max=brightness_max,
)
return round(brightness)
@property
def color_temp(self) -> int | None:
"""Return the color_temp of the light."""
if not self._color_temp:
return None
temperature = self.device.status.get(self._color_temp.dpcode)
if temperature is None:
return None
return round(
self._color_temp.remap_value_to(
temperature, self.min_mireds, self.max_mireds, reverse=True
)
)
@property
def hs_color(self) -> tuple[float, float] | None:
"""Return the hs_color of the light."""
if self._color_data_dpcode is None or not (
color_data := self._get_color_data()
):
return None
return color_data.hs_color
@property
def color_mode(self) -> str:
"""Return the color_mode of the light."""
        # We consider the light to be in HS color mode when the work mode is
        # anything other than "white".
if (
self._color_mode_dpcode
and self.device.status.get(self._color_mode_dpcode) != WorkMode.WHITE
):
return COLOR_MODE_HS
if self._color_temp:
return COLOR_MODE_COLOR_TEMP
if self._brightness:
return COLOR_MODE_BRIGHTNESS
return COLOR_MODE_ONOFF
def _get_color_data(self) -> ColorData | None:
"""Get current color data from device."""
if (
self._color_data_type is None
or self._color_data_dpcode is None
or self._color_data_dpcode not in self.device.status
):
return None
if not (status_data := self.device.status[self._color_data_dpcode]):
return None
if not (status := json.loads(status_data)):
return None
return ColorData(
type_data=self._color_data_type,
h_value=status["h"],
s_value=status["s"],
v_value=status["v"],
)
|
|
# Copyright (c) 2013 TrilioData, Inc.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup job scheduler manages backup jobs
**Related Flags**
:backupjobs_topic: What :mod:`rpc` topic to listen to (default:
`raksha-backupjobs`).
:backupjobs_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`raksha.backupjob.manager.Manager`).
"""
from sqlalchemy import *
from datetime import datetime, timedelta
import time
import uuid
from oslo.config import cfg
from raksha import context
from raksha import exception
from raksha import flags
from raksha import manager
from raksha.virt import driver
from raksha.openstack.common import excutils
from raksha.openstack.common import importutils
from raksha.openstack.common import log as logging
from raksha.apscheduler.scheduler import Scheduler
from raksha.vault import swift
LOG = logging.getLogger(__name__)
backupjobs_manager_opts = [
cfg.StrOpt('vault_service',
default='raksha.vault.swift',
help='Vault to use for backup jobs.'),
]
scheduler_config = {'standalone': 'True'}
FLAGS = flags.FLAGS
FLAGS.register_opts(backupjobs_manager_opts)
def backupjob_callback(backupjob_id):
"""
Callback
"""
#TODO(gbasava): Implementation
class BackupJobManager(manager.SchedulerDependentManager):
"""Manages backup jobs """
RPC_API_VERSION = '1.0'
def __init__(self, service_name=None, *args, **kwargs):
self.service = importutils.import_module(FLAGS.vault_service)
self.az = FLAGS.storage_availability_zone
self.scheduler = Scheduler(scheduler_config)
self.scheduler.start()
super(BackupJobManager, self).__init__(service_name='backupjobscheduler',
*args, **kwargs)
self.driver = driver.load_compute_driver(None, None)
def init_host(self):
"""
Do any initialization that needs to be run if this is a standalone service.
"""
ctxt = context.get_admin_context()
LOG.info(_("Cleaning up incomplete backup operations"))
def backupjob_create(self, context, backupjob_id):
"""
Create a scheduled backup job in the backup job scheduler
"""
try:
backupjob = self.db.backupjob_get(context, backupjob_id)
#TODO(gbasava): Change it to list of VMs when we support multiple VMs
vm = self.db.backupjob_vms_get(context, backupjob_id)
LOG.info(_('create_backupjob started, %s:' %backupjob_id))
self.db.backupjob_update(context, backupjob_id, {'host': self.host,
'service': FLAGS.vault_service})
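            # Schedule backupjob_callback to run every 24 hours; the resulting
            # scheduler job id is stored on the backup job record once scheduling succeeds.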
schjob = self.scheduler.add_interval_job(context, backupjob_callback, hours=24,
name=backupjob['display_name'], args=[backupjob_id],
backupjob_id=backupjob_id)
LOG.info(_('scheduled backup job: %s'), schjob.id)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.backupjob_update(context, backupjob_id,
{'status': 'error',
'fail_reason': unicode(err)})
self.db.backupjob_update(context, backupjob_id, {'status': 'available',
'availability_zone': self.az,
'schedule_job_id':schjob.id})
LOG.info(_('create_backupjob finished. backup: %s'), backupjob_id)
def backupjob_execute(self, context, backupjobrun_id):
"""
Execute the backup job. Invoked by the scheduler
"""
LOG.info(_('execute_backupjob started, backupjobrun_id %s:' %backupjobrun_id))
backupjobrun = self.db.backupjobrun_get(context, backupjobrun_id)
"""
Make sure the backup job is prepared before scheduling an incremental backup
"""
backupjob = self.db.backupjob_get(context, backupjobrun.backupjob_id)
self.db.backupjobrun_update(context, backupjobrun.id, {'status': 'executing'})
#TODO(gbasava): Pick the specified vault service for the backup job
vault_service = swift.SwiftBackupService(context)
#take a backup of each VM
for vm in self.db.backupjob_vms_get(context, backupjobrun.backupjob_id):
#create an entry for the VM
options = {'vm_id': vm.vm_id,
'backupjobrun_id': backupjobrun_id,
'backuptype':'incremental',
'status': 'creating'}
backupjobrun_vm = self.db.backupjobrun_vm_create(context, options)
self.driver.backup_execute(backupjob, backupjobrun, backupjobrun_vm, vault_service, self.db, context)
#TODO(gbasava): Check for the success (and update)
backupjobrun_vm.update({'status': 'available',})
#TODO(gbasava): handle the case where this can be updated by multiple backup job runs coming from
#different backup jobs.
self.db.vm_recent_backupjobrun_update(context, vm.vm_id, {'backupjobrun_id': backupjobrun.id})
#TODO(gbasava): Check for the success (and update)
self.db.backupjobrun_update(context, backupjobrun.id, {'status': 'available'})
def backupjob_prepare(self, context, backupjobrun_id):
"""
Prepare the backup job by doing a full backup
"""
LOG.info(_('prepare_backupjob started, backupjobrun_id %s:' %backupjobrun_id))
backupjobrun = self.db.backupjobrun_get(context, backupjobrun_id)
# Backup job preparation can only be executed once
if (backupjobrun.backuptype == "full" and backupjobrun.status == "completed"):
            return
backupjob = self.db.backupjob_get(context, backupjobrun.backupjob_id)
self.db.backupjobrun_update(context, backupjobrun.id, {'status': 'executing'})
#TODO(gbasava): Pick the specified vault service for the backup job
vault_service = swift.SwiftBackupService(context)
#take a backup of each VM
for vm in self.db.backupjob_vms_get(context, backupjobrun.backupjob_id):
#create an entry for the VM
options = {'vm_id': vm.vm_id,
'backupjobrun_id': backupjobrun_id,
'backuptype':'full',
'status': 'creating',}
backupjobrun_vm = self.db.backupjobrun_vm_create(context, options)
self.driver.backup_prepare(backupjob, backupjobrun, backupjobrun_vm, vault_service, self.db, context)
#TODO(gbasava): Check for the success (and update)
backupjobrun_vm.update({'status': 'available',})
#TODO(gbasava): handle the case where this can be updated by multiple backup job runs coming from
#different backup jobs.
self.db.vm_recent_backupjobrun_update(context, vm.vm_id, {'backupjobrun_id': backupjobrun.id})
#TODO(gbasava): Check for the success (and update)
self.db.backupjobrun_update(context, backupjobrun.id, {'status': 'available'})
def backupjob_delete(self, context, backupjob_id):
"""
Delete an existing backupjob
"""
        backupjob = self.db.backupjob_get(context, backupjob_id)
        LOG.info(_('delete_backupjob started, backupjob: %s'), backupjob_id)
#TODO(gbasava): Implement
def backupjobrun_restore(self, context, backupjobrun_id):
"""
Restore VMs and all its LUNs from a backup job run
"""
LOG.info(_('restore_backupjobrun started, restoring backupjobrun id: %(backupjobrun_id)s') % locals())
backupjobrun = self.db.backupjobrun_get(context, backupjobrun_id)
backupjob = self.db.backupjob_get(context, backupjobrun.backupjob_id)
#self.db.backupjobrun_update(context, backupjobrun.id, {'status': 'restoring'})
#TODO(gbasava): Pick the specified vault service from the backupjobrun
vault_service = swift.SwiftBackupService(context)
#restore each VM
for vm in self.db.backupjobrun_vm_get(context, backupjobrun.id):
self.driver.restore_instance(backupjob, backupjobrun, vm, vault_service, self.db, context)
def backupjobrun_delete(self, context, backupjobrun_id):
"""
Delete an existing backupjobrun
"""
        backupjobrun = self.db.backupjobrun_get(context, backupjobrun_id)
#TODO(gbasava):Implement
|
|
from ctypes import BigEndianStructure, Union, c_uint8, c_uint16, c_uint32
from .compat import *
from .error import *
from .packet import *
from .util import *
class Box(Packet):
def __init__(self, type, total_size, payload, extended_size=False):
self.type = type
self.total_size = total_size
self.payload = payload
self.extended_size = extended_size
@property
def size(self):
size = 8
size += self.payload.size
if size > 0xFFFFFFFF or self.extended_size:
size += 8
return size
@classmethod
def _deserialize(cls, io):
size = io.read_u32()
type_ = io.read_padded(4)
header_size = 8
extended_size = False
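        # A 32-bit size of 1 means the real box size follows the type as a
        # 64-bit "extended size" field.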
if size == 1:
size = io.read_u64()
header_size += 8
extended_size = True
if type_ in PayloadTypes:
parent_data_left = io.data_left
io.data_left = size - header_size
payload = PayloadTypes[type_].deserialize(io=io)
if parent_data_left is not None:
io.data_left = parent_data_left - payload.size
else:
io.data_left = None
else:
if size == 0:
data = io.read()
else:
data = io.read(size - header_size)
payload = RawPayload(data)
return cls(type_, size, payload, extended_size)
def _serialize(self, packet):
size = self.payload.size
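        # Boxes larger than 32 bits (or flagged as extended) write a size of 1,
        # then the type, then the real size as a 64-bit value.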
if size > 0xFFFFFFFF or self.extended_size:
packet.write_u32(1)
else:
packet.write_u32(size + 8)
packet.write_padded(self.type, 4)
if size > 0xFFFFFFFF or self.extended_size:
packet.write_u64(size + 16)
if isinstance(self.payload, BoxPayload):
self.payload.serialize(packet)
else:
packet.write(self.payload)
class BoxPayload(Packet):
@property
def size(self):
return 0
@classmethod
def box(cls, *args, **kw):
type_ = None
for name, kls in PayloadTypes.items():
if kls == cls:
type_ = name
break
payload = cls(*args, **kw)
return Box(type_, 0, payload)
class BoxContainer(BoxPayload):
def __init__(self, boxes):
self.boxes = boxes
@property
def size(self):
size = 0
for box in self.boxes:
size += box.size
return size
def _serialize(self, packet):
for box in self.boxes:
box.serialize(packet)
@classmethod
def _deserialize(cls, io):
boxes = []
while io.data_left > 0:
box = Box.deserialize(io=io)
boxes.append(box)
return cls(boxes)
class BoxContainerSingle(BoxPayload):
def __init__(self, box):
self.box = box
@property
def size(self):
return self.box.size
def _serialize(self, packet):
self.box.serialize(packet)
@classmethod
def _deserialize(cls, io):
box = Box.deserialize(io=io)
return cls(box)
class RawPayload(BoxPayload):
def __init__(self, data):
self.data = data
def __repr__(self):
return "<RawPayload size={0}>".format(self.size)
@property
def size(self):
return len(self.data)
@classmethod
def _deserialize(cls, io):
data = io.read()
return cls(data)
def _serialize(self, packet):
packet.write(self.data)
class BoxPayloadFTYP(BoxPayload):
def __init__(self, major_brand="f4v", minor_version=0,
compatible_brands=["isom", "mp42", "m4v"]):
self.major_brand = major_brand
self.minor_version = minor_version
self.compatible_brands = compatible_brands
@property
def size(self):
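        # 4 bytes major brand + 4 bytes minor version + 4 bytes per compatible brand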
return 4+4+(len(self.compatible_brands)*4)
def _serialize(self, packet):
packet.write_padded(self.major_brand, 4)
packet.write_u32(self.minor_version)
for brand in self.compatible_brands:
packet.write_padded(brand, 4)
@classmethod
def _deserialize(cls, io):
major_brand = io.read_padded(4)
minor_version = io.read_u32()
compatible_brands = []
while io.data_left > 0:
brand = io.read_padded(4)
compatible_brands.append(brand)
return cls(major_brand, minor_version,
compatible_brands)
class BoxPayloadMVHD(BoxPayload):
def __init__(self, version=0, creation_time=0, modification_time=0,
time_scale=1000, duration=0, rate=1.0, volume=1.0,
matrix=[65536, 0, 0, 0, 65536, 0, 0, 0, 1073741824],
next_track_id=0):
self.version = version
self.creation_time = creation_time
self.modification_time = modification_time
self.time_scale = time_scale
self.duration = duration
self.rate = rate
self.volume = volume
self.matrix = matrix
self.next_track_id = next_track_id
@property
def size(self):
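        # Fixed-size fields plus creation time, modification time and duration,
        # which are 64-bit in version 1 and 32-bit otherwise.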
size = 1+3+4+4+2+2+4+4+(9*4)+(6*4)+4
if self.version == 1:
size += 3*8
else:
size += 3*4
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u3264(self.version, self.creation_time)
packet.write_u3264(self.version, self.modification_time)
packet.write_u32(self.time_scale)
packet.write_u3264(self.version, self.duration)
packet.write_s16_16(self.rate)
packet.write_s8_8(self.volume)
packet.write_u16(0) # Reserved
packet.write_u32(0) # Reserved
packet.write_u32(0) # Reserved
for m in self.matrix:
packet.write_u32(m)
for i in range(6):
packet.write_u32(0) # Reserved
packet.write_u32(self.next_track_id)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
io.read_u24() # Reserved
creation_time = io.read_u3264(version)
modification_time = io.read_u3264(version)
time_scale = io.read_u32()
duration = io.read_u3264(version)
rate = io.read_s16_16()
volume = io.read_s8_8()
io.read_u16() # Reserved
io.read_u32() # Reserved
io.read_u32() # Reserved
matrix = []
for i in range(9):
matrix.append(io.read_u32())
for i in range(6):
io.read_u32() # Reserved
next_track_id = io.read_u32()
return cls(version, creation_time,
modification_time, time_scale, duration,
rate, volume, matrix, next_track_id)
class SampleFlags(BoxPayload):
class Flags(Union):
class Bits(BigEndianStructure):
_fields_ = [("reserved", c_uint8, 6),
("sample_depends_on", c_uint8, 2),
("sample_is_depended_on", c_uint8, 2),
("sample_has_redundancy", c_uint8, 2),
("sample_padding_value", c_uint8, 3),
("sample_is_difference_sample", c_uint8, 1),
("sample_degradation_priority", c_uint16, 16)]
_fields_ = [("bit", Bits), ("byte", c_uint32)]
def __init__(self, sample_depends_on, sample_is_depended_on,
sample_has_redundancy, sample_padding_value,
sample_is_difference_sample, sample_degradation_priority):
self.flags = self.Flags()
self.flags.bit.reserved = 0 # Reserved
self.flags.bit.sample_depends_on = sample_depends_on
self.flags.bit.sample_is_depended_on = sample_is_depended_on
self.flags.bit.sample_has_redundancy = sample_has_redundancy
self.flags.bit.sample_padding_value = sample_padding_value
self.flags.bit.sample_is_difference_sample = sample_is_difference_sample
self.flags.bit.sample_degradation_priority = sample_degradation_priority
@property
def size(self):
return 4
def _serialize(self, packet):
packet.write_u32(self.flags.byte)
@classmethod
def _deserialize(cls, io):
flags = cls.Flags()
flags.byte = io.read_u32()
return cls(flags.bit.sample_depends_on, flags.bit.sample_is_depended_on,
flags.bit.sample_has_redundancy, flags.bit.sample_padding_value,
flags.bit.sample_is_difference_sample, flags.bit.sample_degradation_priority)
class BoxPayloadTREX(BoxPayload):
def __init__(self, version, track_id,
default_sample_description_index,
default_sample_duration, default_sample_size,
default_sample_flags):
self.version = version
self.track_id = track_id
self.default_sample_description_index = default_sample_description_index
self.default_sample_duration = default_sample_duration
self.default_sample_size = default_sample_size
self.default_sample_flags = default_sample_flags
@property
def size(self):
return 1+3+4+4+4+4+self.default_sample_flags.size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u32(self.track_id)
packet.write_u32(self.default_sample_description_index)
packet.write_u32(self.default_sample_duration)
packet.write_u32(self.default_sample_size)
self.default_sample_flags.serialize(packet)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
track_id = io.read_u32()
default_sample_description_index = io.read_u32()
default_sample_duration = io.read_u32()
default_sample_size = io.read_u32()
default_sample_flags = SampleFlags.deserialize(io=io)
return cls(version, track_id,
default_sample_description_index,
default_sample_duration, default_sample_size,
default_sample_flags)
class BoxPayloadTKHD(BoxPayload):
def __init__(self, version=0, flags=1, creation_time=0, modification_time=0,
track_id=1, duration=0, layer=0, alternate_group=0, volume=0.0,
transform_matrix=[65536, 0, 0, 0, 65536, 0, 0, 0, 1073741824],
width=0.0, height=0.0):
self.version = version
self.flags = flags
self.creation_time = creation_time
self.modification_time = modification_time
self.track_id = track_id
self.duration = duration
self.layer = layer
self.alternate_group = alternate_group
self.volume = volume
self.transform_matrix = transform_matrix
self.width = width
self.height = height
@property
def size(self):
size = 1+3+4+4+4+4+4+(4*2)+2+2+2+2+(9*4)+4+4
if self.version == 1:
size += 4*3
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(self.flags)
packet.write_u3264(self.version, self.creation_time)
packet.write_u3264(self.version, self.modification_time)
packet.write_u32(self.track_id)
packet.write_u32(0) # Reserved
packet.write_u3264(self.version, self.duration)
for i in range(2):
packet.write_u32(0) # Reserved
packet.write_s16(self.layer)
packet.write_s16(self.alternate_group)
packet.write_s8_8(self.volume)
packet.write_u16(0) # Reserved
for i in range(9):
packet.write_u32(self.transform_matrix[i])
packet.write_s16_16(self.width)
packet.write_s16_16(self.height)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
creation_time = io.read_u3264(version)
modification_time = io.read_u3264(version)
track_id = io.read_u32()
io.read_u32() # Reserved
duration = io.read_u3264(version)
for i in range(2):
io.read_u32() # Reserved
layer = io.read_s16()
alternate_group = io.read_s16()
volume = io.read_s8_8()
io.read_u16() # Reserved
transform_matrix = []
for i in range(9):
transform_matrix.append(io.read_s32())
width = io.read_s16_16()
height = io.read_s16_16()
return cls(version, flags, creation_time, modification_time,
track_id, duration, layer, alternate_group, volume,
transform_matrix, width, height)
class BoxPayloadMDHD(BoxPayload):
def __init__(self, version=0, creation_time=0, modification_time=0,
time_scale=1000, duration=0, language="eng"):
self.version = version
self.creation_time = creation_time
self.modification_time = modification_time
self.time_scale = time_scale
self.duration = duration
self.language = language
@property
def size(self):
size = 1+3+4+4+4+4+2+2
if self.version == 1:
size += 4*3
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u3264(self.version, self.creation_time)
packet.write_u3264(self.version, self.modification_time)
packet.write_u32(self.time_scale)
packet.write_u3264(self.version, self.duration)
packet.write_s16(iso639_to_lang(self.language))
packet.write_u16(0) # Reserved
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
io.read_u24() # Reserved
creation_time = io.read_u3264(version)
modification_time = io.read_u3264(version)
time_scale = io.read_u32()
duration = io.read_u3264(version)
language = lang_to_iso639(io.read_u16())
io.read_u16() # Reserved
return cls(version, creation_time, modification_time,
time_scale, duration, language)
class BoxPayloadHDLR(BoxPayload):
def __init__(self, version=0, predefined=0, handler_type="vide",
name=""):
self.version = version
self.predefined = predefined
self.handler_type = handler_type
self.name = name
@property
def size(self):
size = 1+3+4+4+(3*4)
size += len(self.name)
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u32(self.predefined)
packet.write_padded(self.handler_type, 4)
for i in range(3):
packet.write_u32(0) # Reserved
packet.write(bytes(self.name, "utf8"))
#packet.write_string(self.name)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24() # Reserved
predefined = io.read_u32()
handler_type = io.read_padded(4)
for i in range(3):
io.read_u32() # Reserved
name = io.read_string()
return cls(version, predefined, handler_type,
name)
class BoxPayloadVMHD(BoxPayload):
def __init__(self, version=0, flags=1, graphics_mode=0, op_color=[0, 0, 0]):
self.version = version
self.flags = flags
self.graphics_mode = graphics_mode
self.op_color = op_color
@property
def size(self):
return 1+3+2+(3*2)
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(self.flags)
packet.write_u16(self.graphics_mode)
for i in range(3):
packet.write_u16(self.op_color[i])
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
graphics_mode = io.read_u16()
op_color = []
for i in range(3):
op_color.append(io.read_u16())
return cls(version, flags, graphics_mode, op_color)
class BoxPayloadDREF(BoxContainer):
def __init__(self, version=0, boxes=[]):
self.version = version
self.boxes = boxes
@property
def size(self):
size = 1+3+4
for box in self.boxes:
size += box.size
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u32(len(self.boxes))
for box in self.boxes:
box.serialize(packet)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
entry_count = io.read_u32()
boxes = []
for i in range(entry_count):
box = Box.deserialize(io=io)
boxes.append(box)
return cls(version, boxes)
class BoxPayloadURL(BoxPayload):
def __init__(self, version=0, flags=1):
self.version = version
self.flags = flags
@property
def size(self):
return 4
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(self.flags)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
return cls(version, flags)
class BoxPayloadSTSD(BoxContainer):
def __init__(self, version=0, descriptions=[]):
self.version = version
self.descriptions = descriptions
@property
def size(self):
size = 4+4
for description in self.descriptions:
size += description.size
return size
@property
def boxes(self):
return self.descriptions
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u32(len(self.descriptions))
for description in self.descriptions:
description.serialize(packet)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
count = io.read_u32()
descriptions = []
for i in range(count):
box = Box.deserialize(io=io)
descriptions.append(box)
return cls(version, descriptions)
class BoxPayloadVisualSample(BoxContainer):
def __init__(self, data_reference_index=0, width=0, height=0,
horiz_resolution=0.0, vert_resolution=0.0, frame_count=0,
compressor_name="", depth=0, boxes=[]):
self.data_reference_index = data_reference_index
self.width = width
self.height = height
self.horiz_resolution = horiz_resolution
self.vert_resolution = vert_resolution
self.frame_count = frame_count
self.compressor_name = compressor_name
        self.depth = depth
self.boxes = boxes
@property
def size(self):
return 4
    def _serialize(self, packet):
        # Stub: the full visual sample entry is not implemented, so only four
        # placeholder bytes are written.
        for i in range(4):
            packet.write_u8(0)
    @classmethod
    def _deserialize(cls, io):
        # Stub: skip four bytes and return a payload with default values.
        for i in range(4):
            io.read_u8()
        return cls()
class BoxPayloadMDAT(RawPayload):
def __repr__(self):
return "<BoxPayloadMDAT size={0}>".format(self.size)
class BoxPayloadSKIP(RawPayload):
def __repr__(self):
return "<BoxPayloadSKIP size={0}>".format(self.size)
class BoxPayloadFREE(RawPayload):
def __repr__(self):
return "<BoxPayloadFREE size={0}>".format(self.size)
class BoxPayloadABST(BoxPayload):
class Flags(Union):
class Bits(BigEndianStructure):
_fields_ = [("profile", c_uint8, 2),
("live", c_uint8, 1),
("update", c_uint8, 1),
("reserved", c_uint8, 4)]
_fields_ = [("bit", Bits), ("byte", c_uint8)]
def __init__(self, version, bootstrap_info_version, profile, live, update,
time_scale, current_media_time, smpte_time_code_offset,
movie_identifier, server_entry_table, quality_entry_table,
drm_data, metadata, segment_run_table_entries,
fragment_run_table_entries):
self.version = version
self.bootstrap_info_version = bootstrap_info_version
self.flags = self.Flags()
self.flags.bit.profile = profile
self.flags.bit.live = live
self.flags.bit.update = update
self.flags.bit.reserved = 0
self.time_scale = time_scale
self.current_media_time = current_media_time
self.smpte_time_code_offset = smpte_time_code_offset
self.movie_identifier = movie_identifier
self.server_entry_table = server_entry_table
self.quality_entry_table = quality_entry_table
self.drm_data = drm_data
self.metadata = metadata
self.segment_run_table_entries = segment_run_table_entries
self.fragment_run_table_entries = fragment_run_table_entries
profile = flagproperty("flags", "profile")
update = flagproperty("flags", "update", True)
live = flagproperty("flags", "live", True)
@property
def size(self):
size = 1+3+4+1+4+8+8
size += len(self.movie_identifier) + 1
size += 1
for server in self.server_entry_table:
size += len(server) + 1
size += 1
for quality_entry in self.quality_entry_table:
size += len(quality_entry) + 1
size += len(self.drm_data) + 1
size += len(self.metadata) + 1
size += 1
for segment_run_table in self.segment_run_table_entries:
size += segment_run_table.size
size += 1
for fragment_run_table in self.fragment_run_table_entries:
size += fragment_run_table.size
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(0) # Reserved
packet.write_u32(self.bootstrap_info_version)
packet.write_u8(self.flags.byte)
packet.write_u32(self.time_scale)
packet.write_u64(self.current_media_time)
packet.write_u64(self.smpte_time_code_offset)
packet.write_string(self.movie_identifier)
packet.write_u8(len(self.server_entry_table))
for server_entry in self.server_entry_table:
packet.write_string(server_entry)
packet.write_u8(len(self.quality_entry_table))
for quality_entry in self.quality_entry_table:
packet.write_string(quality_entry)
packet.write_string(self.drm_data)
packet.write_string(self.metadata)
packet.write_u8(len(self.segment_run_table_entries))
for segment_run_table in self.segment_run_table_entries:
segment_run_table.serialize(packet)
packet.write_u8(len(self.fragment_run_table_entries))
for fragment_run_table in self.fragment_run_table_entries:
fragment_run_table.serialize(packet)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
io.read_u24() # Reserved
bootstrap_info_version = io.read_u32()
flags = cls.Flags()
flags.byte = io.read_u8()
time_scale = io.read_u32()
current_media_time = io.read_u64()
smpte_time_code_offset = io.read_u64()
movie_identifier = io.read_string()
server_entry_table = []
server_entry_count = io.read_u8()
for i in range(server_entry_count):
server_entry = io.read_string()
            server_entry_table.append(server_entry)
quality_entry_table = []
quality_entry_count = io.read_u8()
for i in range(quality_entry_count):
quality_entry = io.read_string()
            quality_entry_table.append(quality_entry)
drm_data = io.read_string()
metadata = io.read_string()
segment_run_table_entries = []
segment_run_table_count = io.read_u8()
for i in range(segment_run_table_count):
segment_run_table = Box.deserialize(io=io)
segment_run_table_entries.append(segment_run_table)
fragment_run_table_entries = []
fragment_run_table_count = io.read_u8()
for i in range(fragment_run_table_count):
fragment_run_table = Box.deserialize(io=io)
fragment_run_table_entries.append(fragment_run_table)
return cls(version, bootstrap_info_version, flags.bit.profile,
flags.bit.live, flags.bit.update, time_scale,
current_media_time, smpte_time_code_offset, movie_identifier,
server_entry_table, quality_entry_table, drm_data,
metadata, segment_run_table_entries, fragment_run_table_entries)
class SegmentRunEntry(BoxPayload):
def __init__(self, first_segment, fragments_per_segment):
self.first_segment = first_segment
self.fragments_per_segment = fragments_per_segment
@property
def size(self):
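        # Two unsigned 32-bit fields: first_segment and fragments_per_segment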
return 8
def _serialize(self, packet):
packet.write_u32(self.first_segment)
packet.write_u32(self.fragments_per_segment)
@classmethod
def _deserialize(cls, io):
first_segment = io.read_u32()
fragments_per_segment = io.read_u32()
return cls(first_segment, fragments_per_segment)
class BoxPayloadASRT(BoxPayload):
def __init__(self, version, flags, quality_segment_url_modifiers,
segment_run_entry_table):
self.version = version
self.flags = flags
self.quality_segment_url_modifiers = quality_segment_url_modifiers
self.segment_run_entry_table = segment_run_entry_table
@property
def size(self):
size = 1+3+1+4
for quality in self.quality_segment_url_modifiers:
size += len(quality) + 1
for segment_run_entry in self.segment_run_entry_table:
size += segment_run_entry.size
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(self.flags)
packet.write_u8(len(self.quality_segment_url_modifiers))
for quality in self.quality_segment_url_modifiers:
packet.write_string(quality)
packet.write_u32(len(self.segment_run_entry_table))
for segment_run_entry in self.segment_run_entry_table:
segment_run_entry.serialize(packet)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
quality_segment_url_modifiers = []
quality_entry_count = io.read_u8()
for i in range(quality_entry_count):
quality = io.read_string()
quality_segment_url_modifiers.append(quality)
segment_run_entry_count = io.read_u32()
segment_run_entry_table = []
for i in range(segment_run_entry_count):
segment_run_entry = SegmentRunEntry.deserialize(io=io)
segment_run_entry_table.append(segment_run_entry)
return cls(version, flags, quality_segment_url_modifiers,
segment_run_entry_table)
class FragmentRunEntry(BoxPayload):
def __init__(self, first_fragment, first_fragment_timestamp,
fragment_duration, discontinuity_indicator):
self.first_fragment = first_fragment
self.first_fragment_timestamp = first_fragment_timestamp
self.fragment_duration = fragment_duration
self.discontinuity_indicator = discontinuity_indicator
@property
def size(self):
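        # u32 + u64 + u32; a fragment_duration of 0 adds one extra byte for
        # the discontinuity indicator.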
size = 4+8+4
if self.fragment_duration == 0:
size += 1
return size
def _serialize(self, packet):
packet.write_u32(self.first_fragment)
packet.write_u64(self.first_fragment_timestamp)
packet.write_u32(self.fragment_duration)
if self.fragment_duration == 0:
packet.write_u8(self.discontinuity_indicator)
@classmethod
def _deserialize(cls, io):
first_fragment = io.read_u32()
first_fragment_timestamp = io.read_u64()
fragment_duration = io.read_u32()
if fragment_duration == 0:
discontinuity_indicator = io.read_u8()
else:
discontinuity_indicator = None
return cls(first_fragment, first_fragment_timestamp,
fragment_duration, discontinuity_indicator)
class BoxPayloadAFRT(BoxPayload):
def __init__(self, version, flags, time_scale,
quality_segment_url_modifiers,
fragment_run_entry_table):
self.version = version
self.flags = flags
self.time_scale = time_scale
self.quality_segment_url_modifiers = quality_segment_url_modifiers
self.fragment_run_entry_table = fragment_run_entry_table
@property
def size(self):
size = 1+3+4+1+4
for quality in self.quality_segment_url_modifiers:
size += len(quality) + 1
for fragment_run_entry in self.fragment_run_entry_table:
size += fragment_run_entry.size
return size
def _serialize(self, packet):
packet.write_u8(self.version)
packet.write_u24(self.flags)
packet.write_u32(self.time_scale)
packet.write_u8(len(self.quality_segment_url_modifiers))
for quality in self.quality_segment_url_modifiers:
packet.write_string(quality)
packet.write_u32(len(self.fragment_run_entry_table))
for fragment_run_entry in self.fragment_run_entry_table:
fragment_run_entry.serialize(packet)
@classmethod
def _deserialize(cls, io):
version = io.read_u8()
flags = io.read_u24()
time_scale = io.read_u32()
quality_segment_url_modifiers = []
quality_entry_count = io.read_u8()
for i in range(quality_entry_count):
quality = io.read_string()
quality_segment_url_modifiers.append(quality)
fragment_run_entry_count = io.read_u32()
fragment_run_entry_table = []
for i in range(fragment_run_entry_count):
fragment_run_entry = FragmentRunEntry.deserialize(io=io)
fragment_run_entry_table.append(fragment_run_entry)
return cls(version, flags, time_scale,
quality_segment_url_modifiers,
fragment_run_entry_table)
class BoxPayloadMVEX(BoxContainer):
pass
class BoxPayloadMFRA(BoxContainer):
pass
class BoxPayloadTRAK(BoxContainer):
pass
class BoxPayloadMDIA(BoxContainer):
pass
class BoxPayloadMINF(BoxContainer):
pass
class BoxPayloadSTBL(BoxContainer):
pass
class BoxPayloadMOOV(BoxContainer):
pass
class BoxPayloadMOOF(BoxContainer):
pass
class BoxPayloadMETA(BoxContainer):
pass
class BoxPayloadDINF(BoxContainerSingle):
pass
PayloadTypes = {
"ftyp": BoxPayloadFTYP,
"mvhd": BoxPayloadMVHD,
"trex": BoxPayloadTREX,
"tkhd": BoxPayloadTKHD,
"mdhd": BoxPayloadMDHD,
"hdlr": BoxPayloadHDLR,
"vmhd": BoxPayloadVMHD,
"dref": BoxPayloadDREF,
"url": BoxPayloadURL,
"stsd": BoxPayloadSTSD,
"mdat": BoxPayloadMDAT,
"abst": BoxPayloadABST,
"asrt": BoxPayloadASRT,
"afrt": BoxPayloadAFRT,
"skip": BoxPayloadSKIP,
"free": BoxPayloadFREE,
# Containers
"moov": BoxPayloadMOOV,
"moof": BoxPayloadMOOF,
"mvex": BoxPayloadMVEX,
"mdia": BoxPayloadMDIA,
"minf": BoxPayloadMINF,
"meta": BoxPayloadMETA,
"mfra": BoxPayloadMFRA,
"stbl": BoxPayloadSTBL,
"trak": BoxPayloadTRAK,
"dinf": BoxPayloadDINF,
}
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:17:39 2016
Class files to store workflow information
@author: alex
"""
#Node object for use in a tree
class Node(object):
def __init__(self):
self.connections = []
self.data = None
#Tree datastructure to allow for storage of non-linear flows
class Tree(object):
def __init__(self):
self.root = Node()
#Key Action object for in-memory storage of key actions
class _KeyAction():
def __init__(self):
self.name = ''
self.description = ''
self.systemarea = ''
self.module = ''
self.custom = False
self.id = 0
self.expected_result = ''
self.notes = ''
self.input_parameters = []
self.next_action_id = -1
self.next_action_list = []
def add_nextaction(self, next_act):
self.next_action_list.append(next_act)
self.next_action_id = next_act.id
def has_nextaction(self):
if self.next_action_id == -1:
return False
else:
return True
def add_inputparameter(self, new_ip):
self.input_parameters.append(new_ip)
def remove_inputparameter(self, ip):
self.input_parameters.remove(ip)
def clear_inputparameters(self):
del self.input_parameters[:]
def numParams(self):
return len(self.input_parameters)
#Input Parameter Object for in-memory storage of input params
class _InputParameter():
def __init__(self):
self.name = ''
self.id = 0
self.value = ''
#Workflow object for in-memory storage of workflow objects, as well as analysis/data manipulation
class _Workflow():
def __init__(self):
self.name = ''
self.id = 0
self.keyactions = []
self.keyactiontreelist = []
def add_keyaction(self, new_action):
self.keyactions.append(new_action)
def remove_keyaction(self, action):
self.keyactions.remove(action)
def clear_keyactions(self):
del self.keyactions[:]
def numRows(self):
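        #Each key action contributes one row, or one row per input parameter if it has any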
num_rows = 0
for action in self.keyactions:
if action.numParams() == 0:
num_rows+=1
else:
num_rows+=action.numParams()
return num_rows
#Key Action Tree Methods
def is_element_in_tree(self, input_id, tree):
return self.is_element_in_nodechain(input_id, tree.root)
def is_element_in_nodechain(self, input_id, node):
        if node.data.id == input_id:
            print('Match on element %s found' % (input_id))
            return True
else:
if len(node.connections) > 0:
match=False
for con in node.connections:
if self.is_element_in_nodechain(input_id, con):
match = True
print('Match on element %s found' % (input_id))
return match
else:
return False
def is_element_connected_to_tree(self, input_id, tree):
return self.is_element_connected_to_nodechain(input_id, tree.root)
def is_element_connected_to_nodechain(self, input_id, node):
for act in node.data.next_action_list:
if act.id == input_id:
print('Connection on element %s found' % (input_id))
return True
# if len(node.connections) > 0:
for con in node.connections:
a = self.is_element_connected_to_nodechain(input_id, con)
print('Is Element Connected To Node Chain Returned with Value %s' % (a))
return a
# else:
# return False
def print_trees(self):
for tree in self.keyactiontreelist:
self.print_node(tree.root)
def print_node(self, node):
print(node.data.id)
for cons in node.connections:
self.print_node(cons)
#Methods for building the Key Action Tree
def find_keyaction(self, keyactionid):
for action in self.keyactions:
if action.id == keyactionid:
return action
return 0
def build_keyactionchain(self, act, chain_list):
print('Build Key Action Chain called with action %s and chain list %s' % (act.id, chain_list))
new_chain = []
test_action = act
multi_hit=False
#Loop until an ending action is found
while test_action.has_nextaction():
print('Loop entered, next action list length %s' % (len(test_action.next_action_list)))
new_chain.append(test_action)
#If this is a standard node, we can just read the next action id
if len(test_action.next_action_list) == 1:
test_action = self.find_keyaction(test_action.next_action_id)
print('Node %s appended and new test action assigned' % (test_action.id))
#If this is an and or or node, we need to account for splits
else:
acts = test_action.next_action_list
for action in acts:
multi_hit=True
test_action = self.build_keyactionchain(action, chain_list)
if multi_hit == False:
new_chain.append(test_action)
chain_list.append(new_chain)
print('Adding chain to chain list with starting ID %s and length %s' % (new_chain[0].id, len(new_chain)))
for element in new_chain:
print(element.id)
return test_action
def compare_chain(self, chain, comp_chain):
print('Comparing Chain %s to Chain %s' % (chain, comp_chain))
#Compare the given chain to the compare chain and see whether it is a
#New Chain (0), Prepend (1), or Append (2) Scenario. We return -1 if no change is necessary
if len(comp_chain) < 1:
print('Length of base chain less than 1')
return 0
elif len(chain) < 1:
print('Length of compare chain less than 1')
return -1
else:
#Compare the first elements
if comp_chain[0].id == chain[0].id:
print('First elements are equal')
if len(comp_chain) > len(chain) - 1:
print('Length of base chain is greater than compare chain - 1')
return -1
else:
return 2
else:
#The first elements aren't equal, we need to compare the other
#elements to determine whether it's a new chain or prepend scenario
match=False
match_indicator=False
match_id=-1
for link in chain:
print('Processing started for link with ID %s' % (link.id))
for element in comp_chain:
print('Compare started on element with ID %s' % (element.id))
if match==False:
print('Match is false')
if link.id == element.id:
print('Match found')
match=True
match_id = link.id
if match:
#A match exists, meaning we have a prepend/append scenario
if comp_chain[0].id == match_id:
#The first element of the comparison chain is the match,
#This is a pure prepend scenario
print('Prepend Scenario detected')
return 1
else:
#This is a tree scenario, which is a new chain scenario
print('New chain scenario detected')
return 0
else:
#We have a new chain scenario
print('New chain scenario defaulted')
return 0
def connect_chain_to_tree(self, chain, tree):
print('Is Chain Connected to Tree Called with chain that has start element ID %s' % (chain[0].id))
self.print_trees()
node=tree.root
keep_going=True
while keep_going:
print('While loop entered')
#Test for match
print('Comparison started on %s and %s' % (node.data.next_action_id, chain[0].id))
for act in node.data.next_action_list:
if act.id == chain[0].id:
connected_node = node
print('matching node found with ID %s' % (node.data.id))
#Add the chain onto the connected node
z=0
for z in range(0, len(chain)):
new_node = Node()
new_node.data=chain[z]
print('New Node added with ID %s' % (new_node.data.id))
connected_node.connections.append(new_node)
connected_node = new_node
return True
if len(node.connections) == 0:
keep_going=False
#If no match was encountered, then we move on to the next node
if len(node.connections) == 1:
node = node.connections[0]
else:
for con in node.connections:
self.connect_chain_to_nodeset(chain, con)
return False
def connect_chain_to_nodeset(self, chain, node):
print('Is chain connected to nodeset called with chain %s and node %s' % (chain, node))
while len(node.connections) != 0:
#Test for match
for el in chain:
if node.data.next_action_id == el.id:
connected_node = node
print('matching node found with ID %s' % (node.data.id))
#Add the chain onto the connected node
z=0
for z in range(0, len(chain)):
new_node = Node()
                        new_node.data=chain[z]
print('New Node added with ID %s' % (new_node.data.id))
connected_node.connections.append(new_node)
connected_node = new_node
return True
#If no match was encountered, then we move on to the next node
if len(node.connections) == 1:
node = node.connections[0]
else:
for con in node.connections:
self.connect_chain_to_nodeset(chain, con)
return False
def build_keyactiontree(self):
print('Build Key Action Tree Called')
keep_going = True
#Cycle through each action and take the following steps:
#1. Compare to existing chains and, if already accounted for, do nothing
#2. Build a full chain until an end point is reached, then push this to a list of lists (chains)
# Splits are represented by new chains from the diverging node
        #  Prepending-tail scenarios are common
chain_list=[]
#Check for already accounted for actions
for action in self.keyactions:
for chain in chain_list:
for element in chain:
if action.id == element.id:
keep_going = False
if keep_going:
#Build a chain list
self.build_keyactionchain(action, chain_list)
#Attach each chain into the final chain list
#This step removes prepending scenarios
#3 Distinct Possibilities:
#New Chain: Create new tree
#Prepend: Create a new tree, replace the old tree
        #Append: Add branches to a tree
final_list=[]
prepend_list=[]
for chain in chain_list:
print('Processing Started for chain with first element ID %s and length %s' % (chain[0].id, len(chain)))
if len(final_list) == 0:
final_list.append(chain)
print('First chain appended to final list')
else:
final_compare=-1
for element in final_list:
print('Comparison against element in final list started')
comp = self.compare_chain(chain, element)
if comp > -1:
#If the final comparison is < comparison, then set to comparison
print('Compare result is %s' % (comp))
if final_compare<comp:
print('Final Compare is less than comparison')
final_compare=comp
prepend_list=element
if final_compare == 0 or final_compare == 2:
#New Chain
final_list.append(chain)
                    print('Chain with start element ID %s appended to final list' % (chain[0].id))
elif final_compare == 1:
#Prepend
new_elements=[]
keep_running=True
for obj in chain:
if obj.id == prepend_list[0].id:
keep_running=False
elif keep_running:
new_elements.append(obj)
e_counter=0
for e in new_elements:
prepend_list.insert(e_counter, e)
e_counter+=1
#Loop 1
print('Now we have a list of chains, none of which overlap, we can build them into trees')
for f in final_list:
for c in f:
print(c.id)
chain_count=0
for chain in final_list:
#Determine which case we're dealing with, if a single stack it can
#be added to the tree list. If a branch then do nothing
match_indicator=False
for c in final_list:
print('Comparison against chain with start element ID %s started' % (c[0].id))
for el in c:
for na in el.next_action_list:
if na.id == chain[0].id:
match_indicator=True
print('Match encountered')
if match_indicator==False:
#Create a tree from the chain and add it to the tree list
chain_count+=1
print('Root found, tree started')
t = Tree()
t.root.data = chain[0]
i=1
node=t.root
print('Chain Length %s' % (len(chain)))
for i in range(1, len(chain)):
new_node = Node()
new_node.data=chain[i]
print('Adding node with ID %s to tree' % (new_node.data.id))
node.connections.append(new_node)
node = new_node
self.keyactiontreelist.append(t)
#Loop 2
#Secondary Mechanism to prevent infinite loops
original_chain_count=0
no_match_counter=0
no_match_limit=30
while chain_count < len(final_list) and no_match_counter < no_match_limit:
for chain in final_list:
print('Processing started on chain with start element ID %s and length %s' % (chain[0].id, len(chain)))
#Need to reverse the logic to add things onto existing trees instead of creating new ones
#Determine which case we're dealing with, if a root then we should build it
#and add it to the tree list once completed, then mark all the other chains to
#not be considered in the rest of the loop. If a branch do nothing.
for tree in self.keyactiontreelist:
for el in chain:
#Check for existing key actions
match=True
for element in chain:
if self.is_element_in_tree(element.id, tree) == False:
match = False
if match == False:
if self.is_element_connected_to_tree(el.id, tree):
print('Element in a tree references this chain')
#Pull the tree and add the chain
self.connect_chain_to_tree(chain, tree)
chain_count+=1
if chain_count == original_chain_count:
no_match_counter+=1
print('No Match Counter Iterated')
else:
original_chain_count=chain_count
no_match_counter=0
|
|
from scipy.io import wavfile
import subprocess
import time
import math
import glob
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
import random
import shelve
from decimal import Decimal
import argparse
import sys
import shutil
from datetime import datetime
parser = argparse.ArgumentParser(description="Machine learning algorithm for generating audio")
def run(cmd, shell=True):
return subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell).communicate()[0]
#parser.add_argument("--logdir",type=str, default="Saves" ,help='directory for saving')
parser.add_argument("--data_dir",type=str,default="aud" ,help='directory with sound data (default aud)')
parser.add_argument("--generations",type=int,default=10000,help='number of generations (default 10000)')
parser.add_argument("--num_files",type=int,default=-1,help='number of files or -1 for all of them(default -1)')
parser.add_argument("--checkpoint_every", type=int,default=50,help="number of generations until checkport")
parser.add_argument("--Sample_rate", type=int,default=5000,help="Sample rate")
parser.add_argument("--file_split", type=int,default=10000,help="Number of files per input")
parser.add_argument("--learning_rate", type=float,default=0.01,help="learning rate (default 0.01)")
parser.add_argument("--action", type=int,default=3,help="1 for turning files to data, 2 for learning and 3 for both")
parser.add_argument("--restore", type=str,default=None,help="restore previus session")
parser.add_argument("--generate_path", type=str,default=None,help="Generateed file origanal path")
parser.add_argument("--generate_new", type=str,default=None,help="Path to new file")
args = parser.parse_args()
if bool(args.generate_path)^bool(args.generate_new):
raise ValueError("You must specify either both generate_path and generate_new or None")
if args.restore:
date_start = "/".join(args.restore.split("/")[:-1])
else:
date_start = "{0:%Y-%m-%dT%H-%M-%S}::".format(datetime.now())+str(run("ls checkpoints| wc -l")[:-1])
#os.mkdir("checkpoints/%s" %date_start)
#os.mkdir("summarys/%s" %date_start)
#time_format = "{0:%Y-%m-%dT%H-%M-%S}"
full_start = time.time()
args.logdir = "checkpoints/%s" %date_start
print args.logdir
if not os.path.isdir(args.logdir):
os.mkdir(args.logdir)
#else:
# if(raw_input("a directory with the chosen name already exists. Do you want to overide? Y/n:").lower() == "y"):
# shutil.rmtree(os.path.join("/home/guyknaan/voiceswap",args.logdir))
# os.mkdir(args.logdir)
#else:
# print "not overiding"
# sys.exit()
#if not os.path.isdir(args.data_dir):
# raise ValueError("the chose data dir: %s does not exist" %args.data_dir)
SAMPLE_RATE= args.Sample_rate
d = shelve.open("data")
keys_train = d["train"]
keys_test = d["test"]
"""
for i in range(len(keys_train)):
keys_train[i][0]=keys_train[i][0]#+".flac"
for i in range(len(keys_test)):
keys_test[i][1]=keys_test[i][1]#+".flac"
d.close()
"""
def sigmoid(x):
return 1 / (1 + math.exp(-x))
emp0 = np.empty(0)
emp1 = np.empty(1)
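#sentinel arrays: emp1 signals a recoverable ValueError in join_sound, emp0 signals running out of memory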
def join_sound(file_in, file_out,size=300, mode=0):
# print mode
try:
if mode == 0:
inp,_ = librosa.load(file_in, sr=SAMPLE_RATE)
dur=librosa.get_duration(inp,SAMPLE_RATE)
x,_ = librosa.load(file_out,sr=SAMPLE_RATE)
#print x
new_dur= librosa.get_duration(x,sr=SAMPLE_RATE)
#replace maybe
#out, _ = librosa.load(file_out, sr=int(SAMPLE_RATE*dur/new_dur))
out, _ = librosa.load(file_out, sr=SAMPLE_RATE)
# print inp.shape
#turn the numbers to the range from 0 to 1
if(len(inp)>len(out)):
inp=inp[0:len(out)]
else:
out=out[0:len(inp)]
else:
inp = file_in
out = file_out
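        #shift samples from librosa's [-1, 1] range into [0, 1] to match the sigmoid outputs used by the model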
for i in range(len(inp)):
inp[i]= (inp[i]+1)/2.0
# print inp[i]
for i in range(len(out)):
out[i]= (out[i]+1)/2.0
# print out
# print inp
# print inp.shape
# print out.shape
newInp = []
newOut = []
index = 0
out = out[0:int(size*math.floor(len(out)/size))]
inp = inp[0:int(size*math.floor(len(inp)/size))]
inp=np.split(inp,len(inp)/size)
out=np.split(out,len(out)/size)
for i in range(len(out)):
wavfile.write("out_test/out_file%03d.wav" %i,SAMPLE_RATE,np.array(out[i]))
wavfile.write("out_test/in_file%03d.wav" %i,SAMPLE_RATE,np.array(inp[i]))
out[i]=np.append(out[i],float(i)/len(out))
except ValueError as e:
print e
raise
return emp1,emp1
except MemoryError as e:
print e
return emp0,emp0
return np.array(out[:-2]),np.array(inp[:-2])
#a,b = join_sound("aud/KXEGWMOFSFoutput179.mp3","aud/ITMUVRTUURoutput561.mp3")
join_sound("tester/DLPTOAUSIQ0211.flac","tester/DLPTOAUSIQ0211_output.mp3",size=10000)
raise
#print a.shape
#print b.shape
def parse_audio_files(files,dir,ind=None,size=300):
if ind is None:
ind = len(files)
inputs, outputs = [], []
count_num=0
#print files
#print ind
#print len(files[0:ind])
for fn in files[0:ind]:
count_num+=1
print "loading the %sth file: %s" %(str(count_num),fn[0])
if len(fn) == 2:
try:
#inp, out = join_sound(dir + "/" + fn[0],dir + "/" + fn[1],size=size)
# temp temp
inp, out = join_sound(fn[0],fn[1],size=size)
if inp is emp0:
return np.array(inputs),np.vstack(np.array(outputs))
if inp is not emp1:
for i in out:
outputs.append(i)
for i in inp:
inputs.append(i)
except ValueError as e:
print e
#raise
except MemoryError as e:
return np.array(inputs[0:-10]),np.vstack(np.array(outputs[0:-10]))
return np.array(inputs),np.vstack(np.array(outputs))
def one_hot_encode(labels):
n_labels = len(labels)
n_unique_labels = len(np.unique(labels))
one_hot_encode = np.zeros((n_labels,n_unique_labels))
one_hot_encode[np.arange(n_labels), labels] = 1
return one_hot_encode
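# For illustration (hypothetical labels, not used by the pipeline below):
#   one_hot_encode([0, 2, 1]) returns
#       [[1, 0, 0],
#        [0, 0, 1],
#        [0, 1, 0]]
# i.e. one row per sample with a 1 in the column of that sample's label.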
d = shelve.open("preloaded")
if args.action == 1 or args.action == 3:
    try:
        tr_features, tr_labels = parse_audio_files(keys_train, args.data_dir, ind=args.num_files if args.num_files != -1 else None, size=args.file_split)
    except Exception:
        raise
    # cache the parsed data so a later run with --action 2 can reuse it
    d["features"] = tr_features
    d["labels"] = tr_labels
else:
    tr_features = d["features"]
    tr_labels = d["labels"]
#tr_features, tr_labels = np.random.rand(100,20),np.random.rand(100,20)
#print args.num_files if args.num_files != -1 else None
#print tr_features.shape
#print tr_labels.shape
n_dim = tr_features.shape[1]
n_classes = tr_labels.shape[1]
training_epochs = args.generations
n_hidden_units_one = 500
n_hidden_units_two = 550
sd = 1
learning_rate = args.learning_rate
X = tf.placeholder(tf.float32,[None,n_dim])
Y = tf.placeholder(tf.float32,[None,n_classes])
W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)
W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes]))
y_ = tf.nn.sigmoid(tf.matmul(h_2,W)+b)
#cost_function = -tf.reduce_mean(Y * tf.log(y_))
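# The cost used here is mean((sqrt(y_) - sqrt(Y))**2) / sqrt(2) over the
# [0, 1]-scaled signals -- a Hellinger-style distance -- rather than the
# cross-entropy commented out above.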
cost_function=tf.reduce_mean(tf.square(tf.sqrt(y_)-tf.sqrt(Y))/(2**0.5))
tf.summary.scalar('cost', cost_function)
#adapt_rate = tf.placeholder(tf.float32, shape=[])
#optimizer = tf.train.GradientDescentOptimizer(adapt_rate).minimize(cost_function)
optimizer=tf.train.AdagradOptimizer(args.learning_rate).minimize(cost_function)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
cost_history = np.empty(shape=[1],dtype=float)
y_true, y_pred = None, None
saver = tf.train.Saver()
init = tf.global_variables_initializer()
sumarizer = tf.summary.FileWriter("summarys/%s" %date_start)
def run_training():
#rate=float(learning_rate)
with tf.Session() as sess:
done=0
if args.restore:
saver.restore(sess,args.restore)
else:
sess.run(init)
if args.generate_path:
loaded, _ = join_sound("aud/XPLQAKERFH0403.flac",args.generate_path)
print loaded.shape
output = sess.run(y_, feed_dict={X:loaded})
output = output.reshape(output.size)
output = (output*2) -1
print output
print output.shape
wavfile.write(args.generate_new,SAMPLE_RATE,output)
return
try:
for epoch in range(training_epochs):
start_time = time.time()
#print sess.run(y_)
feed_dict={X:tr_features,Y:tr_labels}
_,cost = sess.run([optimizer,cost_function],feed_dict=feed_dict)
#cost_history = np.append(cost_history,cost)
duration = time.time() - start_time
print('step {:d} - loss = {:e}, ({:.3f} sec/stp)'.format(epoch, cost, duration))
if epoch%args.checkpoint_every==0:
print "Saving"
saver.save(sess, os.path.join(args.logdir, 'model.ckpt'), global_step=epoch)
if epoch%10 == 0:
summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict)
sumarizer.add_summary(summary, epoch)
if epoch+1==training_epochs:
print "Saving"
saver.save(sess, os.path.join(args.logdir, 'export'))
except:
saver.save(sess, os.path.join(args.logdir, 'export'))
raise
if args.action == 2 or args.action == 3:
run_training()
end_time = time.time()
difference = end_time - full_start
hours = int(math.floor(difference / (60 ** 2)))
difference -= 60 * 60 * hours
minutes = int(math.floor(difference / 60))
difference -= 60 * minutes
seconds = difference
print "total time for training: {} hours, {} minutes, and {} seconds".format(hours, minutes, round(seconds))
|
|
# -*- coding: utf-8 -*-
"""
Snake module.
"""
from pygame import Rect
from constants import (MAX_HITPOINTS,
INIT_SPEED, MIN_SPEED, MAX_SPEED)
from constants import INVINCIBILITY_BLINK_RATE
from utils import add_vecs, sub_vecs, normalize, m_distance
from core.map import wrap_around, on_edge
# -- Directions --
EAST = (+1, 0)
WEST = (-1, 0)
NORTH = (0, -1)
SOUTH = (0, +1)
DIRECTIONS = {'E': EAST, 'W': WEST, 'N': NORTH, 'S': SOUTH}
STRAIGHT1_V = Rect(20, 20, 10, 10)
STRAIGHT1_H = Rect(20, 30, 10, 10)
STRAIGHT2_V = Rect(30, 20, 10, 10)
STRAIGHT2_H = Rect(30, 30, 10, 10)
N = 0x1
E = 0x2
S = 0x4
W = 0x8
SE = S + E
SW = S + W
NE = N + E
NW = N + W
STRAIGHT = 0x10
VERTICAL = 0x20
# Maps vectors to their corresponding direction flags.
VEC_TO_DIRFLAG = {(0, -1): N, (1, 0): E, (0, 1): S, (-1, 0): W}
HEAD = {N: Rect(00, 00, 10, 10), S: Rect(10, 10, 10, 10),
E: Rect(10, 00, 10, 10), W: Rect(00, 10, 10, 10)}
TAIL = {N: Rect(20, 00, 10, 10), S: Rect(30, 10, 10, 10),
E: Rect(30, 00, 10, 10), W: Rect(20, 10, 10, 10)}
TURN = {SE: Rect(00, 20, 10, 10), SW: Rect(10, 20, 10, 10),
NE: Rect(00, 30, 10, 10), NW: Rect(10, 30, 10, 10)}
def get_next_to_portal(pos, tilemap):
"""Determine whether pos is right next to a portal.
:return: The portal next to pos or None
"""
for portal in list(tilemap.portals.keys()):
if m_distance(portal, pos) == 1:
return portal
return None
def get_arrangement(snake, index, tilemap):
"""
    Get the arrangement of a snake part in relation to its neighboring
    parts, taking the map and its portals into account as well.
This is to determine which part of the skin texture to use for
rendering said part.
"""
vec_ax, vec_ay = vec_a = snake[index - 1]
vec_bx, vec_by = vec_b = snake[index]
vec_cx, vec_cy = vec_c = snake[index + 1]
vec_ba = sub_vecs(vec_a, vec_b)
vec_bc = sub_vecs(vec_c, vec_b)
ba_apart = m_distance(vec_a, vec_b) > 1
bc_apart = m_distance(vec_c, vec_b) > 1
a_on_edge = on_edge(vec_a)
if ba_apart:
if a_on_edge:
vec_ba = normalize(vec_ba)
portal = get_next_to_portal(vec_b, tilemap)
if portal:
vec_a = portal
vec_ax, vec_ay = vec_a
vec_ba = sub_vecs(vec_a, vec_b)
if bc_apart:
if a_on_edge:
vec_bc = normalize((-vec_bc[0], -vec_bc[1]))
portal = get_next_to_portal(vec_b, tilemap)
if portal:
vec_c = portal
vec_cx, vec_cy = vec_c
vec_bc = sub_vecs(vec_c, vec_b)
if vec_ax == vec_bx == vec_cx:
return VERTICAL | STRAIGHT
elif vec_ay == vec_by == vec_cy:
return STRAIGHT
else:
return VEC_TO_DIRFLAG[vec_ba] | VEC_TO_DIRFLAG[vec_bc]
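# For example: if the previous part lies to the north of the part at `index` and
# the next part lies to its east, the two lookups give N | E == NE, which selects
# the matching corner tile from TURN; three parts sharing the same x coordinate
# fall into the VERTICAL | STRAIGHT case instead.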
class SnakeNormalState(object):
"""
    The state the snake is normally in.
"""
def __init__(self, snake):
self.snake = snake
def update(self, delta_time):
"""Update state."""
self.snake.move()
class SnakeInvincibleState(object):
"""
    The state the snake is in while it is invincible.
"""
def __init__(self, snake, lifetime):
self.snake = snake
self.snake.isinvincible = True
self.lifetime = lifetime
self.elapsed_lifetime = 0.
self.elapsed_blink = 0.
def update(self, delta_time):
"""Update state."""
self.elapsed_lifetime += delta_time
self.elapsed_blink += delta_time
if self.elapsed_blink >= INVINCIBILITY_BLINK_RATE:
self.elapsed_blink -= INVINCIBILITY_BLINK_RATE
self.snake.isvisible = not self.snake.isvisible
if self.elapsed_lifetime >= self.lifetime:
self.snake.change_state(SnakeNormalState(self.snake))
self.snake.move()
def leave(self):
"""Leave state."""
self.snake.isinvincible = False
self.snake.isvisible = True
class Snake(object):
"""
Represents a snake.
"""
def __init__(self, game, pos, skin, _id, killed_handler, config):
self.game = game
self.body_tag = '#p{0}-body'.format(_id)
self.head_tag = '#p{0}-head'.format(_id)
self.skin = skin
self.body = [pos, (pos[0] + 1, pos[1])]
self.heading = None
self._hitpoints = config.get('hp', MAX_HITPOINTS)
self._speed = config.get('speed', INIT_SPEED)
self._speed_bonus = 0
self.elapsed_t = 0.
self.grow = 0
self.isalive = True
self.isvisible = True
self.isinvincible = False
self.ismoving = False
self.curr_state = SnakeInvincibleState(self, 5)
self.killed_event = killed_handler
self.prev = self.body[:]
self.prev_heading = self.heading
@property
def hitpoints(self):
"""Return hitpoints."""
return self._hitpoints
@hitpoints.setter
def hitpoints(self, value):
"""Set hitpoints."""
if value > MAX_HITPOINTS:
self._hitpoints = MAX_HITPOINTS
elif value < 0:
self._hitpoints = 0
else:
self._hitpoints = value
@property
def speed(self):
"""Return speed."""
return self._speed
@speed.setter
def speed(self, value):
"""Set speed."""
if value > MAX_SPEED:
self._speed = MAX_SPEED
elif value < MIN_SPEED:
self._speed = MIN_SPEED
else:
self._speed = value
@property
def speed_bonus(self):
"""Return speed bonus."""
return self._speed_bonus
@speed_bonus.setter
def speed_bonus(self, value):
"""Set speed bonus."""
self._speed_bonus = value
def gain_speed(self, speed):
"""Increase (or decrease) speed."""
self.speed += speed
def gain_hitpoints(self, hitpoints):
"""Increase (or decrease) hitpoints."""
self.hitpoints += hitpoints
if self.hitpoints > MAX_HITPOINTS:
self.hitpoints = MAX_HITPOINTS
def set_heading(self, new_heading):
"""Set heading."""
self.prev_heading = self.heading
self.heading = (normalize(new_heading) if new_heading != (0, 0)
else new_heading)
def change_state(self, new_state):
"""Transit to another state."""
if hasattr(self.curr_state, 'leave'):
self.curr_state.leave()
self.curr_state = new_state
def take_damage(self, dmg, dealt_by, setback=False,
invincible=False, invinc_lifetime=0, shrink=0, slowdown=0):
"""Take damage."""
if not self.isinvincible:
self.hitpoints -= dmg
for _ in range(shrink):
if len(self.body) == 2:
break
if setback:
self.prev.pop()
else:
self.body.pop()
self.gain_speed(-slowdown)
if setback:
# Set the snake back to its previous position
self.body = self.prev[:]
xpos = 0
ypos = 0
if self.body[0][0] > self.body[1][0]:
xpos = 1
elif self.body[0][0] < self.body[1][0]:
xpos = -1
if self.body[0][1] > self.body[1][1]:
ypos = 1
elif self.body[0][1] < self.body[1][1]:
ypos = -1
self.set_heading((xpos, ypos))
self.prev_heading = (xpos, ypos)
if invincible and not self.isinvincible:
self.change_state(SnakeInvincibleState(self, invinc_lifetime))
if self.hitpoints <= 0:
self.isalive = False
self.killed_event(dealt_by)
def respawn(self, pos):
"""Respawn snake."""
self.body = [pos, (pos[0] + 1, pos[1])]
self._speed = INIT_SPEED
self.heading = None
self.prev_heading = None
self.ismoving = False
self.isalive = True
self.hitpoints = MAX_HITPOINTS
self.change_state(SnakeInvincibleState(self, 3.5))
self.elapsed_t = 0
def update(self, delta_time):
"""Update snake."""
if not self.isalive:
return
if self.ismoving:
self.elapsed_t += delta_time
self.curr_state.update(delta_time)
def move(self):
"""Move snake."""
if not self.ismoving:
return
self.body[0] = wrap_around(self.body[0])
# Move Snake
if self.elapsed_t >= 1. / (self._speed + self._speed_bonus):
self.prev = self.body[:]
self.elapsed_t -= 1. / (self._speed + self._speed_bonus)
self.body.insert(0, add_vecs(self.body[0], self.heading))
if self.grow == 0:
self.body.pop()
elif self.grow > 0:
self.grow -= 1
elif self.grow < 0:
for _ in range(-self.grow+1):
if len(self.body) == 2:
break
self.body.pop()
self.grow = 0
def draw(self):
"""Draw snake."""
if not self.isalive or not self.isvisible:
return
body_len = len(self.body)
area = None
tilemap = self.game.current_state.mode.tilemap
for index, part in enumerate(self.body):
if index == 0:
if self.heading and self.heading != (0, 0):
area = HEAD[VEC_TO_DIRFLAG[self.heading]]
else:
area = HEAD[W]
elif 0 < index < (body_len - 1):
argm = get_arrangement(self.body, index, tilemap)
if argm & STRAIGHT == STRAIGHT:
if argm & VERTICAL == VERTICAL:
if index % 2:
area = STRAIGHT1_V
else:
area = STRAIGHT2_V
else:
if index % 2:
area = STRAIGHT1_H
else:
area = STRAIGHT2_H
else:
area = TURN[argm & 15]
else:
if self.heading and self.heading != (0, 0):
tail = self.body[body_len-1]
second_last = self.body[body_len-2]
apart = m_distance(tail, second_last) > 1
if apart:
portal = get_next_to_portal(tail, tilemap)
if portal:
second_last = portal
vec = sub_vecs(second_last, tail)
area = TAIL[VEC_TO_DIRFLAG[normalize(vec)]]
else:
area = TAIL[W]
self.game.graphics.draw(self.skin, part, area=area)
def __setitem__(self, i, item):
self.body[i] = item
def __getitem__(self, i):
return self.body[i]
|
|
# import
import pandas as pd
from os.path import join
import xml.etree.ElementTree as ET
import os
import zipfile
from pypxlib import Table
from collections import OrderedDict
import shutil
'''
A shortcut for ET.SubElement(parent, child) that also sets .text when text is a str.
'''
def add_node(parent, child, text):
if type(text)==str:
ET.SubElement(parent, child).text = text
else:
ET.SubElement(parent, child)
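# For example (hypothetical element names):
#   resource = ET.Element('resource')
#   add_node(resource, 'name', 'reservoir')  # adds <name>reservoir</name>
#   add_node(resource, 'comment', None)      # adds an empty <comment/> child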
# add resource attribute
def add_attribute(resources, resource_name, attr_dict):
for resource in resources:
if resource.find('name').text == resource_name:
attr = ET.SubElement(resource, 'attribute')
for attr_name, attr_text in attr_dict.items():
add_node(attr, attr_name, attr_text)
'''
pretty print xml from http://effbot.org/zone/element-lib.htm#prettyprint
'''
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
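# Typical use (see write_template_xml below): call indent(root) once on the root
# element, then ET.ElementTree(root).write(path) emits nested, human-readable XML
# instead of one long line.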
'''
zip a directory - pilfered from the internet
'''
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
def create_xml_template(tpl_name):
# load features and variables from csv
features_all = pd.read_csv('./WEAP_default_features.csv')
variables_all = pd.read_csv('./WEAP_default_variables.csv')
var_features = variables_all.feature.unique()
cat_features = features_all.feature.unique()
# template name
tpl = ET.Element('template_definition')
#tree = ET.ElementTree(tpl)
ET.SubElement(tpl, 'template_name').text = tpl_name
# define link lines
width = 4
linkstyle = {}
linkstyle['river'] = ['solid', 'blue', width]
linkstyle['diversion'] = ['solid', 'orange', width]
linkstyle['transmission_link'] = ['solid', 'green', width]
linkstyle['return_flow'] = ['solid', 'red', width]
linkstyle['runoff_infiltration'] = ['dashed', 'blue', width]
# add layout
layout = ET.SubElement(tpl, 'layout')
item = ET.SubElement(layout, 'item')
add_node(item, 'name', 'grouping')
value = ET.SubElement(item, 'value')
add_node(value, 'name', tpl_name)
add_node(value, 'description', tpl_name)
categories = ET.SubElement(value, 'categories')
cats = features_all.category.unique()
for cat in cats:
category = ET.SubElement(categories, 'category')
for attr in ['name', 'description', 'displayname']:
add_node(category, attr, cat)
groups = ET.SubElement(category, 'groups')
features = features_all[features_all.category==cat]
for f in features.itertuples():
if f.feature not in var_features:
pass
group = ET.SubElement(groups, 'group')
add_node(group, 'name', f.feature)
add_node(group, 'description', f.description)
add_node(group, 'displayname', f.displayname)
add_node(group, 'image', 'images\\%s.png' % f.feature)
# add resources
resources = ET.SubElement(tpl, 'resources')
# add a blank NETWORK resource if no NETWORK variables exist
if 'NETWORK' not in categories:
resource = ET.SubElement(resources, 'resource')
add_node(resource, 'type', 'NETWORK')
add_node(resource, 'name', 'key_assumptions')
# add features and variables
#for feature in var_features:
for f in features_all.itertuples():
if f.feature=='catchment':
pass
if f.feature not in cat_features:
continue
# get resource category
category = features_all[features_all.feature==f.feature].category.iloc[0][:-1].upper()
# add the resource subelement
resource = ET.SubElement(resources, 'resource')
# add resource layout info
add_node(resource, 'type', category)
add_node(resource, 'name', f.feature)
layout = ET.SubElement(resource, 'layout')
item = ET.SubElement(layout, 'item')
add_node(item, 'name', 'image')
add_node(item, 'value', 'images\\'+f.feature+'.png')
if category == 'LINK':
for i, iname in enumerate(['symbol','colour','line_weight']):
item = ET.SubElement(layout, 'item')
add_node(item, 'name', iname)
add_node(item, 'value', str(linkstyle[f.feature][i]))
item = ET.SubElement(layout, 'item')
add_node(item, 'name', 'group')
add_node(item, 'value', f.feature)
# add variables
feature_variables = variables_all[variables_all.feature == f.feature]
for v in feature_variables.itertuples():
if v.variable_type=='Water Quality':
continue
attr = ET.SubElement(resource, 'attribute')
add_node(attr, 'name', v.variable_name.replace(' ', '_'))
add_node(attr, 'dimension', v.dimension)
add_node(attr, 'unit', v.hydra_unit)
add_node(attr, 'is_var', 'N')
add_node(attr, 'data_type', 'descriptor')
# add basic result variables - inflow/outflow
for v in ['inflow','outflow']:
attr = ET.SubElement(resource, 'attribute')
add_node(attr, 'name', v)
add_node(attr, 'dimension', 'Volume')
add_node(attr, 'unit', '1e6 m^3')
add_node(attr, 'is_var', 'Y')
add_node(attr, 'data_type', 'timeseries')
return tpl#, tree
def make_type_dict(weapdir):
typedefs = Table(join(weapdir, '_Dictionary', 'NodeTypes.DB'))
type_dict = {}
for t in typedefs:
type_dict[t.TypeID] = str(t.Name.lower()).replace(' ','_').replace('/','_')
return type_dict
'''
convert paradox db to pandas df
'''
def px_to_df(pxdb):
with Table(pxdb) as units:
fields = list(units.fields)
rows = [(row[fields[0]], [row[field] for field in fields[1:]]) for row in units]
df = pd.DataFrame.from_items(rows, orient='index', columns=fields[1:])
return df
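# The returned frame is indexed by the table's first field, with the remaining
# fields as columns -- e.g. NodeTypes.DB (assuming TypeID is its first field)
# becomes a frame indexed by TypeID with a Name column, which is how it is used
# in add_custom_variables below.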
def add_custom_variables(tpl, weapdir, area):
areadir = join(weapdir, area)
# lookup dataframes for...
# type:
type_df = px_to_df(pxdb = join(weapdir, '_Dictionary', 'NodeTypes.DB'))
    # category:
category_df = px_to_df(pxdb = join(weapdir, '_Dictionary', 'Category.DB'))
# units:
units_df = px_to_df(pxdb = join(areadir, 'Units.DB'))
# weap-hydra units
weap_hydra_units_df = pd.read_csv('weap_hydra_units.csv', index_col=0)
resources = tpl.find('resources')
# read user variables database
with Table(file_path=join(areadir, 'UserVariables.db')) as uservariables:
# loop through all user variables and add them to the template
for v in uservariables:
attr_dict = {}
# feature name
if v.TypeID:
resource_name = str(type_df.loc[v.TypeID].Name).lower().replace(' ','_').replace('/', '_')
else:
category = category_df.loc[v.CategoryID].Name
if category == 'Treatment': resource_name = 'Wastewater_Treatment_Plant'
elif category == 'Water Use': resource_name = 'Demand_Site'
# need to add more categories if needed, perhaps from lookup table
# determine units
weap_unit_name = units_df.loc[-v.NumUnitFieldID].Name
hydra_unit_abbr = weap_hydra_units_df.loc[weap_unit_name].Hydra_abbr
# data type
if v.IsInteger:
v_data_type = 'scalar'
else:
v_data_type = 'timeseries'
# write the variable info to a dictionary
attr_dict = OrderedDict()
attr_dict['name'] = str(v.DisplayLabel).replace(' ','_')
#attr_dict['description'] = v.GridComment
attr_dict['dimension'] = 'Volume'
attr_dict['unit'] = hydra_unit_abbr
attr_dict['is_var'] = 'Y'
attr_dict['data_type'] = v_data_type
# write the variables to template, under resources
add_attribute(resources, resource_name, attr_dict)
def write_template_xml(tpl, tree, tpl_name):
# prettify
indent(tpl)
# write to file
fout = join(tpl_name, './template/template.xml')
tree.write(fout)
def create_template_zipfile(tpl_name):
# create the zipfile
zipf = zipfile.ZipFile(tpl_name + '.zip', 'w', zipfile.ZIP_DEFLATED)
zipd = tpl_name + '/template'
zipdir(zipd, zipf)
zipf.close()
def main(tpl_name, custom_area, weapdir, write_template=True, direct_import=True, outdir=None):
# check if input requirements are met
if write_template and outdir==None:
return
# create template xml
tpl = create_xml_template(tpl_name)
# update template from specific model
if custom_area:
add_custom_variables(tpl, weapdir, custom_area)
# create tree
tree = ET.ElementTree(tpl)
## 1. write template to xml file and create hydra-friendly zip file
if write_template:
# remove old template directory
tpl_path = join(outdir, tpl_name)
if os.path.exists(tpl_path):
shutil.rmtree(tpl_path)
# create new template directory
os.mkdir(tpl_path)
shutil.copytree(src='template', dst=join(tpl_path, 'template'))
# write template xml to file
write_template_xml(tpl, tree, tpl_name)
# create template zipfile for import to Hydra
create_template_zipfile(tpl_name)
## 2. import xml directly
if __name__ == '__main__':
weapdir = r'C:\Users\L03060467\Documents\WEAP Areas'
#custom_area = 'Weaping River Basin'
custom_area = None
if custom_area:
tpl_name = custom_area
else:
tpl_name = 'WEAP'
outdir = '.'
write_template = True
direct_import = False
main(tpl_name, custom_area, weapdir=weapdir, write_template=True, direct_import=False, outdir=outdir)
print('finished')
|
|
# copied from https://github.com/openai/universe-starter-agent/blob/master/envs.py
import cv2
from gym.spaces.box import Box
import numpy as np
import gym
from gym import spaces
import logging
import universe
from universe import vectorized
from universe.wrappers import BlockingReset, GymCoreAction, EpisodeID, Unvectorize, Vectorize, Vision, Logger
from universe import spaces as vnc_spaces
from universe.spaces.vnc_event import keycode
import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
universe.configure_logging()
def create_env(env_id, client_id, remotes=1, **kwargs):
spec = gym.spec(env_id)
if spec.tags.get('flashgames', False):
return create_flash_env(env_id, client_id, remotes, **kwargs)
elif spec.tags.get('atari', False) and spec.tags.get('vnc', False):
return create_vncatari_env(env_id, client_id, remotes, **kwargs)
else:
# Assume atari.
assert "." not in env_id # universe environments have dots in names.
return create_atari_env(env_id)
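# Example (hypothetical ids): create_env("PongDeterministic-v3", client_id="0")
# builds a plain Atari env, while an id such as "flashgames.NeonRace-v0" is routed
# through the VNC/flash wrappers defined below.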
def create_flash_env(env_id, client_id, remotes=1, **_):
env = gym.make(env_id)
env = Vision(env)
env = Logger(env)
env = BlockingReset(env)
reg = universe.runtime_spec('flashgames').server_registry
height = reg[env_id]["height"]
width = reg[env_id]["width"]
env = CropScreen(env, height, width, 84, 18)
env = FlashRescale(env)
keys = ['left', 'right', 'up', 'down', 'x']
if env_id == 'flashgames.NeonRace-v0':
# Better key space for this game.
keys = ['left', 'right', 'up', 'left up', 'right up', 'down', 'up x']
logger.info('create_flash_env(%s): keys=%s', env_id, keys)
env = DiscreteToFixedKeysVNCActions(env, keys)
env = EpisodeID(env)
env = DiagnosticsInfo(env)
env = Unvectorize(env)
env.configure(fps=30.0, remotes=remotes, start_timeout=15 * 60,
vnc_driver='go', vnc_kwargs={
'encoding': 'tight', 'compress_level': 0,
'fine_quality_level': 50, 'subsample_level': 3})
return env, height, width
def create_vncatari_env(env_id, client_id, remotes, **_):
env = gym.make(env_id)
env = Vision(env)
env = Logger(env)
env = BlockingReset(env)
env = GymCoreAction(env)
env = AtariRescale42x42(env)
env = EpisodeID(env)
env = DiagnosticsInfo(env)
env = Unvectorize(env)
logger.info('Connecting to remotes: %s', remotes)
fps = env.metadata['video.frames_per_second']
env.configure(remotes=remotes, start_timeout=15 * 60, fps=fps, client_id=client_id)
return env
def create_atari_env(env_id):
env = gym.make(env_id)
env = Vectorize(env)
env = AtariRescale42x42(env)
env = DiagnosticsInfo(env)
env = Unvectorize(env)
return env
def DiagnosticsInfo(env, *args, **kwargs):
return vectorized.VectorizeFilter(env, DiagnosticsInfoI, *args, **kwargs)
class DiagnosticsInfoI(vectorized.Filter):
def __init__(self, log_interval=503):
super(DiagnosticsInfoI, self).__init__()
self._episode_time = time.time()
self._last_time = time.time()
self._local_t = 0
self._log_interval = log_interval
self._episode_reward = 0
self._episode_length = 0
self._all_rewards = []
self._num_vnc_updates = 0
self._last_episode_id = -1
def _after_reset(self, observation):
logger.info('Resetting environment')
self._episode_reward = 0
self._episode_length = 0
self._all_rewards = []
return observation
def _after_step(self, observation, reward, done, info):
to_log = {}
if self._episode_length == 0:
self._episode_time = time.time()
self._local_t += 1
if info.get("stats.vnc.updates.n") is not None:
self._num_vnc_updates += info.get("stats.vnc.updates.n")
if self._local_t % self._log_interval == 0:
cur_time = time.time()
elapsed = cur_time - self._last_time
fps = self._log_interval / elapsed
self._last_time = cur_time
cur_episode_id = info.get('vectorized.episode_id', 0)
to_log["diagnostics/fps"] = fps
if self._last_episode_id == cur_episode_id:
to_log["diagnostics/fps_within_episode"] = fps
self._last_episode_id = cur_episode_id
if info.get("stats.gauges.diagnostics.lag.action") is not None:
to_log["diagnostics/action_lag_lb"] = info["stats.gauges.diagnostics.lag.action"][0]
to_log["diagnostics/action_lag_ub"] = info["stats.gauges.diagnostics.lag.action"][1]
if info.get("reward.count") is not None:
to_log["diagnostics/reward_count"] = info["reward.count"]
if info.get("stats.gauges.diagnostics.clock_skew") is not None:
to_log["diagnostics/clock_skew_lb"] = info["stats.gauges.diagnostics.clock_skew"][0]
to_log["diagnostics/clock_skew_ub"] = info["stats.gauges.diagnostics.clock_skew"][1]
if info.get("stats.gauges.diagnostics.lag.observation") is not None:
to_log["diagnostics/observation_lag_lb"] = info["stats.gauges.diagnostics.lag.observation"][0]
to_log["diagnostics/observation_lag_ub"] = info["stats.gauges.diagnostics.lag.observation"][1]
if info.get("stats.vnc.updates.n") is not None:
to_log["diagnostics/vnc_updates_n"] = info["stats.vnc.updates.n"]
to_log["diagnostics/vnc_updates_n_ps"] = self._num_vnc_updates / elapsed
self._num_vnc_updates = 0
if info.get("stats.vnc.updates.bytes") is not None:
to_log["diagnostics/vnc_updates_bytes"] = info["stats.vnc.updates.bytes"]
if info.get("stats.vnc.updates.pixels") is not None:
to_log["diagnostics/vnc_updates_pixels"] = info["stats.vnc.updates.pixels"]
if info.get("stats.vnc.updates.rectangles") is not None:
to_log["diagnostics/vnc_updates_rectangles"] = info["stats.vnc.updates.rectangles"]
if info.get("env_status.state_id") is not None:
to_log["diagnostics/env_state_id"] = info["env_status.state_id"]
if reward is not None:
self._episode_reward += reward
if observation is not None:
self._episode_length += 1
self._all_rewards.append(reward)
if done:
logger.info('Episode terminating: episode_reward=%s episode_length=%s', self._episode_reward, self._episode_length)
total_time = time.time() - self._episode_time
to_log["global/episode_reward"] = self._episode_reward
to_log["global/episode_length"] = self._episode_length
to_log["global/episode_time"] = total_time
to_log["global/reward_per_time"] = self._episode_reward / total_time
self._episode_reward = 0
self._episode_length = 0
self._all_rewards = []
return observation, reward, done, to_log
def _process_frame42(frame):
frame = frame[34:34+160, :160]
# Resize by half, then down to 42x42 (essentially mipmapping). If
# we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
frame = cv2.resize(frame, (80, 80))
frame = cv2.resize(frame, (42, 42))
frame = frame.mean(2)
frame = frame.astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.reshape(frame, [42, 42, 1])
return frame
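# The result is a float32 array of shape (42, 42, 1) with values scaled to [0, 1].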
class AtariRescale42x42(vectorized.ObservationWrapper):
def __init__(self, env=None):
super(AtariRescale42x42, self).__init__(env)
self.observation_space = Box(0.0, 1.0, [42, 42, 1])
def _observation(self, observation_n):
return [_process_frame42(observation) for observation in observation_n]
class FixedKeyState(object):
def __init__(self, keys):
self._keys = [keycode(key) for key in keys]
self._down_keysyms = set()
def apply_vnc_actions(self, vnc_actions):
for event in vnc_actions:
if isinstance(event, vnc_spaces.KeyEvent):
if event.down:
self._down_keysyms.add(event.key)
else:
self._down_keysyms.discard(event.key)
def to_index(self):
action_n = 0
for key in self._down_keysyms:
if key in self._keys:
# If multiple keys are pressed, just use the first one
action_n = self._keys.index(key) + 1
break
return action_n
class DiscreteToFixedKeysVNCActions(vectorized.ActionWrapper):
"""
Define a fixed action space. Action 0 is all keys up. Each element of keys can be a single key or a space-separated list of keys
For example,
e=DiscreteToFixedKeysVNCActions(e, ['left', 'right'])
will have 3 actions: [none, left, right]
You can define a state with more than one key down by separating with spaces. For example,
e=DiscreteToFixedKeysVNCActions(e, ['left', 'right', 'space', 'left space', 'right space'])
will have 6 actions: [none, left, right, space, left space, right space]
"""
def __init__(self, env, keys):
super(DiscreteToFixedKeysVNCActions, self).__init__(env)
self._keys = keys
self._generate_actions()
self.action_space = spaces.Discrete(len(self._actions))
def _generate_actions(self):
self._actions = []
uniq_keys = set()
for key in self._keys:
for cur_key in key.split(' '):
uniq_keys.add(cur_key)
for key in [''] + self._keys:
split_keys = key.split(' ')
cur_action = []
for cur_key in uniq_keys:
cur_action.append(vnc_spaces.KeyEvent.by_name(cur_key, down=(cur_key in split_keys)))
self._actions.append(cur_action)
self.key_state = FixedKeyState(uniq_keys)
def _action(self, action_n):
# Each action might be a length-1 np.array. Cast to int to
# avoid warnings.
return [self._actions[int(action)] for action in action_n]
class CropScreen(vectorized.ObservationWrapper):
"""Crops out a [height]x[width] area starting from (top,left) """
def __init__(self, env, height, width, top=0, left=0):
super(CropScreen, self).__init__(env)
self.height = height
self.width = width
self.top = top
self.left = left
self.observation_space = Box(0, 255, shape=(height, width, 3))
def _observation(self, observation_n):
return [ob[self.top:self.top+self.height, self.left:self.left+self.width, :] if ob is not None else None
for ob in observation_n]
def _process_frame_flash(frame):
frame = cv2.resize(frame, (200, 128))
frame = frame.mean(2).astype(np.float32)
frame *= (1.0 / 255.0)
frame = np.reshape(frame, [1,128,200])
return frame
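# Note: unlike _process_frame42 above, this returns a channel-first (1, 128, 200)
# array, also scaled to [0, 1].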
class FlashRescale(vectorized.ObservationWrapper):
def __init__(self, env=None):
super(FlashRescale, self).__init__(env)
self.observation_space = Box(0.0, 1.0, [128, 200, 1])
def _observation(self, observation_n):
return [_process_frame_flash(observation) for observation in observation_n]
|
|
#----------------------------------------------------------------------
# Copyright (c) 2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from GenericInstaller import GenericInstaller
from gram.am.gram import config
class Quantum(GenericInstaller):
quantum_directory = "/etc/quantum"
quantum_conf_filename = "quantum.conf"
quantum_l3_agent_filename = "l3_agent.ini"
quantum_plugin_directory = "/etc/quantum/plugins/openvswitch"
quantum_plugin_conf_filename = "ovs_quantum_plugin.ini"
quantum_dhcp_conf_filename = "dhcp_agent.ini"
quantum_api_conf_filename = "api-paste.ini"
service_tenant_name = "service"
quantum_metadata_file = "/etc/quantum/metadata_agent.ini"
# Return a list of command strings for installing this component
def installCommands(self):
control_address = config.control_address
metadata_port = config.metadata_port
quantum_user = config.network_user
quantum_password = config.network_password
rabbit_password = config.rabbit_password
os_password = config.os_password
backup_directory = config.backup_directory
public_gateway_ip = config.public_gateway_ip
public_subnet_cidr = config.public_subnet_cidr
public_subnet_start_ip = config.public_subnet_start_ip
public_subnet_end_ip = config.public_subnet_end_ip
mgmt_if = config.management_interface
mgmt_net_name = config.management_network_name
mgmt_net_cidr = config.management_network_cidr
mgmt_net_vlan = config.management_network_vlan
connection = "sql_connection = mysql:\/\/"+ quantum_user + ":" + quantum_password + "@" + control_address + "\/quantum"
self.comment("*** Quantum Install ***")
self.comment("Install packages")
self.comment("Configure quantum services")
#self.backup(self.quantum_directory, backup_directory, self.quantum_conf_filename)
#self.backup(self.quantum_plugin_directory, backup_directory, self.quantum_plugin_conf_filename)
self.backup(self.quantum_directory, backup_directory, self.quantum_api_conf_filename)
self.sed("s/sql_connection.*/" + connection + "/",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^.*Example: tenant_network_type.*/tenant_network_type=vlan/",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
#self.sed("s/^\# tenant_network_type.*/tenant_network_type=vlan/", \
# self.quantum_plugin_directory + "/" + \
# self.quantum_plugin_conf_filename)
self.sed("s/^tunnel_id_ranges.*//",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^integration_bridge.*//",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^tunnel_bridge.*//", \
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^local_ip.*//", \
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^enable_tunneling.*//", \
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^\# root_helper sudo \/usr.*/root_helper = sudo \/usr\/bin\/quantum-rootwrap \/etc\/quantum\/rootwrap.conf/",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
#self.sed("s/\# Default: tenant_network_type.*/tenant_network_type=vlan/",
# self.quantum_plugin_directory + "/" + \
# self.quantum_plugin_conf_filename)
# TODO: Figure out ranges
self.sed("s/\# Default: network_vlan_ranges.*/network_vlan_ranges=physnet1:1000:2100,physnet2:2101:3000/",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/\# Default: bridge_mappings.*/bridge_mappings=physnet1:br-" + config.data_interface + ",physnet2:br-" + config.management_interface + "/",
self.quantum_plugin_directory + "/" + \
self.quantum_plugin_conf_filename)
self.sed("s/^# firewall_driver.*/firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver/",self.quantum_plugin_directory + "/" + self.quantum_plugin_conf_filename)
#self.sed("s/^\[filter:authtoken\].*/\[filter:authtoken\]\nauth_host = control_address\nauth_port = 35357\nauth_protocol = http\nadmin_tenant_name = service\nadmin_user = quantum\nadmin_password = service_pass\n" + "/", \
# self.quantum_plugin_directory + "/" + \
# self.quantum_api_conf_filename)
self.sed("s/^\[filter:authtoken\].*//",self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.sed("s/^paste.filter_factory = keystonec.*//",self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("[filter:authtoken]",self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory",self.quantum_directory+ "/" + self.quantum_api_conf_filename)
self.appendToFile("auth_host =" + control_address, self.quantum_directory + "/" + self.quantum_api_conf_filename )
self.appendToFile("auth_port = 35357", self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("auth_protocol = http", self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("admin_tenant_name = service", self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("admin_user = " + quantum_user, self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("admin_password = service_pass", self.quantum_directory + "/" + self.quantum_api_conf_filename)
self.appendToFile("rabbit_host = " + control_address,self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^auth_host.*/auth_host =" + control_address + "/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^auth_port.*/auth_port = 35357/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^auth_protocol.*/auth_protocol = http/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^signing_dir.*/signing_dir = \/var\/lib\/quantum\/keystone-signing/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^admin_tenant_name.*/admin_tenant_name = service/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^admin_user.*/admin_user = " + quantum_user + "/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^admin_password.*/admin_password = service_pass/", self.quantum_directory + "/" + self.quantum_conf_filename)
self.sed("s/^\# allow_overlapping_ips .*/allow_overlapping_ips = True/",self.quantum_directory + "/" + self.quantum_conf_filename)
self.writeToFile("[DEFAULT]",self.quantum_metadata_file)
self.appendToFile("# The Quantum user information for accessing the Quantum API.",self.quantum_metadata_file)
self.appendToFile("auth_url = http://" + control_address + ":35357/v2.0",self.quantum_metadata_file)
self.appendToFile("auth_region = RegionOne",self.quantum_metadata_file)
self.appendToFile("admin_tenant_name = service",self.quantum_metadata_file)
self.appendToFile("admin_user = quantum",self.quantum_metadata_file)
self.appendToFile("admin_password =service_pass",self.quantum_metadata_file)
self.appendToFile("nova_metadata_ip =" + control_address,self.quantum_metadata_file)
self.appendToFile("nova_metadata_port = 8775",self.quantum_metadata_file)
self.appendToFile("metadata_proxy_shared_secret = helloOpenStack",self.quantum_metadata_file)
# SD: Will need to double check that this works, not in the installation instructions
self.add("source /etc/novarc")
self.add("service quantum-server restart")
self.add("service quantum-plugin-openvswitch-agent restart")
self.add("service quantum-dhcp-agent restart")
self.add("service quantum-l3-agent restart")
self.add("for i in `seq 1 30`; do")
self.add(" echo 'checking quantum status'")
self.add(" quantum net-list > /dev/null")
self.add(" if [ $? -eq 0 ]; then")
self.add(" break")
self.add(" fi")
self.add(" sleep .5")
self.add("done")
self.add("echo 'Creating management network'")
self.add("quantum net-create " + mgmt_net_name + " --provider:network_type vlan --provider:physical_network physnet2 --provider:segmentation_id " + mgmt_net_vlan + " --shared")
self.add("echo 'Creating management subnet'")
self.add("quantum subnet-create " + mgmt_net_name + " " + mgmt_net_cidr + " --dns-nameservers " + config.public_dns_nameservers)
self.add('export MGMT_SUBNET_ID=`quantum net-list | grep ' + mgmt_net_name + ' | cut -d "|" -f 4`')
self.add("echo 'Creating external network'")
self.add("quantum net-create public --router:external=True")
self.add('export PUBLIC_NET_ID=`quantum net-list | grep public | cut -d " " -f 2`')
self.add("echo 'Creating external subnet'")
self.add("quantum subnet-create --allocation_pool" +
" start=" + public_subnet_start_ip +
",end=" + public_subnet_end_ip +
" --gateway=" + public_gateway_ip +
" $PUBLIC_NET_ID " + public_subnet_cidr +
" -- --enable_dhcp=False")
self.add("echo 'Creating router'")
self.add("quantum router-create externalRouter")
self.add("quantum router-gateway-set externalRouter $PUBLIC_NET_ID")
self.add("quantum router-interface-add externalRouter $MGMT_SUBNET_ID")
self.add('export EXTERNAL_ROUTER_ID=`quantum router-list | grep externalRouter | cut -d " " -f 2`')
        ## Now reconfigure the L3 agent by editing /etc/quantum/l3_agent.ini:
        ## set the router id (from 'quantum router-list') and the external
        ## network id (from 'quantum net-list').
self.sed("/^gateway_external_network_id/ s/^/# /",
self.quantum_directory + "/" + self.quantum_l3_agent_filename)
self.sed("s/^\# gateway_external_network_id.*/gateway_external_network_id=$PUBLIC_NET_ID/",
self.quantum_directory + "/" + self.quantum_l3_agent_filename)
self.sed("/^router_id/ s/^/# /",
self.quantum_directory + "/" + self.quantum_l3_agent_filename)
self.sed("s/\# router_id.*/router_id=$EXTERNAL_ROUTER_ID/",
self.quantum_directory + "/" + self.quantum_l3_agent_filename)
self.sed("s/^\# use_namespaces.*/use_namespaces = True/", self.quantum_directory + "/" + self.quantum_l3_agent_filename)
self.add("service quantum-l3-agent restart")
self.add("export PYTHONPATH=$PYTHONPATH:/opt/gcf/src:/home/gram/gram/src:/home/gram/gram/grizzly:/home/gram/gram/src/gram/am/gram")
self.sed("s/^\# use_namespaces.*/use_namespaces = True/", self.quantum_directory + "/" + self.quantum_dhcp_conf_filename)
# Return a list of command strings for uninstalling this component
def uninstallCommands(self):
backup_directory = config.backup_directory
self.comment("*** Quantum Uninstall ***")
self.restore(self.quantum_directory, backup_directory, self.quantum_conf_filename)
self.restore(self.quantum_plugin_directory, backup_directory, \
self.quantum_plugin_conf_filename)
self.restore(self.quantum_directory, backup_directory, \
self.quantum_l3_agent_filename)
self.restore(self.quantum_directory, backup_directory, \
self.quantum_dhcp_conf_filename)
|
|
#!/usr/bin/python -tt
# omega - python client
# https://github.com/jfillmore/Omega-API-Engine
#
# Copyright 2011, Jonathon Fillmore
# Licensed under the MIT license. See LICENSE file.
# http://www.opensource.org/licenses/mit-license.php
"""Omega client for talking to an Omega server."""
import pycurl
import urllib
import httplib
try:
import json
except:
import simplejson
json = simplejson
import re
import sys
import dbg
import StringIO
import os
import tempfile
import base64
import hashlib
import socket
from error import Exception
import util
class OmegaClient:
"""Client for talking to an Omega Server."""
_version = '0.2'
_http = None
_hostname = None
_folder = '/'
_url = None
_session_coookie = None
_cookie_file = None
_useragent = 'OmegaClient/0.2'
def __init__(self, url = 'localhost', credentials = None, port = 5800, use_https = True):
self._cookie_file = os.path.expanduser('~/.omega_cookie') # tempfile.NamedTemporaryFile()
self.set_https(use_https)
self.set_credentials(credentials)
self.set_port(port)
self.set_url(url)
        # TODO: python 2.7 supports an ordered dict (collections.OrderedDict) we can use to preserve order :)
self.encode = json.JSONEncoder().encode
self.decode = json.JSONDecoder().decode
# setup cookie jar
def set_url(self, url):
if url != '':
self._url = url
if url.find('/') == -1:
self._hostname = url
self._folder = ''
else:
# check for protocol
if url.lower()[0:7] == 'http://':
url = url[7:]
self.set_https(False)
elif url.lower()[0:8] == 'https://':
url = url[8:]
self.set_https(True)
else:
# some other protocol perhaps?
if re.match('^\w+://', url):
raise Exception('Only HTTP and HTTPS are supported protocols.')
# do we have a port?
match = re.match('^(\w+\.)*\w+(:(\d{0,5}))/?', url)
if match:
self.set_port(match.group(3))
# chop out the port
url = '/'.join((re.split(':\d+/?', url)))
(self._hostname, self._folder) = url.split('/', 1)
# force ourself to end in a slash
if not self._folder.endswith('/'):
self._folder = ''.join((self._folder, '/'))
else:
raise Exception('Invalid API service URL: %s.' % url)
# having set everything, init httplib
if self._use_https:
self._http = httplib.HTTPSConnection(self._hostname, self._port)
else:
self._http = httplib.HTTPConnection(self._hostname, self._port)
self._http.cookies = False
def get_url(self):
return self._url
def get_hostname(self):
return self._hostname
def get_folder(self):
return self._folder
def set_credentials(self, creds):
if type(creds).__name__ == 'dict':
# make sure we have a user/pass
if 'username' in creds and 'password' in creds:
self._credentials = creds
elif 'token' in creds:
self._credentials = creds
else:
raise Exception("Invalid credentials. Keys of 'username'/'password' or 'token' expected, but were not found.")
elif creds == None:
self._credentials = creds
else:
raise Exception("Invalid credentials. Keys of 'username'/'password' or 'token' expected, but were not found.")
def set_https(self, secure = True):
if secure:
self._use_https = True
else:
self._use_https = False
def set_port(self, port):
if port >= 0 and port <= 65535:
self._port = port
else:
raise Exception('Invalid API service port: %i.' % port)
# old style API invoker
def run(self, api, args = (), raw_response = False, full_response = False, get = None, post = None, files = None):
# check and prep the data
if api == '':
raise Exception("Invalid service API: '%s'." %api)
api = urllib.quote(api)
curl = pycurl.Curl()
data = [
('OMEGA_ENCODING', (curl.FORM_CONTENTS, 'json')),
('OMEGA_API_PARAMS', (curl.FORM_CONTENTS, self.encode(args)))
]
if self._credentials:
data.append(('OMEGA_CREDENTIALS', (curl.FORM_CONTENTS, self.encode(self._credentials))))
# include any extra post data
if post:
(name, value) = post.split('=', 1)
data.append((name, (curl.FORM_CONTENTS, value)))
if files:
# add in our files to the data
for name in files:
data.append((name, (curl.FORM_FILE, files[name])))
# figure our our URL and get args
url = self._url
if self._use_https:
url = ''.join(('https://', self._hostname))
else:
url = ''.join(('http://', self._hostname))
url = '/'.join((':'.join((url, str(self._port))), self._folder))
url = '/'.join((url, api))
url = re.sub(r'/+', '/', util.pretty_path(url)).replace(':/', '://', 1)
if get:
url = '?'.join((url, get))
# fire away
curl.setopt(curl.URL, url)
curl.setopt(curl.POST, 1)
curl.setopt(curl.USERAGENT, self._useragent)
curl.setopt(curl.COOKIEFILE, self._cookie_file)
curl.setopt(curl.COOKIEJAR, self._cookie_file)
if self._use_https:
curl.setopt(curl.SSL_VERIFYPEER, 0) # TODO: don't always assume
curl.setopt(curl.SSL_VERIFYHOST, 0) # TODO: don't always assume
if data:
curl.setopt(curl.HTTPPOST, data)
else:
curl.setopt(curl.POSTFIELDS, '&'.join(args))
response = StringIO.StringIO()
curl.setopt(curl.WRITEFUNCTION, response.write)
curl.perform()
response = response.getvalue()
http_code = curl.getinfo(curl.HTTP_CODE)
content_type = curl.getinfo(curl.CONTENT_TYPE) or "";
if http_code < 200 or http_code >= 300:
# see if we got json data back
try:
if content_type.startswith("application/json"):
decoded = self.decode(response)
if 'reason' in decoded:
error = decoded['reason']
else:
error = response
else:
error = response
except:
error = response
raise Exception("Server returned HTTP code %s. Response:\n%s" %
(str(http_code), str(error)))
curl.close()
if raw_response:
return response
else:
# decode the response and check whether or not it was successful
# TODO: check response encoding in header
try:
if content_type.startswith("application/json"):
response = self.decode(response)
else:
return response
except:
raise Exception('Failed to decode API response.', response)
# check to see if our API call was successful
if 'result' in response and response['result'] == False:
if 'reason' in response:
if full_response:
raise Exception('API "%s" failed.\n%s' %
(urllib.unquote(api), dbg.obj2str(response)))
else:
raise Exception(response['reason'])
else:
raise Exception('API "%s" failed, but did not provide an explanation. Response: %s' % (api, response))
else:
if full_response:
return response
else:
if 'data' in response:
return response['data']
else:
return None
def get(self, api, params, opts):
return self.request('GET', api, params, opts);
def post(self, api, params, opts = {}):
return self.request('POST', api, params, opts);
def put(self, api, params, opts = {}):
return self.request('PUT', api, params, opts);
def delete(self, api, params, opts = {}):
return self.request('DELETE', api, params, opts);
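    # Example usage (hostname and credentials are hypothetical):
    #   client = OmegaClient('api.example.com/omega', {'username': 'u', 'password': 'p'})
    #   users = client.get('users', {'active': True}, {})
    # The older run() entry point above posts form data via pycurl, while these
    # get/post/put/delete helpers go through the REST-style request() method below.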
def request(self, method, api, params = (), raw_response = False, full_response = False, get = None, headers = {}, verbose = False, no_format = False):
'''New REST-friendly API invoker'''
# check and prep the data
if method is None or method == '':
method = 'GET'
method = method.upper()
if api == '' or api == None:
raise Exception("Invalid service API: '%s'." %api)
api = urllib.quote(api)
if self._credentials:
creds = self._credentials
md5 = hashlib.md5();
md5.update(':'.join(
[creds['username'], creds['password']]
))
headers['Authentication'] = 'Basic ' + base64.b64encode(
md5.hexdigest())
http = self._http
# figure our our URL and get args
url = self._url
headers['Content-type'] = 'application/json'
headers['Accept'] = 'application/json'
url = util.pretty_path('/'.join(('', self._folder, api)), True)
if get:
url = '?'.join((url, get))
if method == 'GET':
url = '?'.join((url, '&'.join([
'='.join(
(urllib.quote(name), urllib.quote(str(params[name])))
) for name in params
])))
data = None
else:
data = self.encode(params)
# fire away
if verbose:
if self._use_https:
proto = 'https'
else:
proto = 'http'
sys.stderr.write(
'# Request: %s %s://%s:%d%s, params: "%s", headers: "%s", cookies: "%s"\n' %
((method, proto, self._hostname, self._port, url, data, str(headers), str(http.cookies)))
)
#http.request(method, url, data, headers)
# be willing to try again if the socket got closed on us (e.g. timeout)
tries = 0
max_tries = 3
response = None
while tries < max_tries and response is None:
tries += 1
try:
# start the request
http.putrequest(method, url)
# send our headers
for hdr, value in headers.iteritems():
http.putheader(hdr, value);
# and our cookies too!
if http.cookies:
[http.putheader('Cookie', value) for value in http.cookies]
# write the body
header_names = headers.fromkeys([k.lower() for k in headers])
if data:
body_len = len(data)
if body_len:
http.putheader('Content-Length', str(body_len))
http.endheaders()
if data:
http.send(data)
# get our response back from the server and parse
response = http.getresponse()
except socket.error, v:
http.connect()
except:
http.close()
if response is None:
raise Exception('HTTP request failed and could not be retried.')
# see if we get a cookie back
response_headers = str(response.msg).split('\n');
cookies = [c.split(': ')[1].split('; ')[0] for c in response_headers if c.startswith('Set-Cookie: ')]
if cookies:
# note that we ignore the path
if verbose:
for cookie in cookies:
sys.stderr.write('# Response Cookie: %s\n' % (cookie))
http.cookies = cookies
if verbose:
sys.stderr.write(
'# Response Status: %s %s\n# Response Headers: %s\n' %
(response.status, response.reason, self.encode(
str(response.msg).strip().split('\r\n')
))
)
content_type = response.getheader('Content-Type') or '';
response_data = response.read();
# handle any errors based on status code
if response.status < 200 or response.status >= 300:
if content_type.startswith("application/json"):
try:
result = self.decode(response_data)
except:
result = {"result": False, "reason": response_data}
if 'reason' in result:
error = result['reason']
else:
error = 'An unknown error has occurred.'
else:
result = response_data
error = response_data
if full_response:
if raw_response:
msg = response_data
else:
msg = dbg.obj2str(result)
raise Exception('API "%s" failed (%d %s)\n%s' %
(urllib.unquote(api), response.status, response.reason, msg))
else:
if raw_response:
msg = response_data
else:
msg = error
raise Exception('API "%s" failed (%d %s)\n%s' %
(api, response.status, response.reason, msg))
# return a raw response if needed; otherwise decode if JSON
if not content_type.startswith("application/json"):
return response_data
try:
result = self.decode(response_data)
except:
raise Exception('Failed to decode API result\n' + response_data)
# check to see if our API call was successful
if 'result' in result and result['result'] == False:
if 'reason' in result:
if full_response:
raise Exception('"%s" failed (%d %s):\n%s' %
(urllib.unquote(api), response.status, response.reason, dbg.obj2str(result)))
else:
raise Exception(result['reason'])
else:
raise Exception('API "%s" failed\n%s' % (api, result))
else:
# all is well, return the data portion of the response (unless everything is requested)
if full_response:
if raw_response:
if no_format:
result = json.dumps(result, sort_keys = True) + "\n"
else:
result = json.dumps(result, sort_keys = True, indent = 4) + "\n"
else:
if raw_response:
if 'data' in result:
if no_format:
result = self.encode(result['data']) + "\n"
else:
result = json.dumps(result['data'], sort_keys = True, indent = 4) + "\n"
else:
result = '{}'
else:
if 'data' in result:
result = result['data']
else:
result = None
return result
if __name__ == '__main__':
import dbg
dbg.pretty_print(OmegaClient());
|
|
import abc
class A(object):
def __init__(self):
self._x = 1
@property
def foo(self):
return self._x
@foo.setter
def foo(self, x):
self._x = x
@foo.deleter
def foo(self):
pass
@property
def boo(self):
return self._x
<warning descr="Names of function and decorator don't match; property accessor is not created">@boo.setter</warning>
def boo1(self, x):
self._x = x
<warning descr="Names of function and decorator don't match; property accessor is not created">@boo.deleter</warning>
def boo2(self):
pass
@property
def <warning descr="Getter should return or yield something">moo</warning>(self):
pass
@moo.setter
def <warning descr="Setter should not return a value">moo</warning>(self, x):
return 1
@moo.deleter
def <warning descr="Deleter should not return a value">moo</warning>(self):
return self._x
@qoo.setter # unknown qoo is reported in ref inspection
def qoo(self, v):
self._x = v
@property
def futuroo(self):
raise NotImplementedError("Override!") # ok though no return
@property
def futuroo(self):
"""Docstring."""
raise NotImplementedError("Override!") # ok though no return
@property
def xoo(self):
return self._x
@xoo.setter
def xoo(self, x):
self._x = x
return
get_foo2 = lambda self: 'foo2'
foo2 = property(get_foo2)
@property
@abc.abstractproperty
def abstract_property(self):
pass
# PY-19701
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
def inner_func(n):
return n
self._myprop = inner_func(val)
myprop = property(get_myprop, set_myprop)
# all flows have exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
if a > b:
<error descr="Python versions < 3.3 do not allow 'return' with argument inside generator.">return self._myprop</error>
elif a < b:
raise self._myprop
else:
yield self._myprop
myprop = property(get_myprop)
# some flows have not exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
if a > b:
return self._myprop
elif a < b:
raise self._myprop
myprop = property(get_myprop)
# some flows have not exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
if a > b:
return self._myprop
myprop = property(get_myprop)
# non-empty for
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
for i in range(5):
yield i
myprop = property(get_myprop)
# empty for
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
for i in []:
yield i
myprop = property(get_myprop) # shouldn't pass with better analysis, pass at the moment
# non-empty while
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
i = 0
while i < 5:
yield i
i += 1
myprop = property(get_myprop)
# empty while
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
while undefined:
yield i
myprop = property(get_myprop) # shouldn't pass with better analysis, pass at the moment
# non-empty while with two conditions
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
i = 0
j = 0
while i < 5 and j == 0:
yield i
i += 1
myprop = property(get_myprop)
# empty while with two conditions
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
i = 0
j = 0
while i > 5 and j == 0:
yield i
myprop = property(get_myprop) # shouldn't pass with better analysis, pass at the moment
# setter has exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
return 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
yield 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has raise statement
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
raise NotImplementedError()
myprop = property(get_myprop, set_myprop)
# setter has exit point in some flow
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
if a > b:
return 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has exit point in some flow
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
if a > b:
yield 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has raise statement in some flow
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
if a > b:
raise NotImplementedError()
myprop = property(get_myprop, set_myprop)
|
|
#
# File : keil.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
def _get_filetype(fn):
if fn.rfind('.cpp') != -1 or fn.rfind('.cxx') != -1:
return 8
if fn.rfind('.c') != -1 or fn.rfind('.C') != -1:
return 1
    # assembly source file type
if fn.rfind('.s') != -1 or fn.rfind('.S') != -1:
return 2
# header type
if fn.rfind('.h') != -1:
return 5
if fn.rfind('.lib') != -1:
return 4
if fn.rfind('.o') != -1:
return 3
# other filetype
return 5
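# The rfind()-based checks above depend on their order: '.cpp'/'.cxx' must be
# tested before '.c', because '.c' is also a substring of '.cpp'.  As a rough
# illustration only (this helper is an assumption and is not used elsewhere in
# this file), the same mapping could be written with an extension table:
def _get_filetype_by_ext(fn):
    # extension -> MDK file type code (8=C++, 1=C, 2=assembly, 5=header/other, 4=library, 3=object)
    table = {'.cpp': 8, '.cxx': 8, '.c': 1, '.C': 1,
             '.s': 2, '.S': 2, '.h': 5, '.lib': 4, '.o': 3}
    return table.get(os.path.splitext(fn)[1], 5)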
def MDK4AddGroupForFN(ProjectFiles, parent, name, filename, project_path):
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
    elif name.find('.S') != -1:
        obj_name = name.replace('.S', '.o')
else:
obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
return group
def MDK4AddLibToGroup(ProjectFiles, group, name, filename, project_path):
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
    elif name.find('.S') != -1:
        obj_name = name.replace('.S', '.o')
else:
obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
try:
file_name.text = name.decode(fs_encoding)
except:
file_name.text = name
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
try:
file_path.text = path.decode(fs_encoding)
except:
file_path.text = path
return group
def MDK4AddGroup(ProjectFiles, parent, name, files, project_path):
# don't add an empty group
if len(files) == 0:
return
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
        elif name.find('.S') != -1:
            obj_name = name.replace('.S', '.o')
        else:
            obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
file_name.text = name # name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path # path.decode(fs_encoding)
return group
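# For reference, each group produced by MDK4AddGroup/MDK4AddGroupForFN above is
# serialized into the .uvproj/.uvprojx file as an XML fragment of roughly this
# shape (file names and paths are illustrative):
#
#   <Group>
#     <GroupName>Kernel</GroupName>
#     <Files>
#       <File>
#         <FileName>clock.c</FileName>
#         <FileType>1</FileType>
#         <FilePath>..\src\clock.c</FilePath>
#       </File>
#     </Files>
#   </Group>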
# The common part of generating an MDK4/MDK5 project file
def MDK45Project(tree, target, script):
project_path = os.path.dirname(os.path.abspath(target))
root = tree.getroot()
out = open(target, 'w')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CFLAGS = ''
ProjectFiles = []
# add group
groups = tree.find('Targets/Target/Groups')
if groups is None:
groups = SubElement(tree.find('Targets/Target'), 'Groups')
groups.clear() # clean old groups
for group in script:
group_tree = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path)
# for local CPPPATH/CPPDEFINES
if (group_tree != None) and ('LOCAL_CPPPATH' in group or 'LOCAL_CFLAGS' in group or 'LOCAL_CPPDEFINES' in group):
GroupOption = SubElement(group_tree, 'GroupOption')
GroupArmAds = SubElement(GroupOption, 'GroupArmAds')
Cads = SubElement(GroupArmAds, 'Cads')
VariousControls = SubElement(Cads, 'VariousControls')
MiscControls = SubElement(VariousControls, 'MiscControls')
if 'LOCAL_CFLAGS' in group:
MiscControls.text = group['LOCAL_CFLAGS']
else:
MiscControls.text = ' '
Define = SubElement(VariousControls, 'Define')
if 'LOCAL_CPPDEFINES' in group:
Define.text = ', '.join(set(group['LOCAL_CPPDEFINES']))
else:
Define.text = ' '
Undefine = SubElement(VariousControls, 'Undefine')
Undefine.text = ' '
IncludePath = SubElement(VariousControls, 'IncludePath')
if 'LOCAL_CPPPATH' in group:
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in group['LOCAL_CPPPATH']])
else:
IncludePath.text = ' '
# get each include path
        if 'CPPPATH' in group and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
# get each group's definitions
if 'CPPDEFINES' in group and group['CPPDEFINES']:
if CPPDEFINES:
CPPDEFINES += group['CPPDEFINES']
else:
CPPDEFINES = group['CPPDEFINES']
# get each group's link flags
if 'LINKFLAGS' in group and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
if 'LIBS' in group and group['LIBS']:
for item in group['LIBS']:
lib_path = ''
for path_item in group['LIBPATH']:
full_path = os.path.join(path_item, item + '.lib')
if os.path.isfile(full_path): # has this library
lib_path = full_path
break
if lib_path != '':
if group_tree != None:
MDK4AddLibToGroup(ProjectFiles, group_tree, group['name'], lib_path, project_path)
else:
group_tree = MDK4AddGroupForFN(ProjectFiles, groups, group['name'], lib_path, project_path)
# write include path, definitions and link flags
IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath')
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in CPPPATH])
Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define')
Define.text = ', '.join(set(CPPDEFINES))
Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc')
Misc.text = LINKFLAGS
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8').decode())
out.close()
def MDK4Project(target, script):
template_tree = etree.parse('template.uvproj')
MDK45Project(template_tree, target, script)
# remove project.uvopt file
project_uvopt = os.path.abspath(target).replace('uvproj', 'uvopt')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
# copy uvopt file
if os.path.exists('template.uvopt'):
import shutil
shutil.copy2('template.uvopt', 'project.uvopt')
def MDK5Project(target, script):
template_tree = etree.parse('template.uvprojx')
MDK45Project(template_tree, target, script)
    # remove project.uvoptx file
project_uvopt = os.path.abspath(target).replace('uvprojx', 'uvoptx')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
    # copy uvoptx file
if os.path.exists('template.uvoptx'):
import shutil
shutil.copy2('template.uvoptx', 'project.uvoptx')
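# Hypothetical usage from a build script (names are illustrative; 'Projects'
# stands for the list of group dictionaries assembled by the SCons scripts,
# and the template.uvproj/template.uvprojx files are expected in the working
# directory):
#
#   MDK4Project('project.uvproj', Projects)
#   MDK5Project('project.uvprojx', Projects)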
def MDKProject(target, script):
template = open('template.Uv2', "r")
lines = template.readlines()
project = open(target, "w")
project_path = os.path.dirname(os.path.abspath(target))
line_index = 5
# write group
for group in script:
lines.insert(line_index, 'Group (%s)\r\n' % group['name'])
line_index += 1
lines.insert(line_index, '\r\n')
line_index += 1
# write file
ProjectFiles = []
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CFLAGS = ''
    # group index (Uv2 file groups are numbered starting from 1)
group_index = 1
for group in script:
# print group['name']
# get each include path
        if 'CPPPATH' in group and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
# get each group's definitions
if 'CPPDEFINES' in group and group['CPPDEFINES']:
if CPPDEFINES:
CPPDEFINES += group['CPPDEFINES']
else:
CPPDEFINES = group['CPPDEFINES']
# get each group's link flags
if 'LINKFLAGS' in group and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# generate file items
for node in group['src']:
fn = node.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n'
% (group_index, _get_filetype(name), path, name))
line_index += 1
group_index = group_index + 1
lines.insert(line_index, '\r\n')
line_index += 1
    # remove repeated include paths
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
CPPPATH = string.join(paths, ';')
definitions = [i for i in set(CPPDEFINES)]
CPPDEFINES = string.join(definitions, ', ')
while line_index < len(lines):
if lines[line_index].startswith(' ADSCINCD '):
lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n'
if lines[line_index].startswith(' ADSLDMC ('):
lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n'
if lines[line_index].startswith(' ADSCDEFN ('):
lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n'
line_index += 1
# write project
for line in lines:
project.write(line)
project.close()
def ARMCC_Version():
import rtconfig
import subprocess
import re
path = rtconfig.EXEC_PATH
path = os.path.join(path, 'armcc.exe')
if os.path.exists(path):
cmd = path
else:
print('Error: get armcc version failed. Please update the KEIL MDK installation path in rtconfig.py!')
return "0.0"
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = child.communicate()
'''
example stdout:
Product: MDK Plus 5.24
Component: ARM Compiler 5.06 update 5 (build 528)
Tool: armcc [4d3621]
return version: MDK Plus 5.24/ARM Compiler 5.06 update 5 (build 528)/armcc [4d3621]
'''
version_Product = re.search(r'Product: (.+)', stdout).group(1)
version_Product = version_Product[:-1]
version_Component = re.search(r'Component: (.*)', stdout).group(1)
version_Component = version_Component[:-1]
version_Tool = re.search(r'Tool: (.*)', stdout).group(1)
version_Tool = version_Tool[:-1]
version_str_format = '%s/%s/%s'
version_str = version_str_format % (version_Product, version_Component, version_Tool)
#print('version_str:' + version_str)
return version_str
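# Example call, matching the sample compiler output quoted in the docstring
# above (assumes rtconfig.EXEC_PATH points at a Keil MDK installation that
# contains armcc.exe):
#
#   >>> ARMCC_Version()
#   'MDK Plus 5.24/ARM Compiler 5.06 update 5 (build 528)/armcc [4d3621]'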
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.window import WindowTypes
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
@pytest.fixture(autouse=True)
def close_windows(driver):
main_windows_handle = driver.current_window_handle
yield
from urllib import request as url_request
URLError = url_request.URLError
try:
window_handles = driver.window_handles
except URLError:
return
for handle in window_handles:
if handle != main_windows_handle:
driver.switch_to.window(handle)
driver.close()
driver.switch_to.window(main_windows_handle)
def testShouldSwitchFocusToANewWindowWhenItIsOpenedAndNotStopFutureOperations(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
driver.find_element(By.LINK_TEXT, "Open new window").click()
assert driver.title == "XHTML Test Page"
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
assert driver.title == "We Arrive Here"
pages.load("iframes.html")
handle = driver.current_window_handle
driver.find_element(By.ID, "iframe_page_heading")
driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
assert driver.current_window_handle == handle
def testCanSwitchToWindowByName(driver, pages):
pages.load("xhtmlTest.html")
handles = driver.window_handles
driver.find_element(By.LINK_TEXT, "Open new window").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
driver.switch_to.window("result")
assert driver.title == "We Arrive Here"
def testShouldThrowNoSuchWindowException(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchWindowException):
driver.switch_to.window("invalid name")
@pytest.mark.xfail_safari
def testShouldThrowNoSuchWindowExceptionOnAnAttemptToGetItsHandle(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
handles = driver.window_handles
driver.find_element(By.LINK_TEXT, "Open new window").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
driver.close()
with pytest.raises(NoSuchWindowException):
driver.current_window_handle
@pytest.mark.xfail_ie
def testShouldThrowNoSuchWindowExceptionOnAnyOperationIfAWindowIsClosed(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
handles = driver.window_handles
driver.find_element(By.LINK_TEXT, "Open new window").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
driver.close()
with pytest.raises(NoSuchWindowException):
driver.title
with pytest.raises(NoSuchWindowException):
driver.find_element(By.TAG_NAME, "body")
@pytest.mark.xfail_ie
@pytest.mark.xfail_safari
def testShouldThrowNoSuchWindowExceptionOnAnyElementOperationIfAWindowIsClosed(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
handles = driver.window_handles
driver.find_element(By.LINK_TEXT, "Open new window").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
element = driver.find_element(By.TAG_NAME, "body")
driver.close()
with pytest.raises(NoSuchWindowException):
element.text
def testClickingOnAButtonThatClosesAnOpenWindowDoesNotCauseTheBrowserToHang(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
handles = driver.window_handles
driver.find_element(By.NAME, "windowThree").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
driver.find_element(By.ID, "close").click()
driver.switch_to.window(current)
driver.find_element(By.ID, "linkId")
@pytest.mark.xfail_safari
def testCanCallGetWindowHandlesAfterClosingAWindow(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
handles = driver.window_handles
driver.find_element(By.NAME, "windowThree").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
driver.find_element(By.ID, "close").click()
WebDriverWait(driver, 3).until(EC.number_of_windows_to_be(1))
def testCanObtainAWindowHandle(driver, pages):
pages.load("xhtmlTest.html")
currentHandle = driver.current_window_handle
assert currentHandle is not None
def testFailingToSwitchToAWindowLeavesTheCurrentWindowAsIs(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
with pytest.raises(NoSuchWindowException):
driver.switch_to.window("I will never exist")
new_handle = driver.current_window_handle
assert current == new_handle
@pytest.mark.xfail_safari
def testThatAccessingFindingAnElementAfterWindowIsClosedAndHaventswitchedDoesntCrash(driver, pages):
pages.load("xhtmlTest.html")
current = driver.current_window_handle
handles = driver.window_handles
driver.find_element(By.NAME, "windowThree").click()
WebDriverWait(driver, 3).until(EC.new_window_is_opened(handles))
handles = driver.window_handles
handles.remove(current)
driver.switch_to.window(handles[0])
with pytest.raises(WebDriverException):
driver.find_element(By.ID, "close").click()
all_handles = driver.window_handles
assert 1 == len(all_handles)
driver.find_element(By.ID, "close")
driver.switch_to.window(current)
@pytest.mark.xfail_ie
def testShouldBeAbleToCreateANewWindow(driver, pages):
original_handle = driver.current_window_handle
driver.switch_to.new_window(WindowTypes.TAB)
new_handle = driver.current_window_handle
driver.close()
driver.switch_to.window(original_handle)
assert new_handle != original_handle
|
|
import numpy as np
from astropy.coordinates import SkyCoord, ICRS, BaseCoordinateFrame
from astropy.io import fits
from astropy import wcs
import astropy.units as u
from astromodels.functions.function import Function2D, FunctionMeta
from astromodels.utils.angular_distance import angular_distance
from astromodels.utils.vincenty import vincenty
import hashlib
class Latitude_galactic_diffuse(Function2D):
r"""
description :
A Gaussian distribution in Galactic latitude around the Galactic plane
latex : $ K \exp{\left( \frac{-b^2}{2 \sigma_b^2} \right)} $
parameters :
K :
desc : normalization
initial value : 1
sigma_b :
                desc : Sigma of the Gaussian distribution in Galactic latitude
initial value : 1
l_min :
                desc : min Longitude
initial value : 10
l_max :
                desc : max Longitude
initial value : 30
"""
__metaclass__ = FunctionMeta
# This is optional, and it is only needed if we need more setup after the
# constructor provided by the meta class
def _setup(self):
self._frame = ICRS()
def set_frame(self, new_frame):
"""
Set a new frame for the coordinates (the default is ICRS J2000)
:param new_frame: a coordinate frame from astropy
:return: (none)
"""
assert isinstance(new_frame, BaseCoordinateFrame)
self._frame = new_frame
def _set_units(self, x_unit, y_unit, z_unit):
self.K.unit = z_unit
self.sigma_b.unit = x_unit
self.l_min.unit = y_unit
self.l_max.unit = y_unit
def evaluate(self, x, y, K, sigma_b, l_min, l_max):
# We assume x and y are R.A. and Dec
_coord = SkyCoord(ra=x, dec=y, frame=self._frame, unit="deg")
b = _coord.transform_to('galactic').b.value
l = _coord.transform_to('galactic').l.value
return K * np.exp(-b ** 2 / (2 * sigma_b ** 2)) * np.logical_or(np.logical_and(l > l_min, l < l_max),np.logical_and(l_min > l_max, np.logical_or(l > l_min, l < l_max)))
def get_boundaries(self):
max_b = self.sigma_b.max_value
l_min = self.l_min.value
l_max = self.l_max.value
_coord = SkyCoord(l=[l_min, l_min, l_max, l_max], b=[max_b * -2., max_b * 2., max_b * 2., max_b * -2.], frame="galactic", unit="deg")
        # note: the 0/360 degree longitude wrap-around is not handled here
min_lat = min(_coord.transform_to("icrs").dec.value)
max_lat = max(_coord.transform_to("icrs").dec.value)
min_lon = min(_coord.transform_to("icrs").ra.value)
max_lon = max(_coord.transform_to("icrs").ra.value)
return (min_lon, max_lon), (min_lat, max_lat)
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
        dL = self.l_max.value - self.l_min.value if self.l_max.value > self.l_min.value else 360 + self.l_max.value - self.l_min.value
#integral -inf to inf exp(-b**2 / 2*sigma_b**2 ) db = sqrt(2pi)*sigma_b
#Note that K refers to the peak diffuse flux (at b = 0) per square degree.
integral = np.sqrt( 2*np.pi ) * self.sigma_b.value * self.K.value * dL
if isinstance( z, u.Quantity):
z = z.value
return integral * np.power( 180. / np.pi, -2 ) * np.ones_like( z )
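    # Derivation of the value returned above, following the inline comments:
    # integrating K * exp(-b**2 / (2 * sigma_b**2)) over latitude gives
    # sqrt(2*pi) * sigma_b * K, which is multiplied by the longitude extent dL
    # (the conditional expression handles the wrap-around past 360 deg) and by
    # (pi/180)**2 to go from the per-square-degree normalization of K to
    # steradians.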
class Gaussian_on_sphere(Function2D):
r"""
description :
A bidimensional Gaussian function on a sphere (in spherical coordinates)
        latex : $$ f(\vec{x}) = \left(\frac{180^\circ}{\pi}\right)^2 \frac{1}{2\pi \sqrt{\det{\Sigma}}} \, {\rm exp}\left( -\frac{1}{2} (\vec{x}-\vec{x}_0)^\intercal \cdot \Sigma^{-1}\cdot (\vec{x}-\vec{x}_0)\right) \\ \vec{x}_0 = ({\rm RA}_0,{\rm Dec}_0)\\ \Lambda = \left( \begin{array}{cc} \sigma^2 & 0 \\ 0 & \sigma^2 (1-e^2) \end{array}\right) \\ U = \left( \begin{array}{cc} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta \end{array}\right) \\\Sigma = U\Lambda U^\intercal $$
parameters :
lon0 :
desc : Longitude of the center of the source
initial value : 0.0
min : 0.0
max : 360.0
lat0 :
desc : Latitude of the center of the source
initial value : 0.0
min : -90.0
max : 90.0
sigma :
desc : Standard deviation of the Gaussian distribution
initial value : 0.5
min : 0
max : 20
"""
__metaclass__ = FunctionMeta
def _set_units(self, x_unit, y_unit, z_unit):
# lon0 and lat0 and rdiff have most probably all units of degrees. However,
# let's set them up here just to save for the possibility of using the
# formula with other units (although it is probably never going to happen)
self.lon0.unit = x_unit
self.lat0.unit = y_unit
self.sigma.unit = x_unit
def evaluate(self, x, y, lon0, lat0, sigma):
lon, lat = x,y
angsep = angular_distance(lon0, lat0, lon, lat)
s2 = sigma**2
return (180 / np.pi)**2 * 1 / (2.0 * np.pi * s2) * np.exp(-0.5 * angsep**2/s2)
def get_boundaries(self):
# Truncate the gaussian at 2 times the max of sigma allowed
max_sigma = self.sigma.max_value
min_lat = max(-90., self.lat0.value - 2 * max_sigma)
max_lat = min(90., self.lat0.value + 2 * max_sigma)
max_abs_lat = max(np.absolute(min_lat), np.absolute(max_lat))
if max_abs_lat > 89. or 2 * max_sigma / np.cos(max_abs_lat * np.pi / 180.) >= 180.:
min_lon = 0.
max_lon = 360.
else:
min_lon = self.lon0.value - 2 * max_sigma / np.cos(max_abs_lat * np.pi / 180.)
max_lon = self.lon0.value + 2 * max_sigma / np.cos(max_abs_lat * np.pi / 180.)
if min_lon < 0.:
min_lon = min_lon + 360.
elif max_lon > 360.:
max_lon = max_lon - 360.
return (min_lon, max_lon), (min_lat, max_lat)
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
if isinstance( z, u.Quantity):
z = z.value
return np.ones_like( z )
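    # Sanity check of the normalization: at the centre (angsep = 0) the
    # function peaks at (180/pi)**2 / (2*pi*sigma**2) per steradian, and for
    # sigma well below the angular scale of the sphere its integral over the
    # sky is ~1, consistent with get_total_spatial_integral() returning 1.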
class Asymm_Gaussian_on_sphere(Function2D):
r"""
description :
A bidimensional Gaussian function on a sphere (in spherical coordinates)
see https://en.wikipedia.org/wiki/Gaussian_function#Two-dimensional_Gaussian_function
parameters :
lon0 :
desc : Longitude of the center of the source
initial value : 0.0
min : 0.0
max : 360.0
lat0 :
desc : Latitude of the center of the source
initial value : 0.0
min : -90.0
max : 90.0
a :
desc : Standard deviation of the Gaussian distribution (major axis)
initial value : 0.5
min : 0
max : 20
e :
                desc : Eccentricity of the Gaussian ellipse
initial value : 0.5
min : 0
max : 1
theta :
desc : inclination of major axis to a line of constant latitude
initial value : 0.0
min : -90.0
max : 90.0
"""
__metaclass__ = FunctionMeta
def _set_units(self, x_unit, y_unit, z_unit):
# lon0 and lat0 and a have most probably all units of degrees. However,
# let's set them up here just to save for the possibility of using the
# formula with other units (although it is probably never going to happen)
self.lon0.unit = x_unit
self.lat0.unit = y_unit
self.a.unit = x_unit
self.e.unit = u.dimensionless_unscaled
self.theta.unit = u.degree
def evaluate(self, x, y, lon0, lat0, a, e, theta):
lon, lat = x,y
b = a * np.sqrt(1. - e**2)
dX = np.atleast_1d( angular_distance( lon0, lat0, lon, lat0) )
dY = np.atleast_1d( angular_distance( lon0, lat0, lon0, lat) )
dlon = lon - lon0
if isinstance( dlon, u.Quantity):
dlon = (dlon.to(u.degree)).value
idx=np.logical_and( np.logical_or( dlon < 0, dlon > 180), np.logical_or( dlon>-180, dlon < -360) )
dX[idx] = -dX[idx]
idx = lat < lat0
dY[idx]=-dY[idx]
if isinstance( theta, u.Quantity ):
phi = (theta.to(u.degree)).value + 90.0
else:
phi = theta + 90.
cos2_phi = np.power( np.cos( phi * np.pi/180.), 2)
sin2_phi = np.power( np.sin( phi * np.pi/180.), 2)
sin_2phi = np.sin( 2. * phi * np.pi/180.)
A = cos2_phi / (2.*b**2) + sin2_phi / (2.*a**2)
B = - sin_2phi / (4.*b**2) + sin_2phi / (4.*a**2)
C = sin2_phi / (2.*b**2) + cos2_phi / (2.*a**2)
E = -A*np.power(dX, 2) + 2.*B*dX*dY - C*np.power(dY, 2)
return np.power(180. / np.pi, 2) * 1. / (2 * np.pi * a * b) * np.exp( E )
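    # The A, B, C coefficients computed in evaluate() are the standard
    # parametrisation of a rotated 2D Gaussian (see the Wikipedia page cited in
    # the docstring): the exponent is exp(-(A*dX**2 - 2*B*dX*dY + C*dY**2)),
    # with phi = theta + 90 deg so that for theta = 0 the major axis 'a' lies
    # along the longitude (dX) direction.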
def get_boundaries(self):
# Truncate the gaussian at 2 times the max of sigma allowed
min_lat = max(-90., self.lat0.value - 2 * self.a.max_value)
max_lat = min(90., self.lat0.value + 2 * self.a.max_value)
max_abs_lat = max(np.absolute(min_lat), np.absolute(max_lat))
if max_abs_lat > 89. or 2 * self.a.max_value / np.cos(max_abs_lat * np.pi / 180.) >= 180.:
min_lon = 0.
max_lon = 360.
else:
min_lon = self.lon0.value - 2 * self.a.max_value / np.cos(max_abs_lat * np.pi / 180.)
max_lon = self.lon0.value + 2 * self.a.max_value / np.cos(max_abs_lat * np.pi / 180.)
if min_lon < 0.:
min_lon = min_lon + 360.
elif max_lon > 360.:
max_lon = max_lon - 360.
return (min_lon, max_lon), (min_lat, max_lat)
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
if isinstance( z, u.Quantity):
z = z.value
return np.ones_like( z )
class Disk_on_sphere(Function2D):
r"""
description :
A bidimensional disk/tophat function on a sphere (in spherical coordinates)
latex : $$ f(\vec{x}) = \left(\frac{180}{\pi}\right)^2 \frac{1}{\pi~({\rm radius})^2} ~\left\{\begin{matrix} 1 & {\rm if}& {\rm | \vec{x} - \vec{x}_0| \le {\rm radius}} \\ 0 & {\rm if}& {\rm | \vec{x} - \vec{x}_0| > {\rm radius}} \end{matrix}\right. $$
parameters :
lon0 :
desc : Longitude of the center of the source
initial value : 0.0
min : 0.0
max : 360.0
lat0 :
desc : Latitude of the center of the source
initial value : 0.0
min : -90.0
max : 90.0
radius :
desc : Radius of the disk
initial value : 0.5
min : 0
max : 20
"""
__metaclass__ = FunctionMeta
def _set_units(self, x_unit, y_unit, z_unit):
# lon0 and lat0 and rdiff have most probably all units of degrees. However,
# let's set them up here just to save for the possibility of using the
# formula with other units (although it is probably never going to happen)
self.lon0.unit = x_unit
self.lat0.unit = y_unit
self.radius.unit = x_unit
def evaluate(self, x, y, lon0, lat0, radius):
lon, lat = x,y
angsep = angular_distance(lon0, lat0, lon, lat)
return np.power(180 / np.pi, 2) * 1. / (np.pi * radius ** 2) * (angsep <= radius)
def get_boundaries(self):
# Truncate the disk at 2 times the max of radius allowed
max_radius = self.radius.max_value
min_lat = max(-90., self.lat0.value - 2 * max_radius)
max_lat = min(90., self.lat0.value + 2 * max_radius)
max_abs_lat = max(np.absolute(min_lat), np.absolute(max_lat))
if max_abs_lat > 89. or 2 * max_radius / np.cos(max_abs_lat * np.pi / 180.) >= 180.:
min_lon = 0.
max_lon = 360.
else:
min_lon = self.lon0.value - 2 * max_radius / np.cos(max_abs_lat * np.pi / 180.)
max_lon = self.lon0.value + 2 * max_radius / np.cos(max_abs_lat * np.pi / 180.)
if min_lon < 0.:
min_lon = min_lon + 360.
elif max_lon > 360.:
max_lon = max_lon - 360.
return (min_lon, max_lon), (min_lat, max_lat)
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
if isinstance( z, u.Quantity):
z = z.value
return np.ones_like( z )
class Ellipse_on_sphere(Function2D):
r"""
description :
An ellipse function on a sphere (in spherical coordinates)
latex : $$ f(\vec{x}) = \left(\frac{180}{\pi}\right)^2 \frac{1}{\pi~ a b} ~\left\{\begin{matrix} 1 & {\rm if}& {\rm | \vec{x} - \vec{x}_{f1}| + | \vec{x} - \vec{x}_{f2}| \le {\rm 2a}} \\ 0 & {\rm if}& {\rm | \vec{x} - \vec{x}_{f1}| + | \vec{x} - \vec{x}_{f2}| > {\rm 2a}} \end{matrix}\right. $$
parameters :
lon0 :
desc : Longitude of the center of the source
initial value : 0.0
min : 0.0
max : 360.0
lat0 :
desc : Latitude of the center of the source
initial value : 0.0
min : -90.0
max : 90.0
a :
desc : semimajor axis of the ellipse
initial value : 0.5
min : 0
max : 20
e :
desc : eccentricity of ellipse
initial value : 0.5
min : 0
max : 1
theta :
                desc : inclination of the semimajor axis to a line of constant latitude
initial value : 0.0
min : -90.0
max : 90.0
"""
__metaclass__ = FunctionMeta
lon1 = None
lat1 = None
lon2 = None
lat2 = None
focal_pts = False
def _set_units(self, x_unit, y_unit, z_unit):
# lon0 and lat0 have most probably all units of degrees.
# However, let's set them up here just to save for the possibility of
# using the formula with other units (although it is probably never
# going to happen)
self.lon0.unit = x_unit
self.lat0.unit = y_unit
self.a.unit = x_unit
# eccentricity is dimensionless
self.e.unit = u.dimensionless_unscaled
self.theta.unit = u.degree
def calc_focal_pts(self, lon0, lat0, a, b, theta):
# focal distance
f = np.sqrt(a**2 - b**2)
if isinstance( theta, u.Quantity):
bearing = 90. - (theta.to(u.degree)).value
else:
bearing = 90. - theta
# coordinates of focal points
lon1, lat1 = vincenty(lon0, lat0, bearing, f)
lon2, lat2 = vincenty(lon0, lat0, bearing + 180., f)
return lon1, lat1, lon2, lat2
def evaluate(self, x, y, lon0, lat0, a, e, theta):
b = a * np.sqrt(1. - e**2)
# calculate focal points
self.lon1, self.lat1, self.lon2, self.lat2 = self.calc_focal_pts(lon0, lat0, a, b, theta)
self.focal_pts = True
# lon/lat of point in question
lon, lat = x, y
        # sum of geodesic distances to foci (should be <= 2a to be in ellipse)
angsep1 = angular_distance(self.lon1, self.lat1, lon, lat)
angsep2 = angular_distance(self.lon2, self.lat2, lon, lat)
angsep = angsep1 + angsep2
return np.power(180 / np.pi, 2) * 1. / (np.pi * a * b) * (angsep <= 2*a)
def get_boundaries(self):
# Truncate the ellipse at 2 times the max of semimajor axis allowed
max_radius = self.a.max_value
min_lat = max(-90., self.lat0.value - 2 * max_radius)
max_lat = min(90., self.lat0.value + 2 * max_radius)
max_abs_lat = max(np.absolute(min_lat), np.absolute(max_lat))
if max_abs_lat > 89. or 2 * max_radius / np.cos(max_abs_lat * np.pi / 180.) >= 180.:
min_lon = 0.
max_lon = 360.
else:
min_lon = self.lon0.value - 2 * max_radius / np.cos(max_abs_lat * np.pi / 180.)
max_lon = self.lon0.value + 2 * max_radius / np.cos(max_abs_lat * np.pi / 180.)
if min_lon < 0.:
min_lon = min_lon + 360.
elif max_lon > 360.:
max_lon = max_lon - 360.
return (min_lon, max_lon), (min_lat, max_lat)
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
if isinstance( z, u.Quantity):
z = z.value
return np.ones_like( z )
class SpatialTemplate_2D(Function2D):
r"""
description :
User input Spatial Template. Expected to be normalized to 1/sr
latex : $ hi $
parameters :
K :
desc : normalization
initial value : 1
fix : yes
hash :
desc: hash of model map [needed for memoization]
initial value: 1
fix: yes
"""
__metaclass__ = FunctionMeta
def _set_units(self, x_unit, y_unit, z_unit):
self.K.unit = z_unit
# This is optional, and it is only needed if we need more setup after the
# constructor provided by the meta class
def _setup(self):
self._frame = "icrs"
self._fitsfile = None
self._map = None
def load_file(self,fitsfile, ihdu=0):
if fitsfile is None:
raise RuntimeError( "Need to specify a fits file with a template map." )
self._fitsfile=fitsfile
with fits.open(self._fitsfile) as f:
self._wcs = wcs.WCS( header = f[ihdu].header )
self._map = f[ihdu].data
self._nX = f[ihdu].header['NAXIS1']
self._nY = f[ihdu].header['NAXIS2']
#note: map coordinates are switched compared to header. NAXIS1 is coordinate 1, not 0.
#see http://docs.astropy.org/en/stable/io/fits/#working-with-image-data
assert self._map.shape[1] == self._nX, "NAXIS1 = %d in fits header, but %d in map" % (self._nX, self._map.shape[1])
assert self._map.shape[0] == self._nY, "NAXIS2 = %d in fits header, but %d in map" % (self._nY, self._map.shape[0])
#hash sum uniquely identifying the template function (defined by its 2D map array and coordinate system)
#this is needed so that the memoization won't confuse different SpatialTemplate_2D objects.
h = hashlib.sha224()
h.update( self._map)
h.update( repr(self._wcs) )
self.hash = int(h.hexdigest(), 16)
def to_dict(self, minimal=False):
data = super(Function2D, self).to_dict(minimal)
if not minimal:
data['extra_setup'] = {"_fitsfile": self._fitsfile, "_frame": self._frame }
return data
def set_frame(self, new_frame):
"""
Set a new frame for the coordinates (the default is ICRS J2000)
:param new_frame: a coordinate frame from astropy
:return: (none)
"""
assert new_frame.lower() in ['icrs', 'galactic', 'fk5', 'fk4', 'fk4_no_e' ]
self._frame = new_frame
def evaluate(self, x, y, K, hash):
if self._map is None:
self.load_file(self._fitsfile)
# We assume x and y are R.A. and Dec
coord = SkyCoord(ra=x, dec=y, frame=self._frame, unit="deg")
#transform input coordinates to pixel coordinates;
#SkyCoord takes care of necessary coordinate frame transformations.
Xpix, Ypix = coord.to_pixel(self._wcs)
Xpix = np.atleast_1d(Xpix.astype(int))
Ypix = np.atleast_1d(Ypix.astype(int))
# find pixels that are in the template ROI, otherwise return zero
#iz = np.where((Xpix<self._nX) & (Xpix>=0) & (Ypix<self._nY) & (Ypix>=0))[0]
iz = (Xpix<self._nX) & (Xpix>=0) & (Ypix<self._nY) & (Ypix>=0)
out = np.zeros_like(Xpix)
out[iz] = self._map[Ypix[iz], Xpix[iz]]
return np.multiply(K,out)
def get_boundaries(self):
if self._map is None:
self.load_file(self._fitsfile)
#We use the max/min RA/Dec of the image corners to define the boundaries.
#Use the 'outside' of the pixel corners, i.e. from pixel 0 to nX in 0-indexed accounting.
Xcorners = np.array( [0, 0, self._nX, self._nX] )
Ycorners = np.array( [0, self._nY, 0, self._nY] )
corners = SkyCoord.from_pixel( Xcorners, Ycorners, wcs=self._wcs, origin = 0).transform_to(self._frame)
min_lon = min(corners.ra.degree)
max_lon = max(corners.ra.degree)
min_lat = min(corners.dec.degree)
max_lat = max(corners.dec.degree)
return (min_lon, max_lon), (min_lat, max_lat)
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
if isinstance( z, u.Quantity):
z = z.value
return np.multiply(self.K.value,np.ones_like( z ) )
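    # Hypothetical usage sketch (the FITS file name is illustrative, and the
    # call syntax assumes the usual astromodels Function2D convention):
    #
    #   shape = SpatialTemplate_2D()
    #   shape.load_file("my_template.fits")   # image assumed normalized to 1/sr
    #   shape.set_frame("galactic")           # optional; default frame is "icrs"
    #   values = shape(ra_deg, dec_deg)       # K * pixel values looked up via the WCS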
class Power_law_on_sphere(Function2D):
r"""
description :
A power law function on a sphere (in spherical coordinates)
latex : $$ f(\vec{x}) = \left(\frac{180}{\pi}\right)^{-1.*index} \left\{\begin{matrix} 0.05^{index} & {\rm if} & |\vec{x}-\vec{x}_0| \le 0.05\\ |\vec{x}-\vec{x}_0|^{index} & {\rm if} & 0.05 < |\vec{x}-\vec{x}_0| \le maxr \\ 0 & {\rm if} & |\vec{x}-\vec{x}_0|>maxr\end{matrix}\right. $$
parameters :
lon0 :
desc : Longitude of the center of the source
initial value : 0.0
min : 0.0
max : 360.0
lat0 :
desc : Latitude of the center of the source
initial value : 0.0
min : -90.0
max : 90.0
index :
desc : power law index
initial value : -2.0
min : -5.0
max : -1.0
maxr :
desc : max radius
initial value : 5.
fix : yes
minr :
desc : radius below which the PL is approximated as a constant
initial value : 0.05
fix : yes
"""
__metaclass__ = FunctionMeta
def _set_units(self, x_unit, y_unit, z_unit):
# lon0 and lat0 and rdiff have most probably all units of degrees. However,
# let's set them up here just to save for the possibility of using the
# formula with other units (although it is probably never going to happen)
self.lon0.unit = x_unit
self.lat0.unit = y_unit
self.index.unit = u.dimensionless_unscaled
self.maxr.unit = x_unit
self.minr.unit = x_unit
def evaluate(self, x, y, lon0, lat0, index, maxr, minr):
lon, lat = x,y
angsep = angular_distance(lon0, lat0, lon, lat)
if maxr <= minr:
norm = np.power(np.pi / 180., 2.+index) * np.pi * maxr**2 * minr**index
elif self.index.value == -2.:
norm = np.pi * (1.0 + 2. * np.log(maxr / minr) )
else:
norm = np.power(minr * np.pi / 180., 2.+index) * np.pi + 2. * np.pi / (2.+index) * (np.power(maxr * np.pi / 180., index+2.) - np.power(minr * np.pi / 180., index+2.))
value = np.less_equal(angsep,maxr) * np.power(np.pi / 180., index) * np.power(np.add(np.multiply(angsep, np.greater(angsep, minr)), np.multiply(minr, np.less_equal(angsep, minr))), index)
return value / norm
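    # The 'norm' computed above is the analytic integral of the truncated power
    # law over the disk of radius maxr (minr and maxr converted to radians):
    # a constant core contributing pi * minr**(index+2) plus the ring integral
    # 2*pi/(index+2) * (maxr**(index+2) - minr**(index+2)); the index = -2 case
    # is treated separately because the ring integral becomes logarithmic,
    # giving pi * (1 + 2*ln(maxr/minr)).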
def get_boundaries(self):
return ((self.lon0.value - self.maxr.value), (self.lon0.value + self.maxr.value)), ((self.lat0.value - self.maxr.value), (self.lat0.value + self.maxr.value))
def get_total_spatial_integral(self, z=None):
"""
Returns the total integral (for 2D functions) or the integral over the spatial components (for 3D functions).
needs to be implemented in subclasses.
:return: an array of values of the integral (same dimension as z).
"""
if isinstance( z, u.Quantity):
z = z.value
return np.ones_like( z )
# class FunctionIntegrator(Function2D):
# r"""
# description :
#
# Returns the average of the integrand function (a 1-d function) over the interval x-y. The integrand is set
# using the .integrand property, like in:
#
# > G = FunctionIntegrator()
# > G.integrand = Powerlaw()
#
# latex : $$ G(x,y) = \frac{\int_{x}^{y}~f(x)~dx}{y-x}$$
#
# parameters :
#
# s :
#
# desc : if s=0, then the integral will *not* be normalized by (y-x), otherwise (default) it will.
# initial value : 1
# fix : yes
# """
#
# __metaclass__ = FunctionMeta
#
# def _set_units(self, x_unit, y_unit, z_unit):
#
# # lon0 and lat0 and rdiff have most probably all units of degrees. However,
# # let's set them up here just to save for the possibility of using the
# # formula with other units (although it is probably never going to happen)
#
# self.s = u.dimensionless_unscaled
#
# def evaluate(self, x, y, s):
#
# assert y-x >= 0, "Cannot obtain the integral if the integration interval is zero or negative!"
#
# integral = self._integrand.integral(x, y)
#
# if s==0:
#
# return integral
#
# else:
#
# return integral / (y-x)
#
#
# def get_boundaries(self):
#
# return (-np.inf, +np.inf), (-np.inf, +np.inf)
#
# def _set_integrand(self, function):
#
# self._integrand = function
#
# def _get_integrand(self):
#
# return self._integrand
#
# integrand = property(_get_integrand, _set_integrand,
# doc="""Get/set the integrand""")
#
#
# def to_dict(self, minimal=False):
#
# data = super(Function2D, self).to_dict(minimal)
#
# if not minimal:
# data['extra_setup'] = {'integrand': self.integrand.path}
#
# return data
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
import webob
from cinder import exception
from cinder.openstack.common import gettextutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder import wsgi
from lxml import etree
from xml.dom import minidom
from xml.parsers import expat
XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger(__name__)
# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
'application/vnd.openstack.volume+json': 'application/json',
'application/vnd.openstack.volume+xml': 'application/xml',
}
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.volume+json',
'application/xml',
'application/vnd.openstack.volume+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.volume+json': 'json',
'application/json': 'json',
'application/vnd.openstack.volume+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._resource_cache = {}
def cache_resource(self, resource_to_cache, id_attribute='id', name=None):
"""Cache the given resource.
Allow API methods to cache objects, such as results from a DB query,
to be used by API extensions within the same API request.
The resource_to_cache can be a list or an individual resource,
but ultimately resources are cached individually using the given
id_attribute.
        Different resource types might need to be cached during the same
        request; they can be distinguished using the name parameter. For example:
Controller 1:
request.cache_resource(db_volumes, 'volumes')
request.cache_resource(db_volume_types, 'types')
Controller 2:
db_volumes = request.cached_resource('volumes')
db_type_1 = request.cached_resource_by_id('1', 'types')
If no name is given, a default name will be used for the resource.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
if not isinstance(resource_to_cache, list):
resource_to_cache = [resource_to_cache]
if not name:
name = self.path
cached_resources = self._resource_cache.setdefault(name, {})
for resource in resource_to_cache:
cached_resources[resource[id_attribute]] = resource
def cached_resource(self, name=None):
"""Get the cached resources cached under the given resource name.
Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
:returns: a dict of id_attribute to the resource from the cached
resources, an empty map if an empty collection was cached,
or None if nothing has been cached yet under this name
"""
if not name:
name = self.path
if name not in self._resource_cache:
# Nothing has been cached for this key yet
return None
return self._resource_cache[name]
def cached_resource_by_id(self, resource_id, name=None):
"""Get a resource by ID cached under the given resource name.
Allow an API extension to get a previously stored object
within the same API request. This is basically a convenience method
to lookup by ID on the dictionary of all cached resources.
Note that the object data will be slightly stale.
:returns: the cached resource or None if the item is not in the cache
"""
resources = self.cached_resource(name)
if not resources:
            # Nothing has been cached for this key yet
return None
return resources.get(resource_id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'cinder.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['cinder.best_content_type'] = (content_type or
'application/json')
return self.environ['cinder.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
allowed_types = SUPPORTED_CONTENT_TYPES
content_type = self.content_type
if content_type not in allowed_types:
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = gettextutils.get_available_languages('cinder')
return self.accept_language.best_match(all_languages)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = utils.safe_minidom_parse_string(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
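    # Example of the conversion performed by _from_xml()/_from_xml_node() above
    # (no 'plurals' metadata, input shown without whitespace):
    #
    #   '<volume id="1"><name>vol1</name></volume>'
    #       -> {'volume': {'id': '1', 'name': 'vol1'}}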
def find_first_child_named_in_namespace(self, parent, namespace, name):
"""Search a nodes children for the first child with a given name."""
for node in parent.childNodes:
if (node.localName == name and
node.namespaceURI and
node.namespaceURI == namespace):
return node
return None
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def find_attribute_or_element(self, parent, name):
"""Get an attribute value; fallback to an element if not found"""
if parent.hasAttribute(name):
return parent.getAttribute(name)
node = self.find_first_child_named(parent, name)
if node:
return self.extract_text(node)
return None
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
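    # Example: applying extract_metadata() to the node parsed from
    # '<metadata><meta key="foo">bar</meta></metadata>' returns {'foo': 'bar'}.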
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return jsonutils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
    #NOTE (ameade): the has_atom flag should be removed once all of the
    # xml serializers and view builders have been updated to the current
    # spec, which requires all responses to include xmlns:atom; for now the
    # flag is kept to prevent current tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
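    # Example of the serialization performed by default()/_to_xml_node() above,
    # with no metadata and no xmlns (the exact output layout is whatever
    # minidom's toxml() produces):
    #
    #   XMLDictSerializer().default({'volume': {'name': 'vol1'}})
    #       -> '<?xml version="1.0" encoding="UTF-8"?><volume><name>vol1</name></volume>'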
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
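# A minimal sketch of how the decorators above are typically stacked on a
# controller method (class and serializer names are illustrative):
#
#   class SnapshotController(wsgi.Controller):
#       @wsgi.serializers(xml=SnapshotXMLSerializer)
#       @wsgi.deserializers(xml=SnapshotXMLDeserializer)
#       @wsgi.response(202)
#       def create(self, req, body):
#           ...
#           return {'snapshot': {...}}
#
# The attributes they set (wsgi_serializers, wsgi_deserializers, wsgi_code)
# are read later by Resource and ResponseObject during dispatch.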
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = value
response.headers['Content-Type'] = content_type
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
def action_peek_xml(body):
"""Determine action to invoke."""
dom = utils.safe_minidom_parse_string(body)
action_node = dom.childNodes[0]
return action_node.tagName
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
raise Fault(webob.exc.HTTPForbidden(explanation=ex_value.msg))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code, explanation=ex_value.msg))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_(
'Exception handling resource: %s') %
ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, **deserializers):
"""
        :param controller: object that implements methods created by routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
return None, ''
if len(request.body) <= 0:
LOG.debug(_("Empty body provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('cinder.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)
return response
def get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
LOG.debug("Action body: %s" % body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
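# A minimal sketch of a controller that Resource can dispatch to (names are
# illustrative):
#
#   class HelloController(object):
#       def index(self, req):
#           return {'message': 'hello'}
#
#   app = Resource(HelloController())
#
# Routes middleware supplies {'action': 'index'} in the routing args; Resource
# deserializes the body (if any), calls index(req=...), and serializes the
# returned dict according to the request's Accept header.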
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
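# For illustration, a method registered with @action('os-reset_status') (the
# name is hypothetical) is selected when a POST to the 'action' route carries
# a body whose single top-level key is 'os-reset_status'; the action_peek_*
# helpers above extract that key from the JSON or XML body.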
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
class Controller(object):
"""Default controller."""
__metaclass__ = ControllerMetaclass
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
if not is_dict(body[entity_name]):
return False
return True
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
locale = req.best_match_language()
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
fault_data = {
fault_name: {
'code': code,
'message': gettextutils.get_localized_message(explanation,
locale)}}
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V1)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
def _set_request_id_header(req, headers):
context = req.environ.get('cinder.context')
if context:
headers['x-compute-request-id'] = context.request_id
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Serializes the wrapped exception conforming to our error format."""
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimitFault": "code"}}
def translate(msg):
locale = request.best_match_language()
return gettextutils.get_localized_message(msg, locale)
self.content['overLimitFault']['message'] = \
translate(self.content['overLimitFault']['message'])
self.content['overLimitFault']['details'] = \
translate(self.content['overLimitFault']['details'])
xml_serializer = XMLDictSerializer(metadata, XMLNS_V1)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
|
|
import copy
from re import compile
import time
import requests
from .rest import RequestsNetworkWrapper, ServiceClient
from .rest_client import RESTServiceClient, Endpoint
from .custom_query_object import CustomQueryObject
import os
import logging
logger = logging.getLogger(__name__)
_name_checker = compile(r"^[\w -]+$")
def _check_endpoint_type(name):
if not isinstance(name, str):
raise TypeError("Endpoint name must be a string")
if name == "":
raise ValueError("Endpoint name cannot be empty")
def _check_hostname(name):
_check_endpoint_type(name)
    hostname_checker = compile(r"^http(s)?://[\w.-]+(/)?(:\d+)?(/)?$")
if not hostname_checker.match(name):
        raise ValueError(
            f"endpoint {name} should be of the form http(s)://<hostname>"
            "[:<port>] and the hostname may consist only of: "
            "a-z, A-Z, 0-9, dots, underscores and hyphens."
        )
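# For illustration, "http://localhost:9004" and "https://tabpy.example.com/"
# pass _check_hostname, while "localhost:9004" (no scheme) raises ValueError.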
def _check_endpoint_name(name):
"""Checks that the endpoint name is valid by comparing it with an RE and
checking that it is not reserved."""
_check_endpoint_type(name)
if not _name_checker.match(name):
raise ValueError(
f"endpoint name {name} can only contain: a-z, A-Z, 0-9,"
" underscore, hyphens and spaces."
)
class Client:
def __init__(self, endpoint, query_timeout=1000):
"""
Connects to a running server.
        The class constructor takes a server address, which is then used
        for all subsequent API calls.
        Parameters
        ----------
        endpoint : str
The server URL.
query_timeout : float, optional
The timeout for query operations.
"""
_check_hostname(endpoint)
self._endpoint = endpoint
session = requests.session()
session.verify = False
requests.packages.urllib3.disable_warnings()
# Setup the communications layer.
network_wrapper = RequestsNetworkWrapper(session)
service_client = ServiceClient(self._endpoint, network_wrapper)
self._service = RESTServiceClient(service_client)
        if not isinstance(query_timeout, (int, float)) or query_timeout <= 0:
query_timeout = 0.0
self._service.query_timeout = query_timeout
def __repr__(self):
return (
"<"
+ self.__class__.__name__
+ " object at "
+ hex(id(self))
+ " connected to "
+ repr(self._endpoint)
+ ">"
)
def get_status(self):
"""
Gets the status of the deployed endpoints.
Returns
-------
dict
Keys are endpoints and values are dicts describing the state of
the endpoint.
Examples
--------
.. sourcecode:: python
{
u'foo': {
u'status': u'LoadFailed',
                    u'last_error': u'error message',
u'version': 1,
u'type': u'model',
},
}
"""
return self._service.get_status()
#
# Query
#
@property
def query_timeout(self):
"""The timeout for queries in milliseconds."""
return self._service.query_timeout
@query_timeout.setter
def query_timeout(self, value):
if type(value) in (int, float) and value > 0:
self._service.query_timeout = value
def query(self, name, *args, **kwargs):
"""Query an endpoint.
Parameters
----------
name : str
The name of the endpoint.
*args : list of anything
Ordered parameters to the endpoint.
**kwargs : dict of anything
Named parameters to the endpoint.
Returns
-------
dict
Keys are:
model: the name of the endpoint
version: the version used.
response: the response to the query.
uuid : a unique id for the request.
"""
return self._service.query(name, *args, **kwargs)
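    # A short usage sketch (endpoint URL, name and arguments are hypothetical):
    #
    #   client = Client("http://localhost:9004/")
    #   result = client.query("add", 1, 2)
    #   print(result["response"])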
#
# Endpoints
#
def get_endpoints(self, type=None):
"""Returns all deployed endpoints.
Examples
--------
.. sourcecode:: python
{"clustering":
{"description": "",
"docstring": "-- no docstring found in query function --",
"creation_time": 1469511182,
"version": 1,
"dependencies": [],
"last_modified_time": 1469511182,
"type": "model",
"target": null},
"add": {
"description": "",
"docstring": "-- no docstring found in query function --",
"creation_time": 1469505967,
"version": 1,
"dependencies": [],
"last_modified_time": 1469505967,
"type": "model",
"target": null}
}
"""
return self._service.get_endpoints(type)
def _get_endpoint_upload_destination(self):
"""Returns the endpoint upload destination."""
return self._service.get_endpoint_upload_destination()["path"]
def deploy(self, name, obj, description="", schema=None, override=False):
"""Deploys a Python function as an endpoint in the server.
Parameters
----------
name : str
A unique identifier for the endpoint.
obj : function
Refers to a user-defined function with any signature. However both
input and output of the function need to be JSON serializable.
description : str, optional
The description for the endpoint. This string will be returned by
the ``endpoints`` API.
schema : dict, optional
The schema of the function, containing information about input and
output parameters, and respective examples. Providing a schema for
a deployed function lets other users of the service discover how to
use it. Refer to schema.generate_schema for more information on
how to generate the schema.
override : bool
Whether to override (update) an existing endpoint. If False and
there is already an endpoint with that name, it will raise a
RuntimeError. If True and there is already an endpoint with that
name, it will deploy a new version on top of it.
See Also
--------
remove, get_endpoints
"""
endpoint = self.get_endpoints().get(name)
version = 1
if endpoint:
if not override:
raise RuntimeError(
f"An endpoint with that name ({name}) already"
' exists. Use "override = True" to force update '
"an existing endpoint."
)
version = endpoint.version + 1
obj = self._gen_endpoint(name, obj, description, version, schema)
self._upload_endpoint(obj)
if version == 1:
self._service.add_endpoint(Endpoint(**obj))
else:
self._service.set_endpoint(Endpoint(**obj))
self._wait_for_endpoint_deployment(obj["name"], obj["version"])
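    # A deployment sketch (function and endpoint name are hypothetical):
    #
    #   def add(x, y):
    #       return x + y
    #
    #   client.deploy("add", add, "Adds two numbers", override=True)
    #
    # With override=True an existing "add" endpoint is updated to a new
    # version instead of raising RuntimeError.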
def remove(self, name):
'''Removes an endpoint dict.
Parameters
----------
name : str
Endpoint name to remove'''
self._service.remove_endpoint(name)
def _gen_endpoint(self, name, obj, description, version=1, schema=None):
"""Generates an endpoint dict.
Parameters
----------
name : str
Endpoint name to add or update
obj : func
Object that backs the endpoint. See add() for a complete
description.
description : str
Description of the endpoint
version : int
The version. Defaults to 1.
Returns
-------
dict
Keys:
name : str
The name provided.
version : int
The version provided.
description : str
The provided description.
type : str
The type of the endpoint.
endpoint_obj : object
The wrapper around the obj provided that can be used to
generate the code and dependencies for the endpoint.
Raises
------
TypeError
When obj is not one of the expected types.
"""
# check for invalid PO names
_check_endpoint_name(name)
if description is None:
            description = obj.__doc__.strip() if isinstance(obj.__doc__, str) else ""
endpoint_object = CustomQueryObject(query=obj, description=description,)
return {
"name": name,
"version": version,
"description": description,
"type": "model",
"endpoint_obj": endpoint_object,
"dependencies": endpoint_object.get_dependencies(),
"methods": endpoint_object.get_methods(),
"required_files": [],
"required_packages": [],
"schema": copy.copy(schema),
}
def _upload_endpoint(self, obj):
"""Sends the endpoint across the wire."""
endpoint_obj = obj["endpoint_obj"]
dest_path = self._get_endpoint_upload_destination()
# Upload the endpoint
obj["src_path"] = os.path.join(
dest_path, "endpoints", obj["name"], str(obj["version"])
)
endpoint_obj.save(obj["src_path"])
def _wait_for_endpoint_deployment(
self, endpoint_name, version=1, interval=1.0,
):
"""
        Waits for the endpoint to be deployed by calling get_status() and
        checking the deployed version of the endpoint against the expected
        version. Returns once the deployed version is equal to or greater
        than the expected version. Polls with time.sleep().
"""
logger.info(
f"Waiting for endpoint {endpoint_name} to deploy to " f"version {version}"
)
start = time.time()
while True:
ep_status = self.get_status()
try:
ep = ep_status[endpoint_name]
except KeyError:
logger.info(
f"Endpoint {endpoint_name} doesn't " "exist in endpoints yet"
)
else:
logger.info(f"ep={ep}")
if ep["status"] == "LoadFailed":
raise RuntimeError(f'LoadFailed: {ep["last_error"]}')
elif ep["status"] == "LoadSuccessful":
if ep["version"] >= version:
logger.info("LoadSuccessful")
break
else:
logger.info("LoadSuccessful but wrong version")
if time.time() - start > 10:
raise RuntimeError("Waited more then 10s for deployment")
logger.info(f"Sleeping {interval}...")
time.sleep(interval)
def set_credentials(self, username, password):
"""
Set credentials for all the TabPy client-server communication
where client is tabpy-tools and server is tabpy-server.
Parameters
----------
username : str
User name (login). Username is case insensitive.
password : str
Password in plain text.
"""
self._service.set_credentials(username, password)
|
|
import os, sys
import operator
import glob
import numpy as np
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeLog( parser , args, skip=False ):
try:
if( not skip ):
import time
td = time.localtime()
progname = parser.prog
fstr = '.'+progname.split('.py')[0]
fstr += '_{}h{}min{}sec_{:02d}{:02d}{}.log'.format(\
td.tm_hour,td.tm_min,td.tm_sec,td.tm_mday,\
td.tm_mon,td.tm_year)
fl = open(fstr, 'w')
logStr = progname
for arg, value in sorted(vars(args).items()):
logStr+=" --{} {} ".format(arg, value)
fl.write( logStr )
fl.write( '\n' )
fl.close()
logStr = fstr = None
except:
pass
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def sortTimes(resultDir="./"):
dirList = os.listdir(resultDir) # Obtain the list of directories
#dirList.sort() # Sort the list - this won't work properly for floats (in string format)
# We need a more robust sorting which uses numerical values
# This is accomplished by using tuples.
dirTuple = []
for dirStr in dirList:
try:
dirTuple.append((dirStr, float(dirStr)) ) # (str,float)
except ValueError:
pass
# Sort according to the numerical values: key=operator.itemgetter(1)
sortedTuple = sorted(dirTuple, key=operator.itemgetter(1))
    sortedList = list(map(operator.itemgetter(0), sortedTuple))  # Extract a list of sorted strings
return sortedList
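# Example (directory names are hypothetical): for entries ['10.5', '2.0', 'logs'],
# sortTimes() skips 'logs' (not a number) and returns ['2.0', '10.5'], i.e. a
# numerical rather than lexicographic ordering.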
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def selectFromList( L ):
n = 0
for entry in L:
print(" # [{}]: {}".format(n,entry))
n+=1
#print("\n Enter the selection number(s): \n")
Ids = []
e = input(" Selection number(s): ")
if( e == ''):
select = input(" Select All? [1-9]=> Yes, [Empty]=> No: ")
if( select == ''): sys.exit(' Exiting program.')
else: Ids.extend(range(len(L)))
else:
try: Ids.extend( list( map( int, e.split(','))) )
except: sys.exit(' Invalid entry. Exiting ...')
return Ids
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def filesFromList( searchStr , allFiles=False):
print(" Extracting files with search string (or path): {}".format(searchStr))
fileList = []
files = glob.glob(searchStr) # obtain the list of files
fileList.extend(files) # Lists are iterable
fileList.sort() # Sort the list alphabetically
n = 0
for f in fileList:
print(" # ["+str(n)+"]: "+ str(f))
n+=1
if( allFiles ):
fileNos = range(len(fileList))
else:
infoStr = """
Enter file number(s), use comma as separator:
Example 1: File Numbers = 1
Example 2: File Numbers = 0,2,3,
"""
print(infoStr)
e = input(" File Numbers = ")
if( e == ''):
select=input(" Select All? [1-9]=> Yes, [Empty]=> No: ")
if( select == ''):
sys.exit(' Exiting program.')
else:
fileNos = list(range(len(fileList)))
else:
try: fileNos = list( map( int, e.split(',') ) )
except: sys.exit(' Bad entry. Exiting ...')
return fileNos, fileList
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def removeEntriesFromList(L, delList):
for entry in delList:
try: L.remove(entry)
except: pass
return L
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def inputIfNone( iStr , qStr ):
    if( not iStr ): iStr = input(qStr)
if( not iStr ): sys.exit(" Error in inputIfNone. Exiting ...")
return iStr
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def reformatNumberInWord(word, separator):
'''
Search for number in the given string and change its format
such that a basic sorting algorithm works on it.
For example: 21 -> 0021.
'''
wrdList = word.split(separator)
for i in range(len(wrdList)):
try:
iwrd = int(wrdList[i])
wrdList[i] = '{:04d}'.format(iwrd)
except:
pass
# Put together the new filename
word_new = wrdList[0]
for i in range(1,len(wrdList)):
word_new += separator+wrdList[i]
return word_new
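# Example: reformatNumberInWord('run_21_mean', '_') -> 'run_0021_mean', so a
# plain alphabetical sort now places it before 'run_0100_mean'.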
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def extractMatchingTerms( trialList , targetList , verbose=False ):
xList = []
for x in trialList:
if ( x in targetList ):
xList.append(x)
else:
if( verbose ): print('term = {} not present in the target list.'.format(x))
if( len(xList) == 0 and verbose ): print("Returning an empty list")
return xList
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def partialMatchFromList( matchStr, strList ):
w = None
nw = len(matchStr)
for word in strList:
l = min( nw , len(word) )
if( matchStr in word[:l] ):
w = word; break
return w
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def dataFromDict( keyStr, dataDict, allowNone=True ):
data = None
if( keyStr in dataDict.keys() ):
data = dataDict[keyStr]
elif( not allowNone ):
sys.exit(' Error in dataFromDict: {} not found. Exiting ...'.format(keyStr))
else:
pass
return data
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def basicAnalysis( x, xStr, xRef, printOn ):
x_mean = np.mean(x); x_max = np.max(x)
x_min = np.min(x); x_std = np.std(x)
N = float(len(x))
dx = x-xRef
x_rms = np.sqrt( np.sum( x**2 )/N )
dx_rms = np.sqrt( np.sum( dx**2 )/N )
if( printOn ):
pStr = '''
mean({0})= {1}
max({0}) = {2}
min({0}) = {3}
std({0}) = {4}
rms({0}) = {5}
drms({0}-{0}Ref) = {6}
'''.format(xStr, x_mean, x_max, x_min, x_std, x_rms, dx_rms)
print(pStr)
return x_mean, x_max, x_min, x_std, x_rms
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def openStlFile( solidName ):
solidName = solidName.split('.')[0] # Take away the possible .stl
header = 'solid {}'.format(solidName)
    fx = open(solidName+'.stl', 'w')
fx.write(header)
return fx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def closeStlFile( fs, solidName ):
# Take away the possible .stl
solidName = solidName.split('.')[0] # Take away the possible .stl
footer = '\nendsolid {}\n'.format(solidName)
fs.write(footer)
print(' Closing file {}.'.format(solidName))
fs.close()
print(' Done ! ')
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeStlFacet(fl, nv, v1, v2, v3 ):
fstr= '''
facet normal {0} {1} {2}
outer loop
vertex {3} {4} {5}
vertex {6} {7} {8}
vertex {9} {10} {11}
endloop
endfacet'''.format(nv[0],nv[1],nv[2], v1[0],v1[1],v1[2], v2[0],v2[1],v2[2],\
v3[0],v3[1],v3[2])
fl.write(fstr)
return fl
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def vtkWriteHeaderAndGridStructured2d( X, Y, Z, fileName, dataStr ):
nPoints = X.size
irows = len(X[:,0]); jcols = len(X[0,:])
header = '# vtk DataFile Version 2.0\n'\
+'{}\n'.format(dataStr)\
+'ASCII\n'\
+'DATASET STRUCTURED_GRID\n'\
+'DIMENSIONS {} {} {}\n'.format(jcols, irows, 1)\
+'POINTS {} float\n'.format( nPoints )
print(' Writing header for file {} ...'.format( fileName ))
fileName = fileName.split('.vtk')[0]+'-2D.vtk'
f = open(fileName, 'w')
f.write( header )
print(' Writing mesh data for file {} ...'.format( fileName ))
for i in range(irows):
for j in range(jcols):
s = '{0:.1f}\t{1:.1f}\t{2:.1f}\n'.format( X[i,j], Y[i,j], Z[i,j] )
f.write(s)
return f
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def vtkWritePointDataHeader( fx, V, nVars ):
nPoints = V.size
header ='POINT_DATA {}\n'.format(nPoints)\
+'FIELD attributes {}\n'.format(nVars)
fx.write(header)
return fx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def vtkWritePointDataStructured2D( fx, V, X, vStr ):
# Check that dimensions agree
irows = len(X[:,0]); jcols = len(X[0,:])
icheck = len(V[:,0]); jcheck = len(V[0,:])
if( irows != icheck or jcols != jcheck ):
sys.exit("dim(V) /= dim(X). Exiting ...")
try:
nPoints = X.size
FieldData ='{0} 1 {1} float\n'.format(vStr, nPoints)
print(' Writing {} field data ...'.format(vStr))
fx.write(FieldData)
for i in range(irows):
for j in range(jcols):
s = '{0:12.4e} '.format(V[i,j])
fx.write(s)
fx.write('\n') # Important to place a line change at the end.
print(' ... done!')
except:
pass
return fx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def vtkWriteDataStructured2d( V, X, Y, Z, fileName, dataStr ):
f_vtk = vtkWriteHeaderAndGridStructured2d( X, Y, Z, fileName, dataStr )
try:
f_vtk = vtkWritePointDataHeader( f_vtk, V, 1 )
f_vtk = vtkWritePointDataStructured2D( f_vtk, V, X, dataStr )
except:
pass
f_vtk.close()
print(' Writing VTK-data complete!')
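# A usage sketch for the structured 2D writer (array shapes and names are
# illustrative):
#
#   X, Y = np.meshgrid(np.linspace(0., 1., 11), np.linspace(0., 2., 21))
#   Z = np.zeros_like(X)
#   V = np.sin(X) * np.cos(Y)
#   vtkWriteDataStructured2d(V, X, Y, Z, 'field.vtk', 'sinx_cosy')
#
# This writes 'field-2D.vtk' containing the grid points followed by a single
# POINT_DATA field named 'sinx_cosy'.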
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def vtkWriteUnsPointData( V, X, Y, Z, filename ):
nPoints = X.size
irows = len(X[:,0]); jcols = len(X[0,:])
header = '# vtk DataFile Version 2.0\n'\
+'Footprint Point Data\n'\
+'ASCII\n'\
+'DATASET UNSTRUCTURED_GRID\n'\
+'POINTS {} float\n'.format( nPoints )
pointdata ='POINT_DATA {}\n'.format(nPoints)\
+'SCALARS fp float 1\n'\
               +'LOOKUP_TABLE fp\n'
print(' Writing file {} ...'.format( filename ))
filename = filename.split('.vtk')[0]+'.vtk'
f = open(filename, 'w')
f.write( header )
for i in range(irows):
for j in range(jcols):
s = '{0:.1f}\t{1:.1f}\t{2:.1f}\n'.format( X[i,j], Y[i,j], Z[i,j] )
f.write(s)
f.write(pointdata)
for i in range(irows):
for j in range(jcols):
s = '{0:.2f} '.format(V[i,j])
f.write(s)
    f.write('\n')
f.close()
print(' ... done!')
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
|
|
import sys
import mimetypes
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.utils.translation import ugettext as _
from django.contrib.sites.managers import CurrentSiteManager
from django.template.loader import select_template
from django.template import Context, TemplateDoesNotExist
from fields import (Metadata, SerializedObjectField,
MetadataJSONEncoder, MetadataJSONDecoder,
AutoSlugField)
from massmedia import settings as appsettings
from massmedia.utils import value_or_list, super_force_ascii
# Patch mimetypes w/ any extra types
mimetypes.types_map.update(appsettings.EXTRA_MIME_TYPES)
# This is required because the Hachoir package screws up the stdout and stderr
OUT, ERR = sys.stdout, sys.stderr
try:
from hachoir_core.error import HachoirError
from hachoir_core.stream import InputStreamError
from hachoir_parser import createParser
from hachoir_metadata import extractMetadata
EXTRACT_METADATA = True
except ImportError:
EXTRACT_METADATA = False
sys.stdout, sys.stderr = OUT, ERR
class PublicMediaManager(CurrentSiteManager):
def __init__(self):
super(PublicMediaManager, self).__init__('site')
def public(self):
return self.get_queryset().filter(public=True)
class Media(models.Model):
"""
The abstract base class for all media types. It includes all the common
attributes and functions.
"""
title = models.CharField(
_("Title"),
max_length=255)
slug = AutoSlugField(
populate_from='title',
verbose_name=_("Slug"),
unique=True,
max_length=255)
creation_date = models.DateTimeField(
_("Creation Date"),
auto_now_add=True)
author = models.ForeignKey(
User,
blank=True, null=True,
limit_choices_to={'is_staff': True})
one_off_author = models.CharField(
_('One-off Author'),
max_length=100,
blank=True)
caption = models.TextField(
_("Caption"),
blank=True)
metadata = SerializedObjectField(
_("Metadata"),
blank=True,
encoder=MetadataJSONEncoder,
decoder=MetadataJSONDecoder)
site = models.ForeignKey(
Site,
related_name='%(class)s_site')
reproduction_allowed = models.BooleanField(
_("we have reproduction rights for this media"),
default=True)
public = models.BooleanField(
_("Public"),
help_text=_("this media is publicly available"),
default=True)
external_url = models.URLField(
_("External URL"),
blank=True, null=True,
help_text=_("If this URL Field is set, the media will be pulled externally"))
mime_type = models.CharField(
_("MIME type"),
max_length=150,
blank=True, null=True)
width = models.IntegerField(
_("Width"),
blank=True, null=True,
help_text=_("The width of the widget for the media"))
height = models.IntegerField(
_("Height"),
blank=True, null=True,
help_text=_("The height of the widget for the media"))
widget_template = models.CharField(
_("Widget Template"),
max_length=255,
blank=True, null=True,
help_text=_("The template name used to generate the widget (defaults to MIME type layout)"))
objects = PublicMediaManager()
class Meta:
ordering = ('-creation_date',)
abstract = True
def __unicode__(self):
return self.title
@property
def author_name(self):
if self.author:
            return self.author.get_full_name()
else:
return self.one_off_author
@models.permalink
def get_absolute_url(self):
return ('massmedia_detail', (), {
'mediatype': self.__class__.__name__.lower(),
'slug': self.slug
})
@property
def media_url(self):
return self.external_url
def save(self, *args, **kwargs):
if self.site_id is None:
self.site = Site.objects.get_current()
super(Media, self).save(*args, **kwargs)
# That save needs to come before we look at the file otherwise the
# self.file.path is incorrect.
if hasattr(self, 'file') and self.file and not self.mime_type:
self.mime_type = mimetypes.guess_type(self.file.path)[0]
if self.external_url and not self.mime_type:
self.mime_type, blank = mimetypes.guess_type(self.external_url)
if not self.metadata and hasattr(self, 'file') and self.file and EXTRACT_METADATA:
self.parse_metadata()
kwargs.pop('force_insert', None)
super(Media, self).save(*args, **kwargs)
def thumb(self):
return "<p>" + _("No Thumbnail Available") + "</p>"
thumb.allow_tags = True
thumb.short_description = _("Thumbnail")
def get_mime_type(self):
if self.mime_type:
return self.mime_type
if self.metadata and 'mime_type' in self.metadata:
return self.metadata['mime_type']
return None
def get_template(self, template_type="detail"):
mime_type = self.get_mime_type()
if appsettings.FS_TEMPLATES:
if self.widget_template:
lookups = [self.widget_template]
else:
lookups = []
if mime_type is None:
lookups.append('massmedia/mediatypes/generic_%s.html' % template_type)
else:
lookups.extend([
'massmedia/mediatypes/%s_%s.html' % (mime_type, template_type),
'massmedia/mediatypes/%s/generic_%s.html' % (mime_type.split('/')[0], template_type),
'massmedia/mediatypes/generic_%s.html' % template_type
])
try:
return select_template(lookups)
except TemplateDoesNotExist:
raise TemplateDoesNotExist(_("Can't find a template to render the media. Looking in %s") % ", ".join(lookups))
else:
from massmedia.models import MediaTemplate
if self.widget_template:
lookups = [{'name': self.widget_template}]
elif mime_type is None:
lookups = [{'mimetype': ''}]
else:
lookups = [
dict(mimetype=mime_type, name=template_type),
dict(mimetype=mime_type.split('/')[0], name=template_type),
dict(mimetype='', name=template_type)
]
for kwargs in lookups:
try:
return MediaTemplate.objects.get(**kwargs)
except MediaTemplate.DoesNotExist:
pass
return MediaTemplate.objects.get(mimetype='').template()
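    # For illustration, with FS_TEMPLATES enabled, no widget_template set and
    # a MIME type of 'image/jpeg', a detail render searches templates in order:
    #   massmedia/mediatypes/image/jpeg_detail.html
    #   massmedia/mediatypes/image/generic_detail.html
    #   massmedia/mediatypes/generic_detail.html
    # falling back to the database-backed MediaTemplate lookup when
    # FS_TEMPLATES is disabled.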
def _render(self, template_type):
tmpl = self.get_template(template_type)
ctxt = Context({
'media': self,
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': getattr(settings, 'STATIC_URL', settings.MEDIA_URL)
})
return tmpl.render(ctxt)
def render_thumb(self):
return self._render('thumb')
render_thumb.allow_tags = True
def render_detail(self):
return self._render('detail')
def _get_raw_metadata(self, path):
"""
Return the raw metadata as a dictionary
"""
try:
parser = createParser(unicode(path))
if not parser:
if settings.DEBUG:
raise Exception("No parser was created.")
return {}
metadata = extractMetadata(parser, appsettings.INFO_QUALITY)
if not metadata:
# if settings.DEBUG:
# raise Exception("No metadata was extracted.")
return {}
except (InputStreamError, HachoirError):
if settings.DEBUG:
raise
return {}
return dict([(x.description, value_or_list([item.value for item in x.values])) for x in sorted(metadata) if x.values])
def parse_metadata(self):
data = self._get_raw_metadata(self.file.path)
for key, val in data.items():
if isinstance(val, basestring):
data[key] = super_force_ascii(val)
self.metadata = Metadata(data)
|
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import jmespath
from botocore.exceptions import ClientError
from touchdown.aws import common
from touchdown.aws.rds import Database
from touchdown.core import errors, plan
from touchdown.core.datetime import now, parse_datetime
def get_from_jmes(db, **kwargs):
new_kwargs = {}
for key, value in kwargs.items():
if callable(value):
value = value()
if value:
newval = jmespath.search(value, db)
if newval:
new_kwargs[key] = newval
return new_kwargs
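# A small sketch of how get_from_jmes is used (values are illustrative):
#
#   db = {"Endpoint": {"Port": 5432}, "MultiAZ": False}
#   get_from_jmes(db, Port="Endpoint.Port", MultiAZ="MultiAZ")
#   # -> {"Port": 5432}
#
# Falsy results (like MultiAZ=False here) are dropped, and callable
# expressions are evaluated first so a lookup can be skipped by returning None.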
class Plan(common.SimplePlan, plan.Plan):
name = "rollback"
resource = Database
service_name = "rds"
api_version = "2014-10-31"
def get_database(self, name):
try:
dbs = self.client.describe_db_instances(DBInstanceIdentifier=name).get(
"DBInstances", []
)
except ClientError:
return None
return dbs[0]
def check_snapshot(self, db, snapshot_name):
try:
snapshots = self.client.describe_db_snapshots(
DBInstanceIdentifier=db["DBInstanceIdentifier"],
DBSnapshotIdentifier=snapshot_name,
).get("DBSnapshots", [])
except ClientError:
raise errors.Error("Could not find snapshot {}".format(snapshot_name))
if len(snapshots) == 0:
raise errors.Error("Could not find snapshot {}".format(snapshot_name))
def check_point_in_time(self, db, point_in_time):
# Ensure we don't restore too recently. For example:
        # 1. Obviously we can't restore to the future
# 2. Equally, there is about 5 minutes of replication lag. We can only
# restore to periods that were over 5 minutes ago.
if point_in_time > db["LatestRestorableTime"]:
raise errors.Error(
"You cannot restore to {}. The most recent restorable time is {}".format(
point_in_time, db["LatestRestorableTime"]
)
)
# Ensure we don't restore before this instance even existed
if point_in_time < db["InstanceCreateTime"]:
raise errors.Error(
"You cannot restore to {} because it is before the instance was created ({})".format(
point_in_time, db["InstanceCreateTime"]
)
)
# We can't restore earlier than the oldest backup either
# With a caveat that InstanceIdentifier might imply a snapshot belongs
# to the current database when it doesn't. We filter on matching
# InstanceCreateTime to avoid that.
results = self.client.describe_db_snapshots(
DBInstanceIdentifier=db["DBInstanceIdentifier"]
)
        snapshots = [
            snapshot
            for snapshot in results.get("DBSnapshots", [])
            if snapshot["InstanceCreateTime"] == db["InstanceCreateTime"]
        ]
        snapshots.sort(key=lambda snapshot: snapshot["SnapshotCreateTime"])
if not snapshots or point_in_time < snapshots[0]["SnapshotCreateTime"]:
raise errors.Error(
"You cannot restore to {} because it is before the first available backup was created ({})".format(
point_in_time, snapshots[0]["SnapshotCreateTime"]
)
)
def rename_database(self, from_name, to_name):
print("Renaming {} to {}".format(from_name, to_name))
self.client.modify_db_instance(
DBInstanceIdentifier=from_name,
NewDBInstanceIdentifier=to_name,
ApplyImmediately=True,
)
def delete_database(self, name):
print("Deleting old database")
self.client.delete_db_instance(
DBInstanceIdentifier=name, SkipFinalSnapshot=True
)
def wait_for_database(self, name):
print("Waiting for database to be ready")
while True:
db = self.get_database(name)
if (
db
and db["DBInstanceStatus"] == "available"
and len(db["PendingModifiedValues"]) == 0
):
return
time.sleep(10)
def copy_database_settings(self, db_name, db):
print("Restoring database settings")
self.client.modify_db_instance(
DBInstanceIdentifier=db_name,
ApplyImmediately=True,
**get_from_jmes(
db,
AllocatedStorage="AllocatedStorage",
DBSecurityGroups="DBSecurityGroups[?Status == 'active'].DBSecurityGroupName",
VpcSecurityGroupIds="VpcSecurityGroups[?Status == 'active'].VpcSecurityGroupId",
DBParameterGroupName="DBParameterGroups[0].DBParameterGroupName",
BackupRetentionPeriod="BackupRetentionPeriod",
PreferredBackupWindow="PreferredBackupWindow",
PreferredMaintenanceWindow="PreferredMaintenanceWindow",
EngineVersion="EngineVersion",
CACertificateIdentifier="CACertificateIdentifier",
)
)
def check(self, target):
self.old_db_name = "{}-{:%Y%m%d%H%M%S}".format(self.resource.name, now())
self.db = self.get_database(self.resource.name)
if not self.db:
raise errors.Error("Database {} not found?".format(self.resource.name))
if self.get_database(self.old_db_name):
raise errors.Error(
"Database {} already exists - restore in progress?".format(
self.old_db_name
)
)
self.datetime_target = None
try:
self.datetime_target = parse_datetime(target)
self.check_point_in_time(self.db, self.datetime_target)
except errors.Error:
self.check_snapshot(self.db, target)
def rollback(self, target):
self.rename_database(self.resource.name, self.old_db_name)
self.wait_for_database(self.old_db_name)
kwargs = get_from_jmes(
self.db,
DBInstanceClass="DBInstanceClass",
Port="Endpoint.Port",
AvailabilityZone=lambda: "AvailabilityZone"
if not self.db.get("MultiAZ", False)
else None,
DBSubnetGroupName="DBSubnetGroup.DBSubnetGroupName",
MultiAZ="MultiAZ",
PubliclyAccessible="PubliclyAccessible",
AutoMinorVersionUpgrade="AutoMinorVersionUpgrade",
LicenseModel="LicenseModel",
DBName=lambda: "DBName" if self.db["Engine"] != "postgres" else None,
Engine="Engine",
Iops="Iops",
OptionGroupName="OptionGroupMemberships[0].OptionGroupName",
StorageType="StorageType",
TdeCredentialArn="TdeCredentialArn",
)
print("Spinning database up from backup")
if self.datetime_target:
self.client.restore_db_instance_to_point_in_time(
SourceDBInstanceIdentifier=self.old_db_name,
TargetDBInstanceIdentifier=self.resource.name,
RestoreTime=self.datetime_target,
**kwargs
)
else:
self.client.restore_db_instance_from_db_snapshot(
DBInstanceIdentifier=self.resource.name,
DBSnapshotIdentifier=target,
**kwargs
)
self.wait_for_database(self.resource.name)
self.copy_database_settings(self.resource.name, self.db)
self.wait_for_database(self.resource.name)
self.delete_database(self.old_db_name)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Swift backend store"""
import StringIO
import hashlib
import httplib
import tempfile
import unittest
import stubout
import swift.common.client
from glance.common import exception
from glance.common import utils
from glance.store import BackendException
import glance.store.swift
from glance.store.location import get_location_from_uri
from glance.tests import utils as test_utils
FAKE_UUID = utils.generate_uuid
Store = glance.store.swift.Store
FIVE_KB = (5 * 1024)
FIVE_GB = (5 * 1024 * 1024 * 1024)
MAX_SWIFT_OBJECT_SIZE = FIVE_GB
SWIFT_PUT_OBJECT_CALLS = 0
SWIFT_CONF = {'verbose': True,
'debug': True,
'swift_store_user': 'user',
'swift_store_key': 'key',
'swift_store_auth_address': 'localhost:8080',
'swift_store_container': 'glance'}
# We stub out as little as possible to ensure that the code paths
# between glance.store.swift and swift.common.client are tested
# thoroughly
def stub_out_swift_common_client(stubs):
fixture_containers = ['glance']
fixture_headers = {'glance/%s' % FAKE_UUID:
{'content-length': FIVE_KB,
'etag': 'c2e5db72bd7fd153f53ede5da5a06de3'}}
fixture_objects = {'glance/%s' % FAKE_UUID:
StringIO.StringIO("*" * FIVE_KB)}
def fake_head_container(url, token, container, **kwargs):
if container not in fixture_containers:
msg = "No container %s found" % container
raise swift.common.client.ClientException(msg,
http_status=httplib.NOT_FOUND)
def fake_put_container(url, token, container, **kwargs):
fixture_containers.append(container)
def fake_put_object(url, token, container, name, contents, **kwargs):
# PUT returns the ETag header for the newly-added object
# Large object manifest...
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS += 1
fixture_key = "%s/%s" % (container, name)
if not fixture_key in fixture_headers.keys():
if kwargs.get('headers'):
etag = kwargs['headers']['ETag']
fixture_headers[fixture_key] = {'manifest': True,
'etag': etag}
return etag
if hasattr(contents, 'read'):
fixture_object = StringIO.StringIO()
chunk = contents.read(Store.CHUNKSIZE)
checksum = hashlib.md5()
while chunk:
fixture_object.write(chunk)
checksum.update(chunk)
chunk = contents.read(Store.CHUNKSIZE)
etag = checksum.hexdigest()
else:
fixture_object = StringIO.StringIO(contents)
etag = hashlib.md5(fixture_object.getvalue()).hexdigest()
read_len = fixture_object.len
if read_len > MAX_SWIFT_OBJECT_SIZE:
msg = ('Image size:%d exceeds Swift max:%d' %
(read_len, MAX_SWIFT_OBJECT_SIZE))
raise swift.common.client.ClientException(
msg, http_status=httplib.REQUEST_ENTITY_TOO_LARGE)
fixture_objects[fixture_key] = fixture_object
fixture_headers[fixture_key] = {
'content-length': read_len,
'etag': etag}
return etag
else:
msg = ("Object PUT failed - Object with key %s already exists"
% fixture_key)
raise swift.common.client.ClientException(msg,
http_status=httplib.CONFLICT)
def fake_get_object(url, token, container, name, **kwargs):
# GET returns the tuple (list of headers, file object)
fixture_key = "%s/%s" % (container, name)
if not fixture_key in fixture_headers:
msg = "Object GET failed"
raise swift.common.client.ClientException(msg,
http_status=httplib.NOT_FOUND)
fixture = fixture_headers[fixture_key]
if 'manifest' in fixture:
# Large object manifest... we return a file containing
# all objects with prefix of this fixture key
chunk_keys = sorted([k for k in fixture_headers.keys()
if k.startswith(fixture_key) and
k != fixture_key])
result = StringIO.StringIO()
for key in chunk_keys:
result.write(fixture_objects[key].getvalue())
return fixture_headers[fixture_key], result
else:
return fixture_headers[fixture_key], fixture_objects[fixture_key]
def fake_head_object(url, token, container, name, **kwargs):
# HEAD returns the list of headers for an object
try:
fixture_key = "%s/%s" % (container, name)
return fixture_headers[fixture_key]
except KeyError:
msg = "Object HEAD failed - Object does not exist"
raise swift.common.client.ClientException(msg,
http_status=httplib.NOT_FOUND)
def fake_delete_object(url, token, container, name, **kwargs):
# DELETE returns nothing
fixture_key = "%s/%s" % (container, name)
if fixture_key not in fixture_headers.keys():
msg = "Object DELETE failed - Object does not exist"
raise swift.common.client.ClientException(msg,
http_status=httplib.NOT_FOUND)
else:
del fixture_headers[fixture_key]
del fixture_objects[fixture_key]
def fake_http_connection(*args, **kwargs):
return None
def fake_get_auth(url, *args, **kwargs):
if 'http' in url and '://' not in url:
raise ValueError('Invalid url %s' % url)
return None, None
stubs.Set(swift.common.client,
'head_container', fake_head_container)
stubs.Set(swift.common.client,
'put_container', fake_put_container)
stubs.Set(swift.common.client,
'put_object', fake_put_object)
stubs.Set(swift.common.client,
'delete_object', fake_delete_object)
stubs.Set(swift.common.client,
'head_object', fake_head_object)
stubs.Set(swift.common.client,
'get_object', fake_get_object)
stubs.Set(swift.common.client,
'get_auth', fake_get_auth)
stubs.Set(swift.common.client,
'http_connection', fake_http_connection)
class TestStore(unittest.TestCase):
def setUp(self):
"""Establish a clean test environment"""
self.stubs = stubout.StubOutForTesting()
stub_out_swift_common_client(self.stubs)
self.store = Store(test_utils.TestConfigOpts(SWIFT_CONF))
def tearDown(self):
"""Clear the test environment"""
self.stubs.UnsetAll()
def test_get(self):
"""Test a "normal" retrieval of an image in chunks"""
uri = "swift://user:key@auth_address/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
(image_swift, image_size) = self.store.get(loc)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
def test_get_with_http_auth(self):
"""
Test a retrieval from Swift with an HTTP authurl. This is
specified either via a Location header with swift+http:// or using
http:// in the swift_store_auth_address config value
"""
loc = get_location_from_uri("swift+http://user:key@auth_address/"
"glance/%s" % FAKE_UUID)
(image_swift, image_size) = self.store.get(loc)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
def test_get_non_existing(self):
"""
Test that trying to retrieve a swift object that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://user:key@authurl/glance/noexist")
self.assertRaises(exception.NotFound,
self.store.get,
loc)
def test_add(self):
"""Test that we can add an image via the swift backend"""
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = utils.generate_uuid()
expected_location = 'swift+https://user:key@localhost:8080' + \
'/glance/%s' % expected_image_id
image_swift = StringIO.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
location, size, checksum = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
self.assertEquals(expected_location, location)
self.assertEquals(expected_swift_size, size)
self.assertEquals(expected_checksum, checksum)
# Expecting a single object to be created on Swift i.e. no chunking.
self.assertEquals(SWIFT_PUT_OBJECT_CALLS, 1)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = new_image_swift.getvalue()
new_image_swift_size = new_image_swift.len
self.assertEquals(expected_swift_contents, new_image_contents)
self.assertEquals(expected_swift_size, new_image_swift_size)
def test_add_auth_url_variations(self):
"""
Test that we can add an image via the swift backend with
a variety of different auth_address values
"""
variations = {
'http://localhost:80': 'swift+http://user:key@localhost:80'
'/glance/%s',
'http://localhost': 'swift+http://user:key@localhost/glance/%s',
'http://localhost/v1': 'swift+http://user:key@localhost'
'/v1/glance/%s',
'http://localhost/v1/': 'swift+http://user:key@localhost'
'/v1/glance/%s',
'https://localhost': 'swift+https://user:key@localhost/glance/%s',
'https://localhost:8080': 'swift+https://user:key@localhost:8080'
'/glance/%s',
'https://localhost/v1': 'swift+https://user:key@localhost'
'/v1/glance/%s',
'https://localhost/v1/': 'swift+https://user:key@localhost'
'/v1/glance/%s',
'localhost': 'swift+https://user:key@localhost/glance/%s',
'localhost:8080/v1': 'swift+https://user:key@localhost:8080'
'/v1/glance/%s',
}
for variation, expected_location in variations.items():
image_id = utils.generate_uuid()
expected_location = expected_location % image_id
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = \
hashlib.md5(expected_swift_contents).hexdigest()
new_conf = SWIFT_CONF.copy()
new_conf['swift_store_auth_address'] = variation
image_swift = StringIO.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
self.store = Store(test_utils.TestConfigOpts(new_conf))
location, size, checksum = self.store.add(image_id, image_swift,
expected_swift_size)
self.assertEquals(expected_location, location)
self.assertEquals(expected_swift_size, size)
self.assertEquals(expected_checksum, checksum)
self.assertEquals(SWIFT_PUT_OBJECT_CALLS, 1)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = new_image_swift.getvalue()
new_image_swift_size = new_image_swift.len
self.assertEquals(expected_swift_contents, new_image_contents)
self.assertEquals(expected_swift_size, new_image_swift_size)
def test_add_no_container_no_create(self):
"""
Tests that adding an image with a non-existing container
raises an appropriate exception
"""
conf = SWIFT_CONF.copy()
conf['swift_store_create_container_on_put'] = 'False'
conf['swift_store_container'] = 'noexist'
image_swift = StringIO.StringIO("nevergonnamakeit")
self.store = Store(test_utils.TestConfigOpts(conf))
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
# We check the exception text to ensure the "container missing"
# message is present; otherwise we would simply have used
# self.assertRaises here
exception_caught = False
try:
self.store.add(utils.generate_uuid(), image_swift, 0)
except BackendException as e:
exception_caught = True
self.assertTrue("container noexist does not exist "
"in Swift" in str(e))
self.assertTrue(exception_caught)
self.assertEquals(SWIFT_PUT_OBJECT_CALLS, 0)
def test_add_no_container_and_create(self):
"""
Tests that adding an image with a non-existing container
creates the container automatically if flag is set
"""
conf = SWIFT_CONF.copy()
conf['swift_store_create_container_on_put'] = 'True'
conf['swift_store_container'] = 'noexist'
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = utils.generate_uuid()
expected_location = 'swift+https://user:key@localhost:8080' + \
'/noexist/%s' % expected_image_id
image_swift = StringIO.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
self.store = Store(test_utils.TestConfigOpts(conf))
location, size, checksum = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
self.assertEquals(expected_location, location)
self.assertEquals(expected_swift_size, size)
self.assertEquals(expected_checksum, checksum)
self.assertEquals(SWIFT_PUT_OBJECT_CALLS, 1)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = new_image_swift.getvalue()
new_image_swift_size = new_image_swift.len
self.assertEquals(expected_swift_contents, new_image_contents)
self.assertEquals(expected_swift_size, new_image_swift_size)
def test_add_large_object(self):
"""
Tests adding a very large image. We simulate the large object
by setting store.large_object_size to a small number and then
verify that multiple calls to put_object() have been made.
"""
conf = SWIFT_CONF.copy()
conf['swift_store_container'] = 'glance'
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = utils.generate_uuid()
expected_location = 'swift+https://user:key@localhost:8080' + \
'/glance/%s' % expected_image_id
image_swift = StringIO.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
self.store = Store(test_utils.TestConfigOpts(conf))
orig_max_size = self.store.large_object_size
orig_temp_size = self.store.large_object_chunk_size
try:
self.store.large_object_size = 1024
self.store.large_object_chunk_size = 1024
location, size, checksum = self.store.add(expected_image_id,
image_swift,
expected_swift_size)
finally:
self.store.large_object_chunk_size = orig_temp_size
self.store.large_object_size = orig_max_size
self.assertEquals(expected_location, location)
self.assertEquals(expected_swift_size, size)
self.assertEquals(expected_checksum, checksum)
# Expecting 6 objects to be created on Swift -- 5 chunks and 1
# manifest.
self.assertEquals(SWIFT_PUT_OBJECT_CALLS, 6)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = new_image_swift.getvalue()
new_image_swift_size = new_image_swift.len
self.assertEquals(expected_swift_contents, new_image_contents)
self.assertEquals(expected_swift_size, new_image_swift_size)
def test_add_large_object_zero_size(self):
"""
Tests that adding an image to Swift which has both an unknown size and
exceeds Swift's maximum limit of 5GB is correctly uploaded.
We avoid the overhead of creating a 5GB object for this test by
temporarily setting MAX_SWIFT_OBJECT_SIZE to 1KB, and then adding
an object of 5KB.
Bug lp:891738
"""
conf = SWIFT_CONF.copy()
conf['swift_store_container'] = 'glance'
# Set up a 'large' image of 5KB
expected_swift_size = FIVE_KB
expected_swift_contents = "*" * expected_swift_size
expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
expected_image_id = utils.generate_uuid()
expected_location = 'swift+https://user:key@localhost:8080' + \
'/glance/%s' % expected_image_id
image_swift = StringIO.StringIO(expected_swift_contents)
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
# Temporarily set Swift MAX_SWIFT_OBJECT_SIZE to 1KB and add our image,
# explicitly setting the image_length to 0
self.store = Store(test_utils.TestConfigOpts(conf))
orig_max_size = self.store.large_object_size
orig_temp_size = self.store.large_object_chunk_size
global MAX_SWIFT_OBJECT_SIZE
orig_max_swift_object_size = MAX_SWIFT_OBJECT_SIZE
try:
MAX_SWIFT_OBJECT_SIZE = 1024
self.store.large_object_size = 1024
self.store.large_object_chunk_size = 1024
location, size, checksum = self.store.add(expected_image_id,
image_swift, 0)
finally:
self.store.large_object_chunk_size = orig_temp_size
self.store.large_object_size = orig_max_size
MAX_SWIFT_OBJECT_SIZE = orig_max_swift_object_size
self.assertEquals(expected_location, location)
self.assertEquals(expected_swift_size, size)
self.assertEquals(expected_checksum, checksum)
# Expecting 7 calls to put_object -- 5 chunks, a zero chunk which is
# then deleted, and the manifest. Note the difference with above
# where the image_size is specified in advance (there's no zero chunk
# in that case).
self.assertEquals(SWIFT_PUT_OBJECT_CALLS, 7)
loc = get_location_from_uri(expected_location)
(new_image_swift, new_image_size) = self.store.get(loc)
new_image_contents = new_image_swift.getvalue()
new_image_swift_size = new_image_swift.len
self.assertEquals(expected_swift_contents, new_image_contents)
self.assertEquals(expected_swift_size, new_image_swift_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_swift = StringIO.StringIO("nevergonnamakeit")
self.assertRaises(exception.Duplicate,
self.store.add,
FAKE_UUID, image_swift, 0)
def _option_required(self, key):
conf = SWIFT_CONF.copy()
del conf[key]
try:
self.store = Store(test_utils.TestConfigOpts(conf))
return self.store.add == self.store.add_disabled
except Exception:
return False
def test_no_user(self):
"""
Tests that omitting the user option disables the add method
"""
self.assertTrue(self._option_required('swift_store_user'))
def test_no_key(self):
"""
Tests that omitting the key option disables the add method
"""
self.assertTrue(self._option_required('swift_store_key'))
def test_no_auth_address(self):
"""
Tests that omitting the auth address option disables the add method
"""
self.assertTrue(self._option_required('swift_store_auth_address'))
def test_delete(self):
"""
Test we can delete an existing image in the swift store
"""
uri = "swift://user:key@authurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exception.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete a swift object that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://user:key@authurl/glance/noexist")
self.assertRaises(exception.NotFound, self.store.delete, loc)
class TestChunkReader(unittest.TestCase):
def test_read_all_data(self):
"""
Replicate what goes on in the Swift driver with the
repeated creation of the ChunkReader object
"""
CHUNKSIZE = 100
checksum = hashlib.md5()
data_file = tempfile.NamedTemporaryFile()
data_file.write('*' * 1024)
data_file.flush()
infile = open(data_file.name, 'rb')
bytes_read = 0
while True:
cr = glance.store.swift.ChunkReader(infile, checksum, CHUNKSIZE)
chunk = cr.read(CHUNKSIZE)
bytes_read += len(chunk)
if len(chunk) == 0:
break
self.assertEqual(1024, bytes_read)
data_file.close()
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
r"""
This module allows you to connect to an HDFS installation, read and
write files and get information on files, directories and global
filesystem properties.
Configuration
-------------
The hdfs module is built on top of ``libhdfs``, in turn a JNI wrapper
around the Java fs code: therefore, for the module to work properly,
the ``CLASSPATH`` environment variable must include all paths to the
relevant Hadoop jars. Pydoop will do this for you, but it needs to
know where your Hadoop installation is located and where your Hadoop
configuration directory is: if Pydoop is not able to automatically find
these directories, you have to make sure that the ``HADOOP_HOME`` and
``HADOOP_CONF_DIR`` environment variables are set to the appropriate
values.
Another important environment variable for this module is
``LIBHDFS_OPTS``\ . This is used to set options for the JVM on top of
which the module runs, most notably the amount of memory it uses. If
``LIBHDFS_OPTS`` is not set, the C libhdfs lets the JVM fall back to
its system default, typically 1 GB. According to our
experience, this is *much* more than most applications need and adds a
lot of unnecessary memory overhead. For this reason, the hdfs module
sets ``LIBHDFS_OPTS`` to ``-Xmx48m``\ , a value that we found to be
appropriate for most applications. If your needs are different, you
can set the environment variable externally and it will override the
above setting.
"""
__all__ = [
'path',
'init',
'reset',
'hdfs',
'default_is_local',
'open',
'dump',
'load',
'cp',
'put',
'get',
'mkdir',
'rmr',
'lsl',
'ls',
'chmod',
'move',
'chown',
'rename',
'renames',
'stat',
'lstat',
'access',
'utime',
]
import os
import operator
from functools import reduce
import pydoop
from . import common, path
from pydoop.utils.py3compat import bintype
try:
_ORIG_CLASSPATH
except NameError:
_ORIG_CLASSPATH = os.getenv("CLASSPATH", "")
# --- MODULE CONFIG ---
def init():
os.environ["CLASSPATH"] = "%s:%s:%s" % (
pydoop.hadoop_classpath(), _ORIG_CLASSPATH, pydoop.hadoop_conf()
)
os.environ["LIBHDFS_OPTS"] = os.getenv(
"LIBHDFS_OPTS", common.DEFAULT_LIBHDFS_OPTS
) + " -Djava.library.path=%s" % pydoop.hadoop_native()
init()
def reset():
pydoop.reset()
init()
# ---------------------
from .fs import hdfs, default_is_local
def open(hdfs_path, mode="r", buff_size=0, replication=0, blocksize=0,
user=None, encoding=None, errors=None):
"""
Open a file, returning an :class:`~.file.hdfs_file` object.
``hdfs_path`` and ``user`` are passed to :func:`~path.split`,
while the other args are passed to the :class:`~.file.hdfs_file`
constructor.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
return fs.open_file(path_, mode, buff_size, replication, blocksize,
encoding, errors)
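# Editor's sketch (hypothetical HDFS path, not part of the original module):
# the object returned by open() can be used as a context manager, which is
# also how the dump() and load() helpers below use it.
def _example_open():
    with open("/tmp/example.txt", "wt") as f:
        f.write(u"one line\n")
    with open("/tmp/example.txt", "rt") as f:
        print(f.read())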
def dump(data, hdfs_path, **kwargs):
"""\
Write ``data`` to ``hdfs_path``.
Keyword arguments are passed to :func:`open`, except for ``mode``,
which is forced to ``"w"`` (or ``"wt"`` for text data).
"""
kwargs["mode"] = "w" if isinstance(data, bintype) else "wt"
with open(hdfs_path, **kwargs) as fo:
i = 0
bufsize = common.BUFSIZE
while i < len(data):
fo.write(data[i: i + bufsize])
i += bufsize
fo.fs.close()
def load(hdfs_path, **kwargs):
"""\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The ``mode`` kwarg,
if given, must specify a read-only mode.
"""
m, _ = common.parse_mode(kwargs.get("mode", "r"))
if m != "r":
raise ValueError("opening mode must be readonly")
data = []
with open(hdfs_path, **kwargs) as fi:
bufsize = common.BUFSIZE
while 1:
chunk = fi.read(bufsize)
if chunk:
data.append(chunk)
else:
break
fi.fs.close()
return reduce(operator.add, data)
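# Editor's sketch (hypothetical HDFS path, not part of the original module):
# a dump()/load() round trip; text data selects mode "wt" as described above.
def _example_dump_load():
    dump(u"hello pydoop", "/tmp/example.txt")
    print(load("/tmp/example.txt", mode="rt"))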
def _cp_file(src_fs, src_path, dest_fs, dest_path, **kwargs):
kwargs.pop("mode", None)
kwargs["mode"] = "r"
with src_fs.open_file(src_path, **kwargs) as fi:
kwargs["mode"] = "w"
with dest_fs.open_file(dest_path, **kwargs) as fo:
bufsize = common.BUFSIZE
while 1:
chunk = fi.read(bufsize)
if chunk:
fo.write(chunk)
else:
break
def cp(src_hdfs_path, dest_hdfs_path, **kwargs):
"""\
Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``.
If ``src_hdfs_path`` is a directory, its contents will be copied
recursively. Source file(s) are opened for reading and copies are
opened for writing. Additional keyword arguments, if any, are
handled like in :func:`open`.
"""
src, dest = {}, {}
try:
for d, p in ((src, src_hdfs_path), (dest, dest_hdfs_path)):
d["host"], d["port"], d["path"] = path.split(p)
d["fs"] = hdfs(d["host"], d["port"])
# --- does src exist? ---
try:
src["info"] = src["fs"].get_path_info(src["path"])
except IOError:
raise IOError("no such file or directory: %r" % (src["path"]))
# --- src exists. Does dest exist? ---
try:
dest["info"] = dest["fs"].get_path_info(dest["path"])
except IOError:
if src["info"]["kind"] == "file":
_cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
**kwargs)
return
else:
dest["fs"].create_directory(dest["path"])
dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
for item in src["fs"].list_directory(src["path"]):
cp(item["name"], dest_hdfs_path, **kwargs)
return
# --- dest exists. Is it a file? ---
if dest["info"]["kind"] == "file":
raise IOError("%r already exists" % (dest["path"]))
# --- dest is a directory ---
dest["path"] = path.join(dest["path"], path.basename(src["path"]))
if dest["fs"].exists(dest["path"]):
raise IOError("%r already exists" % (dest["path"]))
if src["info"]["kind"] == "file":
_cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
**kwargs)
else:
dest["fs"].create_directory(dest["path"])
dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
for item in src["fs"].list_directory(src["path"]):
cp(item["name"], dest_hdfs_path, **kwargs)
finally:
for d in src, dest:
try:
d["fs"].close()
except KeyError:
pass
def put(src_path, dest_hdfs_path, **kwargs):
"""\
Copy the contents of ``src_path`` to ``dest_hdfs_path``.
``src_path`` is forced to be interpreted as an ordinary local path
(see :func:`~path.abspath`). The source file is opened for reading
and the copy is opened for writing. Additional keyword arguments,
if any, are handled like in :func:`open`.
"""
cp(path.abspath(src_path, local=True), dest_hdfs_path, **kwargs)
def get(src_hdfs_path, dest_path, **kwargs):
"""\
Copy the contents of ``src_hdfs_path`` to ``dest_path``.
``dest_path`` is forced to be interpreted as an ordinary local
path (see :func:`~path.abspath`). The source file is opened for
reading and the copy is opened for writing. Additional keyword
arguments, if any, are handled like in :func:`open`.
"""
cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs)
def mkdir(hdfs_path, user=None):
"""
Create a directory and its parents as needed.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
retval = fs.create_directory(path_)
fs.close()
return retval
def rmr(hdfs_path, user=None):
"""
Recursively remove files and directories.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
retval = fs.delete(path_)
fs.close()
return retval
def lsl(hdfs_path, user=None, recursive=False):
"""
Return a list of dictionaries of file properties.
If ``hdfs_path`` is a file, there is only one item corresponding to
the file itself; if it is a directory and ``recursive`` is
:obj:`False`, each list item corresponds to a file or directory
contained by it; if it is a directory and ``recursive`` is
:obj:`True`, the list contains one item for every file or directory
in the tree rooted at ``hdfs_path``.
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
if not recursive:
dir_list = fs.list_directory(path_)
else:
treewalk = fs.walk(path_)
top = next(treewalk)
if top['kind'] == 'directory':
dir_list = list(treewalk)
else:
dir_list = [top]
fs.close()
return dir_list
def ls(hdfs_path, user=None, recursive=False):
"""
Return a list of hdfs paths.
Works in the same way as :func:`lsl`, except for the fact that list
items are hdfs paths instead of dictionaries of properties.
"""
dir_list = lsl(hdfs_path, user, recursive)
return [d["name"] for d in dir_list]
def chmod(hdfs_path, mode, user=None):
"""
Change file mode bits.
:type hdfs_path: str
:param hdfs_path: the path to the file or directory
:type mode: int
:param mode: the bitmask to set it to (e.g., 0o777)
"""
host, port, path_ = path.split(hdfs_path, user)
fs = hdfs(host, port, user)
retval = fs.chmod(path_, mode)
fs.close()
return retval
def move(src, dest, user=None):
"""
Move or rename src to dest.
"""
src_host, src_port, src_path = path.split(src, user)
dest_host, dest_port, dest_path = path.split(dest, user)
src_fs = hdfs(src_host, src_port, user)
dest_fs = hdfs(dest_host, dest_port, user)
try:
retval = src_fs.move(src_path, dest_fs, dest_path)
return retval
finally:
src_fs.close()
dest_fs.close()
def chown(hdfs_path, user=None, group=None, hdfs_user=None):
"""
See :meth:`fs.hdfs.chown`.
"""
user = user or ''
group = group or ''
host, port, path_ = path.split(hdfs_path, hdfs_user)
with hdfs(host, port, hdfs_user) as fs:
return fs.chown(path_, user=user, group=group)
def rename(from_path, to_path, user=None):
"""
See :meth:`fs.hdfs.rename`.
"""
fhost, fport, fpath = path.split(from_path, user)
thost, tport, tpath = path.split(to_path, user)
with hdfs(thost, tport, user) as fs:
chost, cport = fs.host, fs.port
with hdfs(fhost, fport, user) as fs:
if fs.host != chost or fs.port != cport:
raise RuntimeError("can't do a cross-fs rename")
return fs.rename(fpath, tpath)
def renames(from_path, to_path, user=None):
"""
Rename ``from_path`` to ``to_path``, creating parents as needed.
"""
to_dir = path.dirname(to_path)
if to_dir:
mkdir(to_dir, user=user)
rename(from_path, to_path, user=user)
# direct bindings
stat = path.stat
lstat = path.lstat
access = path.access
utime = path.utime
# Copyright (c) 2015 Tintri. All rights reserved.
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Tintri storage.
"""
import datetime
import json
import math
import os
import re
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import requests
from six.moves import urllib
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers import nfs
LOG = logging.getLogger(__name__)
default_api_version = 'v310'
img_prefix = 'image-'
tintri_path = '/tintri/'
tintri_opts = [
cfg.StrOpt('tintri_server_hostname',
help='The hostname (or IP address) for the storage system'),
cfg.StrOpt('tintri_server_username',
help='User name for the storage system'),
cfg.StrOpt('tintri_server_password',
help='Password for the storage system',
secret=True),
cfg.StrOpt('tintri_api_version',
default=default_api_version,
help='API version for the storage system'),
cfg.IntOpt('tintri_image_cache_expiry_days',
default=30,
help='Delete unused image snapshots older than this many days'),
cfg.StrOpt('tintri_image_shares_config',
help='Path to image nfs shares file'),
]
CONF = cfg.CONF
CONF.register_opts(tintri_opts)
@interface.volumedriver
class TintriDriver(driver.ManageableVD,
driver.CloneableImageVD,
driver.SnapshotVD,
nfs.NfsDriver):
"""Base class for Tintri driver.
Version History
.. code-block:: none
2.1.0.1 - Liberty driver
2.2.0.1 - Mitaka driver
-- Retype
-- Image cache clean up
-- Direct image clone fix
"""
VENDOR = 'Tintri'
VERSION = '2.2.0.1'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Tintri_CI"
REQUIRED_OPTIONS = ['tintri_server_hostname', 'tintri_server_username',
'tintri_server_password']
def __init__(self, *args, **kwargs):
self._execute = None
self._context = None
super(TintriDriver, self).__init__(*args, **kwargs)
self._execute_as_root = True
self.configuration.append_config_values(tintri_opts)
self.cache_cleanup = False
self._mounted_image_shares = []
def do_setup(self, context):
self._image_shares_config = getattr(self.configuration,
'tintri_image_shares_config')
super(TintriDriver, self).do_setup(context)
self._context = context
self._check_ops(self.REQUIRED_OPTIONS, self.configuration)
self._hostname = getattr(self.configuration, 'tintri_server_hostname')
self._username = getattr(self.configuration, 'tintri_server_username')
self._password = getattr(self.configuration, 'tintri_server_password')
self._api_version = getattr(self.configuration, 'tintri_api_version',
CONF.tintri_api_version)
self._image_cache_expiry = getattr(self.configuration,
'tintri_image_cache_expiry_days',
CONF.tintri_image_cache_expiry_days)
def get_pool(self, volume):
"""Returns pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
return volume['provider_location']
def _get_client(self):
"""Returns a Tintri REST client connection."""
return TClient(self._hostname, self._username, self._password,
self._api_version)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
(__, path) = self._get_export_ip_path(snapshot.volume_id)
volume_path = '%s/%s' % (path, snapshot.volume_name)
model_update = {}
with self._get_client() as c:
provider_id = c.create_snapshot(volume_path,
snapshot.volume.display_name or
snapshot.volume_name,
snapshot.volume_id,
snapshot.display_name or
snapshot.name)
snapshot.provider_id = provider_id
# Store Tintri snapshot ID as snapshot provider_id
model_update['provider_id'] = provider_id
return model_update
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if snapshot.provider_id:
with self._get_client() as c:
c.delete_snapshot(snapshot.provider_id)
else:
LOG.info(_LI('Snapshot %s not found'), snapshot.name)
def _check_ops(self, required_ops, configuration):
"""Ensures that the options we care about are set."""
for op in required_ops:
if not getattr(configuration, op):
LOG.error(_LE('Configuration value %s is not set.'), op)
raise exception.InvalidConfigurationValue(option=op,
value=None)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from snapshot."""
vol_size = volume.size
snap_size = snapshot.volume_size
self._clone_snapshot(snapshot.provider_id, volume.name,
snapshot.volume_id)
share = self._get_provider_location(snapshot.volume_id)
volume['provider_location'] = share
path = self.local_path(volume)
self._set_rw_permissions(path)
if vol_size != snap_size:
try:
self.extend_volume(volume, vol_size)
except Exception:
LOG.error(_LE('Resizing %s failed. Cleaning volume.'),
volume.name)
self._delete_file(path)
raise
return {'provider_location': volume['provider_location']}
def _clone_snapshot(self, snapshot_id, clone_name, volume_id, share=None):
"""Clones volume from snapshot."""
(host, path) = self._get_export_ip_path(volume_id, share)
clone_path = '%s/%s-d' % (path, clone_name)
with self._get_client() as c:
c.clone_volume(snapshot_id, clone_path)
self._move_cloned_volume(clone_name, volume_id, share)
def _move_cloned_volume(self, clone_name, volume_id, share=None):
local_path = self._get_local_path(volume_id, share)
source_path = os.path.join(local_path, clone_name + '-d')
if self._is_volume_present(source_path):
source_file = os.listdir(source_path)[0]
source = os.path.join(source_path, source_file)
target = os.path.join(local_path, clone_name)
moved = self._move_file(source, target)
self._execute('rm', '-rf', source_path,
run_as_root=self._execute_as_root)
if not moved:
msg = (_('Failed to move volume %s.') % source)
raise exception.VolumeDriverException(msg)
else:
raise exception.VolumeDriverException(
_('Volume %s not found.') % source_path)
def _clone_volume_to_volume(self, volume_name, clone_name,
volume_display_name, volume_id,
share=None, dst=None, image_id=None):
"""Creates volume snapshot then clones volume."""
(__, path) = self._get_export_ip_path(volume_id, share)
volume_path = '%s/%s' % (path, volume_name)
if dst:
(___, dst_path) = self._get_export_ip_path(None, dst)
clone_path = '%s/%s-d' % (dst_path, clone_name)
else:
clone_path = '%s/%s-d' % (path, clone_name)
with self._get_client() as c:
if share and image_id:
snapshot_id = self._create_image_snapshot(volume_name,
share,
image_id,
volume_display_name)
else:
snapshot_id = c.create_snapshot(
volume_path, volume_display_name, volume_id, volume_name,
deletion_policy='DELETE_ON_ZERO_CLONE_REFERENCES')
c.clone_volume(snapshot_id, clone_path)
self._move_cloned_volume(clone_name, volume_id, dst or share)
@utils.synchronized('cache_cleanup')
def _initiate_image_cache_cleanup(self):
if self.cache_cleanup:
LOG.debug('Image cache cleanup in progress.')
return
else:
self.cache_cleanup = True
timer = loopingcall.FixedIntervalLoopingCall(
self._cleanup_cache)
timer.start(interval=None)
return timer
def _cleanup_cache(self):
LOG.debug('Cache cleanup: starting.')
try:
# Cleanup used cached image snapshots 30 days and older
t = datetime.datetime.utcnow() - datetime.timedelta(
days=self._image_cache_expiry)
date = t.strftime("%Y-%m-%dT%H:%M:%S")
with self._get_client() as c:
# Get eligible snapshots to clean
image_snaps = c.get_image_snapshots_to_date(date)
if image_snaps:
for snap in image_snaps:
uuid = snap['uuid']['uuid']
LOG.debug(
'Cache cleanup: deleting image snapshot %s', uuid)
try:
c.delete_snapshot(uuid)
except Exception:
LOG.exception(_LE('Unexpected exception during '
'cache cleanup of snapshot %s'),
uuid)
else:
LOG.debug('Cache cleanup: nothing to clean')
finally:
self.cache_cleanup = False
LOG.debug('Cache cleanup: finished')
raise loopingcall.LoopingCallDone()
def _update_volume_stats(self):
"""Retrieves stats info from volume group."""
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.VENDOR
data['vendor_name'] = self.VENDOR
data['driver_version'] = self.get_version()
data['storage_protocol'] = self.driver_volume_type
self._ensure_shares_mounted()
self._initiate_image_cache_cleanup()
pools = []
for share in self._mounted_shares:
pool = dict()
capacity, free, used = self._get_capacity_info(share)
pool['pool_name'] = share
pool['total_capacity_gb'] = capacity / float(units.Gi)
pool['free_capacity_gb'] = free / float(units.Gi)
pool['reserved_percentage'] = 0
pool['QoS_support'] = True
pools.append(pool)
data['pools'] = pools
self._stats = data
def _get_provider_location(self, volume_id):
"""Returns provider location for given volume."""
volume = self.db.volume_get(self._context, volume_id)
return volume.provider_location
def _get_host_ip(self, volume_id):
"""Returns IP address for the given volume."""
return self._get_provider_location(volume_id).split(':')[0]
def _get_export_path(self, volume_id):
"""Returns NFS export path for the given volume."""
return self._get_provider_location(volume_id).split(':')[1]
def _resolve_hostname(self, hostname):
"""Resolves host name to IP address."""
res = socket.getaddrinfo(hostname, None)[0]
family, socktype, proto, canonname, sockaddr = res
return sockaddr[0]
def _is_volume_present(self, volume_path):
"""Checks if volume exists."""
try:
self._execute('ls', volume_path,
run_as_root=self._execute_as_root)
except Exception:
return False
return True
def _get_volume_path(self, nfs_share, volume_name):
"""Gets local volume path for given volume name on given nfs share."""
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_size = volume.size
src_vol_size = src_vref.size
self._clone_volume_to_volume(src_vref.name, volume.name,
src_vref.display_name,
src_vref.id)
share = self._get_provider_location(src_vref.id)
volume['provider_location'] = share
path = self.local_path(volume)
self._set_rw_permissions(path)
if vol_size != src_vol_size:
try:
self.extend_volume(volume, vol_size)
except Exception:
LOG.error(_LE('Resizing %s failed. Cleaning volume.'),
volume.name)
self._delete_file(path)
raise
return {'provider_location': volume['provider_location']}
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetches the image from image_service and write it to the volume."""
super(TintriDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
LOG.info(_LI('Copied image to volume %s using regular download.'),
volume['name'])
self._create_image_snapshot(volume['name'],
volume['provider_location'], image_id,
img_prefix + image_id)
def _create_image_snapshot(self, volume_name, share, image_id, image_name):
"""Creates an image snapshot."""
snapshot_name = img_prefix + image_id
LOG.info(_LI('Creating image snapshot %s'), snapshot_name)
(host, path) = self._get_export_ip_path(None, share)
volume_path = '%s/%s' % (path, volume_name)
@utils.synchronized(snapshot_name, external=True)
def _do_snapshot():
with self._get_client() as c:
snapshot_id = c.get_snapshot(image_id)
if not snapshot_id:
snapshot_id = c.create_snapshot(volume_path, image_name,
image_id, snapshot_name)
return snapshot_id
try:
return _do_snapshot()
except Exception as e:
LOG.warning(_LW('Exception while creating image %(image_id)s '
'snapshot. Exception: %(exc)s'),
{'image_id': image_id, 'exc': e})
def _find_image_snapshot(self, image_id):
"""Finds image snapshot."""
with self._get_client() as c:
return c.get_snapshot(image_id)
def _clone_image_snapshot(self, snapshot_id, dst, share):
"""Clones volume from image snapshot."""
file_path = self._get_volume_path(share, dst)
if not os.path.exists(file_path):
LOG.info(_LI('Cloning from snapshot to destination %s'), dst)
self._clone_snapshot(snapshot_id, dst, volume_id=None,
share=share)
def _delete_file(self, path):
"""Deletes file from disk and return result as boolean."""
try:
LOG.debug('Deleting file at path %s', path)
cmd = ['rm', '-f', path]
self._execute(*cmd, run_as_root=self._execute_as_root)
return True
except Exception as e:
LOG.warning(_LW('Exception during deleting %s'), e)
return False
def _move_file(self, source_path, dest_path):
"""Moves source to destination."""
@utils.synchronized(dest_path, external=True)
def _do_move(src, dst):
if os.path.exists(dst):
LOG.warning(_LW('Destination %s already exists.'), dst)
return False
self._execute('mv', src, dst, run_as_root=self._execute_as_root)
return True
try:
return _do_move(source_path, dest_path)
except Exception as e:
LOG.warning(_LW('Exception moving file %(src)s. Message: %(e)s'),
{'src': source_path, 'e': e})
return False
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Creates a volume efficiently from an existing image.
image_location is a string whose format depends on the
image service backend in use. The driver should use it
to determine whether cloning is possible.
Returns a tuple: a dict of volume properties (e.g. provider_location)
and a boolean indicating whether cloning occurred.
"""
image_name = image_meta['name']
image_id = image_meta['id']
if 'properties' in image_meta:
provider_location = image_meta['properties'].get(
'provider_location')
if provider_location:
image_location = (provider_location, None)
cloned = False
post_clone = False
try:
snapshot_id = self._find_image_snapshot(image_id)
if snapshot_id:
cloned = self._clone_from_snapshot(volume, image_id,
snapshot_id)
else:
cloned = self._direct_clone(volume, image_location,
image_id, image_name)
if cloned:
post_clone = self._post_clone_image(volume)
except Exception as e:
LOG.info(_LI('Image cloning unsuccessful for image '
'%(image_id)s. Message: %(msg)s'),
{'image_id': image_id, 'msg': e})
vol_path = self.local_path(volume)
volume['provider_location'] = None
if os.path.exists(vol_path):
self._delete_file(vol_path)
finally:
cloned = cloned and post_clone
share = volume['provider_location'] if cloned else None
bootable = True if cloned else False
return {'provider_location': share, 'bootable': bootable}, cloned
def _clone_from_snapshot(self, volume, image_id, snapshot_id):
"""Clones a copy from image snapshot."""
cloned = False
LOG.info(_LI('Cloning image %s from snapshot.'), image_id)
for share in self._mounted_shares:
# Repeat tries in other shares if failed in some
LOG.debug('Image share: %s', share)
if (share and
self._is_share_vol_compatible(volume, share)):
try:
self._clone_image_snapshot(snapshot_id, volume['name'],
share)
cloned = True
volume['provider_location'] = share
break
except Exception:
LOG.warning(_LW('Unexpected exception during '
'image cloning in share %s'), share)
return cloned
def _direct_clone(self, volume, image_location, image_id, image_name):
"""Clones directly in nfs share."""
LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
cloned = False
image_location = self._get_image_nfs_url(image_location)
share = self._is_cloneable_share(image_location)
run_as_root = self._execute_as_root
dst_share = None
for dst in self._mounted_shares:
if dst and self._is_share_vol_compatible(volume, dst):
dst_share = dst
LOG.debug('Image dst share: %s', dst)
break
if not dst_share:
return cloned
LOG.debug('Share is cloneable %s', dst_share)
volume['provider_location'] = dst_share
(__, ___, img_file) = image_location.rpartition('/')
dir_path = self._get_mount_point_for_share(share)
dst_path = self._get_mount_point_for_share(dst_share)
img_path = '%s/%s' % (dir_path, img_file)
img_info = image_utils.qemu_img_info(img_path,
run_as_root=run_as_root)
if img_info.file_format == 'raw':
LOG.debug('Image is raw %s', image_id)
self._clone_volume_to_volume(
img_file, volume['name'], image_name,
volume_id=None, share=share, dst=dst_share, image_id=image_id)
cloned = True
else:
LOG.info(_LI('Image will locally be converted to raw %s'),
image_id)
dst = '%s/%s' % (dst_path, volume['name'])
image_utils.convert_image(img_path, dst, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst, run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_('Converted to raw, but '
'format is now %s') % data.file_format)
else:
cloned = True
self._create_image_snapshot(
volume['name'], volume['provider_location'],
image_id, image_name)
return cloned
def _post_clone_image(self, volume):
"""Performs operations post image cloning."""
LOG.info(_LI('Performing post clone for %s'), volume['name'])
vol_path = self.local_path(volume)
self._set_rw_permissions(vol_path)
self._resize_image_file(vol_path, volume['size'])
return True
def _resize_image_file(self, path, new_size):
"""Resizes the image file on share to new size."""
LOG.debug('Checking file for resize.')
if self._is_file_size_equal(path, new_size):
return
else:
LOG.info(_LI('Resizing file to %sG'), new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if self._is_file_size_equal(path, new_size):
return
else:
raise exception.InvalidResults(
_('Resizing image file failed.'))
def _is_cloneable_share(self, image_location):
"""Finds if the image at location is cloneable."""
conn, dr = self._check_nfs_path(image_location)
return self._is_share_in_use(conn, dr)
def _check_nfs_path(self, image_location):
"""Checks if the nfs path format is matched.
WebNFS url format with relative-path is supported.
Accepting all characters in path-names and checking against
the mounted shares which will contain only allowed path segments.
Returns connection and dir details.
"""
conn, dr = None, None
if image_location:
nfs_loc_pattern = \
r'^nfs://(([\w\-\.]+:[\d]+|[\w\-\.]+)(/[^/].*)*(/[^/\\]+))$'
matched = re.match(nfs_loc_pattern, image_location)
if not matched:
LOG.debug('Image location not in the expected format %s',
image_location)
else:
conn = matched.group(2)
dr = matched.group(3) or '/'
return conn, dr
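# Editor's sketch (made-up host and paths, not part of the original driver):
# what the pattern above accepts and what it rejects.
def _example_check_nfs_path():
    pattern = r'^nfs://(([\w\-\.]+:[\d]+|[\w\-\.]+)(/[^/].*)*(/[^/\\]+))$'
    ok = re.match(pattern, 'nfs://filer-01:2049/vol/images/disk.img')
    print(ok.group(2), ok.group(3))   # 'filer-01:2049' '/vol/images'
    print(re.match(pattern, 'http://filer-01/vol/images/disk.img'))  # None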
def _is_share_in_use(self, conn, dr):
"""Checks if share is cinder mounted and returns it."""
try:
if conn:
host = conn.split(':')[0]
ip = self._resolve_hostname(host)
for sh in self._mounted_shares + self._mounted_image_shares:
sh_ip = self._resolve_hostname(sh.split(':')[0])
sh_exp = sh.split(':')[1]
if sh_ip == ip and sh_exp == dr:
LOG.debug('Found share match %s', sh)
return sh
except Exception:
LOG.warning(_LW('Unexpected exception while listing used share.'))
def _get_image_nfs_url(self, image_location):
"""Gets direct url for nfs backend.
It creates direct url from image_location
which is a tuple with direct_url and locations.
Returns url with nfs scheme if nfs store else returns url.
It needs to be verified by backend before use.
"""
direct_url, locations = image_location
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
# Locations will be always a list of one until
# bp multiple-image-locations is introduced
if not locations:
return direct_url
location = locations[0]
url = location['url']
if not location['metadata']:
return url
location_type = location['metadata'].get('type')
if not location_type or location_type.lower() != "nfs":
return url
share_location = location['metadata'].get('share_location')
mount_point = location['metadata'].get('mount_point')
if not share_location or not mount_point:
return url
url_parse = urllib.parse.urlparse(url)
abs_path = os.path.join(url_parse.netloc, url_parse.path)
rel_path = os.path.relpath(abs_path, mount_point)
direct_url = "%s/%s" % (share_location, rel_path)
return direct_url
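# Editor's sketch (hypothetical values, not part of the original driver): how
# the direct nfs url is assembled from a location's metadata as above.
def _example_image_nfs_url():
    url = 'file:///opt/stack/data/glance/images/abc123'
    share_location = 'nfs://filer-01/tintri/glance'
    mount_point = '/opt/stack/data/glance/images'
    p = urllib.parse.urlparse(url)
    rel_path = os.path.relpath(os.path.join(p.netloc, p.path), mount_point)
    print("%s/%s" % (share_location, rel_path))  # nfs://filer-01/tintri/glance/abc123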
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
return self._is_share_eligible(share, volume['size'])
def _can_share_hold_size(self, share, size):
"""Checks if volume can hold image with size."""
_tot_size, tot_available, _tot_allocated = self._get_capacity_info(
share)
if tot_available < size:
msg = _('Container size smaller than required file size.')
raise exception.VolumeDriverException(msg)
def _get_export_ip_path(self, volume_id=None, share=None):
"""Returns export ip and path.
One of volume id or share is used to return the values.
"""
if volume_id:
host_ip = self._get_host_ip(volume_id)
export_path = self._get_export_path(volume_id)
elif share:
host_ip = share.split(':')[0]
export_path = share.split(':')[1]
else:
raise exception.InvalidInput(
reason=_('A volume ID or share was not specified.'))
return host_ip, export_path
def _get_local_path(self, volume_id=None, share=None):
"""Returns local path.
One of volume id or share is used to return the values.
"""
if volume_id:
local_path = self._get_mount_point_for_share(
self._get_provider_location(volume_id))
elif share:
local_path = self._get_mount_point_for_share(share)
else:
raise exception.InvalidInput(
reason=_('A volume ID or share was not specified.'))
return local_path
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref)
LOG.debug('Managing volume %(vol)s with ref %(ref)s',
{'vol': volume['id'], 'ref': existing_ref})
if volume_name != volume['name']:
src = os.path.join(nfs_mount, volume_name)
dst = os.path.join(nfs_mount, volume['name'])
if not self._move_file(src, dst):
msg = (_('Failed to manage volume %s.') %
existing_ref['source-name'])
raise exception.VolumeDriverException(msg)
self._set_rw_permissions(dst)
LOG.info(_LI('Manage volume %s'), volume['name'])
return {'provider_location': nfs_share}
def manage_existing_get_size(self, volume, existing_ref):
"""Returns size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
nfs_share, nfs_mount, volume_name = self._get_share_mount(existing_ref)
try:
volume_path = os.path.join(nfs_mount, volume_name)
vol_size = int(math.ceil(float(utils.get_file_size(volume_path)) /
units.Gi))
except OSError:
msg = (_('Failed to get size of volume %s') %
existing_ref['source-name'])
raise exception.VolumeDriverException(msg)
return vol_size
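# Editor's note (illustrative only): the size above is rounded up to the next
# whole GiB, e.g. a hypothetical 1.2 GiB backing file is managed as 2 GB.
def _example_round_up_to_gib():
    file_size_bytes = int(1.2 * units.Gi)
    print(int(math.ceil(float(file_size_bytes) / units.Gi)))  # -> 2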
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
:param volume: Cinder volume to unmanage
"""
volume_path = self.local_path(volume)
LOG.info(_LI('Unmanage volume %s'), volume_path)
def _convert_volume_share(self, volume_share):
"""Converts the share name to IP address."""
share_split = volume_share.rsplit(':', 1)
return self._resolve_hostname(share_split[0]) + ':' + share_split[1]
def _get_share_mount(self, vol_ref):
"""Get the NFS share, NFS mount, and volume path from reference.
:param vol_ref: Driver-specific information used to identify a volume
:return: NFS Share, NFS mount, volume path
"""
if 'source-name' not in vol_ref or not vol_ref['source-name']:
msg = _('Volume reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=msg)
volume_share = self._convert_volume_share(vol_ref['source-name'])
for nfs_share in self._mounted_shares:
share = self._convert_volume_share(nfs_share)
(__, match, volume_name) = volume_share.partition(share)
if match == share:
volume_name = volume_name.lstrip('/')
nfs_mount = self._get_mount_point_for_share(nfs_share)
volume_path = os.path.join(nfs_mount, volume_name)
if os.path.isfile(volume_path):
LOG.debug('Found volume %(path)s on share %(share)s',
{'path': volume_path, 'share': nfs_share})
return nfs_share, nfs_mount, volume_name
else:
LOG.debug('Volume ref %(ref)s not on share %(share)s',
{'ref': vol_ref, 'share': nfs_share})
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=_('Volume not found.'))
def retype(self, context, volume, new_type, diff, host):
"""Retype from one volume type to another.
At this point Tintri VMstore does not differentiate between
volume types on the same array. This is a no-op for us.
"""
return True, None
def _ensure_shares_mounted(self):
# Mount image shares, we do not need to store these mounts
# in _mounted_shares
mounted_image_shares = []
if self._image_shares_config:
self._load_shares_config(self._image_shares_config)
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
mounted_image_shares.append(share)
except Exception:
LOG.exception(_LE(
'Exception during mounting.'))
self._mounted_image_shares = mounted_image_shares
# Mount Cinder shares
super(TintriDriver, self)._ensure_shares_mounted()
class TClient(object):
"""REST client for Tintri storage."""
def __init__(self, hostname, username, password,
api_version=default_api_version):
"""Initializes a connection to Tintri server."""
self.api_url = 'https://' + hostname + '/api'
self.api_version = api_version
self.session_id = self.login(username, password)
self.headers = {'content-type': 'application/json',
'cookie': 'JSESSIONID=' + self.session_id}
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.logout()
def get(self, api):
return self.get_query(api, None)
def get_query(self, api, query):
url = self.api_url + api
return requests.get(url, headers=self.headers,
params=query, verify=False)
def delete(self, api):
url = self.api_url + api
return requests.delete(url, headers=self.headers, verify=False)
def put(self, api, payload):
url = self.api_url + api
return requests.put(url, data=json.dumps(payload),
headers=self.headers, verify=False)
def post(self, api, payload):
url = self.api_url + api
return requests.post(url, data=json.dumps(payload),
headers=self.headers, verify=False)
def login(self, username, password):
# Payload, header and URL for login
headers = {'content-type': 'application/json',
'Tintri-Api-Client':
'Tintri-Cinder-Driver-%s' % TintriDriver.VERSION}
payload = {'username': username,
'password': password,
'typeId': 'com.tintri.api.rest.vcommon.dto.rbac.'
'RestApiCredentials'}
url = self.api_url + '/' + self.api_version + '/session/login'
r = requests.post(url, data=json.dumps(payload),
headers=headers, verify=False)
if r.status_code != 200:
msg = _('Failed to login for user %s.') % username
raise exception.VolumeDriverException(msg)
return r.cookies['JSESSIONID']
def logout(self):
url = self.api_url + '/' + self.api_version + '/session/logout'
requests.get(url, headers=self.headers, verify=False)
@staticmethod
def _remove_prefix(volume_path, prefix):
if volume_path.startswith(prefix):
return volume_path[len(prefix):]
else:
return volume_path
def create_snapshot(self, volume_path, volume_name, volume_id,
snapshot_name, deletion_policy=None):
"""Creates a volume snapshot."""
request = {'typeId': 'com.tintri.api.rest.' + self.api_version +
'.dto.domain.beans.cinder.CinderSnapshotSpec',
'file': TClient._remove_prefix(volume_path, tintri_path),
'vmName': volume_name or snapshot_name,
'description': snapshot_name + ' (' + volume_id + ')',
'vmTintriUuid': volume_id,
'instanceId': volume_id,
'snapshotCreator': 'Cinder',
'deletionPolicy': deletion_policy,
}
payload = '/' + self.api_version + '/cinder/snapshot'
r = self.post(payload, request)
if r.status_code != 200:
msg = _('Failed to create snapshot for volume %s.') % volume_path
raise exception.VolumeDriverException(msg)
return r.json()[0]
def get_snapshot(self, volume_id):
"""Gets a volume snapshot."""
filter = {'vmUuid': volume_id}
payload = '/' + self.api_version + '/snapshot'
r = self.get_query(payload, filter)
if r.status_code != 200:
msg = _('Failed to get snapshot for volume %s.') % volume_id
raise exception.VolumeDriverException(msg)
if int(r.json()['filteredTotal']) > 0:
return r.json()['items'][0]['uuid']['uuid']
def get_image_snapshots_to_date(self, date):
filter = {'sortedBy': 'createTime',
'target': 'SNAPSHOT',
'consistency': 'CRASH_CONSISTENT',
'hasClone': 'No',
'type': 'CINDER_GENERATED_SNAPSHOT',
'contain': 'image-',
'limit': '100',
'page': '1',
'sortOrder': 'DESC',
'since': '1970-01-01T00:00:00',
'until': date,
}
payload = '/' + self.api_version + '/snapshot'
r = self.get_query(payload, filter)
if r.status_code != 200:
msg = _('Failed to get image snapshots.')
raise exception.VolumeDriverException(msg)
return r.json()['items']
def delete_snapshot(self, snapshot_uuid):
"""Deletes a snapshot."""
url = '/' + self.api_version + '/snapshot/'
self.delete(url + snapshot_uuid)
def clone_volume(self, snapshot_uuid, volume_path):
"""Clones a volume from snapshot."""
request = {'typeId': 'com.tintri.api.rest.' + self.api_version +
'.dto.domain.beans.cinder.CinderCloneSpec',
'destinationPaths':
[TClient._remove_prefix(volume_path, tintri_path)],
'tintriSnapshotUuid': snapshot_uuid,
}
url = '/' + self.api_version + '/cinder/clone'
r = self.post(url, request)
if r.status_code != 200 and r.status_code != 204:
msg = _('Failed to clone volume from snapshot %s.') % snapshot_uuid
raise exception.VolumeDriverException(msg)
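# Editor's sketch (hypothetical endpoint, credentials and ids, not part of the
# original driver): TClient is intended to be used as a context manager so the
# REST session is logged out on exit, mirroring _get_client() above.
def _example_tclient_usage():
    with TClient('vmstore.example.com', 'admin', 'secret') as c:
        snapshot_uuid = c.get_snapshot('volume-uuid-1234')
        if snapshot_uuid:
            c.clone_volume(snapshot_uuid, '/tintri/cinder/volume-clone')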
import sublime
import sublime_plugin
import string
import re
import sys
import time
import base64
import html.entities as htmlentitydefs
from cgi import escape
from hashlib import md5,sha1
from datetime import datetime
from random import sample, choice, randrange
import os, socket, urllib
import binascii
import json
import pprint
if sys.hexversion >= 0x3000000:
def unichr(c):
return chr(c)
class ConvertTabsToSpacesCommand(sublime_plugin.TextCommand):
#Convert Tabs To Spaces
def run(self, edit):
sublime.status_message('Convert tabs to spaces.')
tab_size = int(self.view.settings().get('tab_size', 4))
for region in self.view.sel():
if not region.empty():
self.view.replace(edit, region, self.view.substr(region).expandtabs(tab_size))
else:
self.view.run_command('select_all')
self.view.replace(edit, self.view.sel()[0], self.view.substr(self.view.sel()[0]).expandtabs(tab_size))
self.view.sel().clear()
class ConvertSpacesToTabsCommand(sublime_plugin.TextCommand):
#Convert Spaces To Tabs
def run(self, edit):
sublime.status_message('Convert spaces to tabs.')
tab_size = str(self.view.settings().get('tab_size', 4))
for region in self.view.sel():
if not region.empty():
self.view.replace(edit, region, re.sub(r' {' + tab_size + r'}', r'\t', self.view.substr(region)))
else:
self.view.run_command('select_all')
self.view.replace(edit, self.view.sel()[0], re.sub(r' {' + tab_size + r'}', r'\t', self.view.substr(self.view.sel()[0])))
self.view.sel().clear()
class ConvertCharsToHtmlCommand(sublime_plugin.TextCommand):
#Convert Chars into XML/HTML Entities
def run(self, edit):
for region in self.view.sel():
if not region.empty():
self.view.replace(edit, region, escape(self.view.substr(region), True))
class ConvertHtmlToCharsCommand(sublime_plugin.TextCommand):
#Convert XML/HTML Entities into Chars
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = re.sub('&(%s);' % '|'.join(htmlentitydefs.name2codepoint),
lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]), self.view.substr(region))
self.view.replace(edit, region, text)
class ConvertCamelUnderscoresCommand(sublime_plugin.TextCommand):
#Convert CamelCase to under_scores and vice versa
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
text = self.toCamelCase(text) if '_' in text else self.toUnderscores(text)
self.view.replace(edit, region, text)
def toUnderscores(self, name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def toCamelCase(self, name):
return ''.join(map(lambda x: x.capitalize(), name.split('_')))
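# Editor's sketch (illustrative only, independent of the Sublime API): the two
# helpers above as free functions, with a worked round-trip example.
def _example_camel_underscores():
    def to_underscores(name):
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
    def to_camel_case(name):
        return ''.join(part.capitalize() for part in name.split('_'))
    print(to_underscores('getHTTPResponseCode'))    # get_http_response_code
    print(to_camel_case('get_http_response_code'))  # GetHttpResponseCode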
class ConvertToUnicodeNotationCommand(sublime_plugin.TextCommand):
#Convert string to Unicode notation
def run(self, edit):
pattern = re.compile(r'\s+')
for region in self.view.sel():
if not region.empty():
text = ''
for c in self.view.substr(region):
if not re.match(pattern, c) and (ord(c) < 0x20 or ord(c) > 0x7e):
text += '\\u{0:04X}'.format(ord(c))
else:
text += c
self.view.replace(edit, region, text)
class ConvertFromUnicodeNotationCommand(sublime_plugin.TextCommand):
#Convert string from Unicode notation
def run(self, edit):
pattern = re.compile(r'(\\u)([0-9a-fA-F]{2,4})')
for region in self.view.sel():
if not region.empty():
text = re.sub(pattern, lambda m: unichr(int(m.group(2), 16)), self.view.substr(region))
self.view.replace(edit, region, text)
class ConvertToBase64Command(sublime_plugin.TextCommand):
#Encode string with base64
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).encode(self.enc())
t = base64.b64encode(text)
txt = str(t,'ascii')
self.view.replace(edit, region, txt)
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class ConvertFromBase64Command(sublime_plugin.TextCommand):
#Decode string with base64
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).encode(self.enc())
t = base64.b64decode(text)
txt = str(t,'ascii')
self.view.replace(edit, region, txt)
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class ConvertToHexCommand(sublime_plugin.TextCommand):
#Convert string to hex
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).encode(self.enc())
t = binascii.hexlify(text)
txt = str(t,'ascii')
self.view.replace(edit, region, txt)
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class ConvertFromHexCommand(sublime_plugin.TextCommand):
#Convert string from hex
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).encode(self.enc())
t = binascii.unhexlify(text)
txt = str(t,'ascii')
self.view.replace(edit, region, txt)
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class ConvertHexToRgbCommand(sublime_plugin.TextCommand):
#Convert hex to rgb color
def run(self, edit):
for region in self.view.sel():
if not region.empty():
                text = self.view.substr(region)
self.view.replace(edit, region, self.hex_to_rgb(text))
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
def hex_to_rgb(self, value):
value = value.lstrip('#')
lv = len(value)
if lv == 6:
            rgb = tuple(str(int(value[i:i+lv//3], 16)) for i in range(0, lv, lv//3))
if lv == 3:
rgb = tuple(str(int(value[i:i+1], 16)*17) for i in range(0, 3))
if lv == 1:
v = str(int(value, 16)*17)
rgb = v, v, v
return 'rgb(' + ','.join(rgb) + ')'
class ConvertRgbToHexCommand(sublime_plugin.TextCommand):
#Convert rgb to hex color
def run(self, edit):
for region in self.view.sel():
if not region.empty():
                text = self.view.substr(region)
                reg_rgb = r'^rgb[a]?\((\s*\d+\s*),(\s*\d+\s*),(\s*\d+\s*),?(\s*(0?.?\d)+\s*)?\)$'
rgb_match = re.match(reg_rgb, text)
if rgb_match is not None:
self.view.replace(edit, region, self.rgb_to_hex(rgb_match))
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
def rgb_to_hex(self, rgb_match):
"""Converts an rgb(a) value to a hex value.
Attributes:
self: The Regionset object.
rgb_match: The reg exp collection of matches.
"""
# Convert all values to 10-base integers, strip the leading characters,
# convert to hex and fill with leading zero's.
val_1 = hex(int(rgb_match.group(1), 10))[2:].zfill(2)
val_2 = hex(int(rgb_match.group(2), 10))[2:].zfill(2)
val_3 = hex(int(rgb_match.group(3), 10))[2:].zfill(2)
        # Return the formatted string with the new values.
return '#%s%s%s' % (val_1, val_2, val_3)
class ConvertMd5Command(sublime_plugin.TextCommand):
#Calculate MD5 hash
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).encode(self.enc())
self.view.replace(edit, region, md5(text).hexdigest())
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class ConvertSha1Command(sublime_plugin.TextCommand):
#Calculate SHA1 hash
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).encode(self.enc())
self.view.replace(edit, region, sha1(text).hexdigest())
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class ConvertTimeFormatCommand(sublime_plugin.TextCommand):
#This will allow you to convert epoch to human readable date
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
if re.match('^([0-9]+)$', text):
result = self.from_unix(text)
else:
result = self.to_unix(text)
if result:
self.view.replace(edit, region, result)
else:
sublime.status_message('Convert error.')
def from_unix(self, timestamp):
sublime.status_message('Convert from epoch to human readable date.')
timestamp = float(timestamp)
stamp = datetime.fromtimestamp(timestamp)
return stamp.strftime("%Y-%m-%d %H:%M")
    def to_unix(self, timestr):
        sublime.status_message('Convert from human readable date to epoch.')
        try:
            # Parse the same format that from_unix() produces, e.g. '2014-01-31 12:30'.
            parsed = datetime.strptime(timestr, "%Y-%m-%d %H:%M")
            return '%d' % (time.mktime(parsed.timetuple()))
        except (ValueError, OverflowError):
            return False
class InsertTimestampCommand(sublime_plugin.TextCommand):
#This will allow you to insert timestamp to current position
def run(self, edit):
for region in self.view.sel():
self.view.insert(edit, region.begin(), datetime.now().strftime("%Y-%m-%d %H:%M"))
class GeneratePasswordCommand(sublime_plugin.TextCommand):
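    #Generate a random password; visually ambiguous characters (0, 1, I, l, O, o) are excluded from the alphabet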
chars = "23456789abcdefghijkmnpqrstuvwxyzABCDEFGHKMNPQRSTUVWXYZ"
def run(self, edit, length=16):
length = int(length)
self.view.insert(edit, self.view.sel()[0].begin(), ''.join(sample(self.chars, length)))
class StringUtilitiesExtIpCommand(sublime_plugin.TextCommand):
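    #Insert the external IP address, as reported by a remote web service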
def run(self, edit):
url = "http://api.long.ge/sublimetext/ip.php"
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
for region in self.view.sel():
self.view.insert(edit, region.begin(), response.read().decode(self.enc()))
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class StringUtilitiesIntIpCommand(sublime_plugin.TextCommand):
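    #Insert the internal (LAN) IP address, determined by opening a UDP socket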
def run(self, edit):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('google.com', 0))
int_ip = s.getsockname()[0]
s.close()
for region in self.view.sel():
self.view.insert(edit, region.begin(), int_ip)
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
class StringUtilitiesDecodeJsonCommand(sublime_plugin.TextCommand):
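    #Decode the selected JSON and pretty-print it in a print_r-like "[key] => value" format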
output = ""
i = 0
def run(self, edit):
for region in self.view.sel():
self.output = ""
if not region.empty():
                text = self.view.substr(region)
                data = json.loads(text)
self.recursive_print(data)
#print(self.output)
#pp = pprint.PrettyPrinter(indent=4, width=1)
#data = pp.pformat(data)
#data = self.output
#data = data.replace('{ ', '{')
#data = data.replace('{', '\n {\n')
self.view.replace(edit, region, self.output)
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
def recursive_print(self, src, dpth = 0, key = ''):
""" Recursively prints nested elements."""
tabs = lambda n: '\t' * n * 1 # or 2 or 8 or...
brace = lambda s, n: '%s%s%s' % ('['*n, s, ']'*n)
if isinstance(src, dict):
for key, value in src.items():
if isinstance(value, dict) or (isinstance(value, list)):
self.output += tabs(dpth) + brace(key, dpth) + "\n"
self.recursive_print(value, dpth + 1, key)
elif (isinstance(src, list)):
self.i = 0
for litem in src:
self.recursive_print(litem, dpth + 1)
else:
if key:
self.output += tabs(dpth) + '[%s] => %s' % (key, src) + "\n"
else:
self.i = self.i + 1
self.output += tabs(dpth) + str(self.i) + ' => %s' % src + "\n"
class StringUtilitiesTestCommand(sublime_plugin.TextCommand):
def run(self, edit):
        ext_ip = urllib.request.urlopen('http://api.long.ge/sublimetext/ip.php').read()
        for region in self.view.sel():
            self.view.insert(edit, region.begin(), ext_ip.decode(self.enc()))
def enc(self):
if self.view.encoding() == 'Undefined':
return self.view.settings().get('default_encoding', 'UTF-8')
else:
return self.view.encoding()
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.tests.menu_page_viewperm import ViewPermissionTests
from cms.utils.compat.dj import get_user_model
__all__ = [
'ViewPermissionComplexMenuStaffNodeTests',
]
class ViewPermissionComplexMenuStaffNodeTests(ViewPermissionTests):
"""
Test CMS_PUBLIC_FOR=staff group access and menu nodes rendering
"""
settings_overrides = {
'CMS_PERMISSION': True,
'CMS_PUBLIC_FOR': 'staff',
}
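    # With CMS_PUBLIC_FOR set to 'staff', pages without explicit view restrictions
    # are only visible to staff users, which is what the tests below exercise.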
def test_public_pages_anonymous_norestrictions(self):
"""
All pages are INVISIBLE to an anonymous user
"""
all_pages = self._setup_tree_pages()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_public_menu_anonymous_user(self):
"""
        Anonymous users see nothing, as they are not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_node_staff_access_page_and_children_group_1(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1')
# user 1 is member of group_b_access_page_and_children
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_1@django-cms.org')
else:
user = get_user_model().objects.get(username='user_1')
urls = self.get_url_dict(all_pages)
# call /
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_children_group_1_no_staff(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_1_nostaff@django-cms.org')
else:
user = get_user_model().objects.get(username='user_1_nostaff')
urls = self.get_url_dict(all_pages)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_children_group_2(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
to page_b_b and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_2@django-cms.org')
else:
user = get_user_model().objects.get(username='user_2')
urls = self.get_url_dict(all_pages)
self.assertViewNotAllowed(urls['/en/page_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_a/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/'], user)
self.assertViewNotAllowed(urls['/en/page_d/'], user)
self.assertViewAllowed(urls['/en/page_d/page_d_a/'], user)
#
def test_node_staff_access_children_group_2_nostaff(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b and user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b_b_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_2_nostaff@django-cms.org')
else:
user = get_user_model().objects.get(username='user_2_nostaff')
urls = self.get_url_dict(all_pages)
# member of group that has access to this page
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_descendants_group_3(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_3@django-cms.org')
else:
user = get_user_model().objects.get(username='user_3')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_page_and_descendants_group_3_nostaff(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_3_nostaff@django-cms.org')
else:
user = get_user_model().objects.get(username='user_3_nostaff')
urls = self.get_url_dict(all_pages)
# call /
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_descendants_group_4(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_c_a',
'page_c_b',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_4@django-cms.org')
else:
user = get_user_model().objects.get(username='user_4')
urls = self.get_url_dict(all_pages)
# call /
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
# not a direct child
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_descendants_group_4_nostaff(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
        user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_4_nostaff@django-cms.org')
else:
user = get_user_model().objects.get(username='user_4_nostaff')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_page_group_5(self):
"""
        simulate behaviour of group 5 member
group_d_ACCESS_PAGE to page_d
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_c',
'page_c_a',
'page_c_b',
'page_d',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_5')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_5@django-cms.org')
else:
user = get_user_model().objects.get(username='user_5')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_group_5_nostaff(self):
"""
        simulate behaviour of group 5 member
group_d_ACCESS_PAGE to page_d
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_d',]
self.assertGrantedVisibility(all_pages, granted, username='user_5_nostaff')
if get_user_model().USERNAME_FIELD == 'email':
user = get_user_model().objects.get(email='user_5_nostaff@django-cms.org')
else:
user = get_user_model().objects.get(username='user_5_nostaff')
urls = self.get_url_dict(all_pages)
url = self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
|
|
from StringIO import StringIO
from sympy.core import symbols, Eq, pi, Catalan, Lambda, Dummy
from sympy.utilities.codegen import CCodeGen, Routine, InputArgument, Result, \
CodeGenError, FCodeGen, codegen, CodeGenArgumentListError, OutputArgument, \
InOutArgument
from sympy.utilities.pytest import XFAIL, raises
from sympy.utilities.lambdify import implemented_function
# import test:
#FIXME: Fails due to circular import in with core
# from sympy import codegen
#FIXME-py3k: Many AssertionErrors here, perhaps related to unicode;
#FIXME-py3k: some are just due to an extra space at the end of the string
def get_string(dump_fn, routines, prefix="file", header=False, empty=False):
"""Wrapper for dump_fn. dump_fn writes its results to a stream object and
this wrapper returns the contents of that stream as a string. This
auxiliary function is used by many tests below.
    The header and the empty lines are not generated, to facilitate the
testing of the output.
"""
output = StringIO()
dump_fn(routines, output, prefix, header, empty)
source = output.getvalue()
output.close()
return source
def test_Routine_argument_order():
a, x, y, z = symbols('a x y z')
expr = (x+y)*z
raises(CodeGenArgumentListError, 'Routine("test", expr, argument_sequence=[z, x])')
raises(CodeGenArgumentListError, 'Routine("test", Eq(a, expr), argument_sequence=[z, x, y])')
r = Routine('test', Eq(a, expr), argument_sequence=[z, x, a, y])
assert [ arg.name for arg in r.arguments ] == [z, x, a, y]
assert [ type(arg) for arg in r.arguments ] == [
InputArgument, InputArgument, OutputArgument, InputArgument ]
r = Routine('test', Eq(z, expr), argument_sequence=[z, x, y])
assert [ type(arg) for arg in r.arguments ] == [
InOutArgument, InputArgument, InputArgument ]
from sympy.tensor import IndexedBase, Idx
A, B = map(IndexedBase, ['A', 'B'])
m = symbols('m', integer=True)
i = Idx('i', m)
r = Routine('test', Eq(A[i], B[i]), argument_sequence=[B, A, m])
assert [ arg.name for arg in r.arguments ] == [B.label, A.label, m]
def test_empty_c_code():
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [])
assert source == "#include \"file.h\"\n#include <math.h>\n"
def test_empty_c_code_with_comment():
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [], header=True)
assert source[:82] == (
"/******************************************************************************\n *"
)
# " Code generated with sympy 0.7.0-git "
assert source[158:] == ( "*\n"
" * *\n"
" * See http://www.sympy.org/ for more information. *\n"
" * *\n"
" * This file is part of 'project' *\n"
" ******************************************************************************/\n"
"#include \"file.h\"\n"
"#include <math.h>\n"
)
def test_empty_c_header():
code_gen = CCodeGen()
source = get_string(code_gen.dump_h, [])
assert source == "#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n#endif\n"
def test_simple_c_code():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double x, double y, double z) {\n"
" return z*(x + y);\n"
"}\n"
)
assert source == expected
def test_numbersymbol_c_code():
routine = Routine("test", pi**Catalan)
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test() {\n"
" double const Catalan = 0.915965594177219;\n"
" return pow(M_PI, Catalan);\n"
"}\n"
)
assert source == expected
def test_c_code_argument_order():
x,y,z = symbols('x,y,z')
expr = x + y
routine = Routine("test", expr, argument_sequence=[z, x, y])
code_gen = CCodeGen()
source = get_string(code_gen.dump_c, [routine])
expected = (
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double z, double x, double y) {\n"
" return x + y;\n"
"}\n"
)
assert source == expected
def test_simple_c_header():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = CCodeGen()
source = get_string(code_gen.dump_h, [routine])
expected = (
"#ifndef PROJECT__FILE__H\n"
"#define PROJECT__FILE__H\n"
"double test(double x, double y, double z);\n"
"#endif\n"
)
assert source == expected
def test_simple_c_codegen():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
result = codegen(("test", (x+y)*z), "C", "file", header=False, empty=False)
expected = [
("file.c",
"#include \"file.h\"\n"
"#include <math.h>\n"
"double test(double x, double y, double z) {\n"
" return z*(x + y);\n"
"}\n"),
("file.h",
"#ifndef PROJECT__FILE__H\n"
"#define PROJECT__FILE__H\n"
"double test(double x, double y, double z);\n"
"#endif\n")
]
assert result == expected
def test_multiple_results_c():
x,y,z = symbols('x,y,z')
expr1 = (x+y)*z
expr2 = (x-y)*z
routine = Routine(
"test",
[expr1,expr2]
)
code_gen = CCodeGen()
raises(CodeGenError, 'get_string(code_gen.dump_h, [routine])')
def test_no_results_c():
raises(ValueError, 'Routine("test", [])')
def test_ansi_math1_codegen():
# not included: log10
from sympy import (acos, asin, atan, ceiling, cos, cosh, floor, log, ln,
sin, sinh, sqrt, tan, tanh, N, Abs)
x = symbols('x')
name_expr = [
("test_fabs", Abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
("test_ceil", ceiling(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
("test_floor", floor(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test_fabs(double x) {\n return fabs(x);\n}\n'
'double test_acos(double x) {\n return acos(x);\n}\n'
'double test_asin(double x) {\n return asin(x);\n}\n'
'double test_atan(double x) {\n return atan(x);\n}\n'
'double test_ceil(double x) {\n return ceil(x);\n}\n'
'double test_cos(double x) {\n return cos(x);\n}\n'
'double test_cosh(double x) {\n return cosh(x);\n}\n'
'double test_floor(double x) {\n return floor(x);\n}\n'
'double test_log(double x) {\n return log(x);\n}\n'
'double test_ln(double x) {\n return log(x);\n}\n'
'double test_sin(double x) {\n return sin(x);\n}\n'
'double test_sinh(double x) {\n return sinh(x);\n}\n'
'double test_sqrt(double x) {\n return sqrt(x);\n}\n'
'double test_tan(double x) {\n return tan(x);\n}\n'
'double test_tanh(double x) {\n return tanh(x);\n}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n'
'double test_fabs(double x);\ndouble test_acos(double x);\n'
'double test_asin(double x);\ndouble test_atan(double x);\n'
'double test_ceil(double x);\ndouble test_cos(double x);\n'
'double test_cosh(double x);\ndouble test_floor(double x);\n'
'double test_log(double x);\ndouble test_ln(double x);\n'
'double test_sin(double x);\ndouble test_sinh(double x);\n'
'double test_sqrt(double x);\ndouble test_tan(double x);\n'
'double test_tanh(double x);\n#endif\n'
)
def test_ansi_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
x, y = symbols('x,y')
name_expr = [
("test_atan2", atan2(x,y)),
("test_pow", x**y),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test_atan2(double x, double y) {\n return atan2(x, y);\n}\n'
'double test_pow(double x, double y) {\n return pow(x, y);\n}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n#define PROJECT__FILE__H\n'
'double test_atan2(double x, double y);\n'
'double test_pow(double x, double y);\n'
'#endif\n'
)
def test_complicated_codegen():
from sympy import sin, cos, tan, N
x,y,z = symbols('x,y,z')
name_expr = [
("test1", ((sin(x)+cos(y)+tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x+y+z))))))))),
]
result = codegen(name_expr, "C", "file", header=False, empty=False)
assert result[0][0] == "file.c"
assert result[0][1] == (
'#include "file.h"\n#include <math.h>\n'
'double test1(double x, double y, double z) {\n'
' return '
'pow(sin(x), 7) + '
'7*pow(sin(x), 6)*cos(y) + '
'7*pow(sin(x), 6)*tan(z) + '
'21*pow(sin(x), 5)*pow(cos(y), 2) + '
'42*pow(sin(x), 5)*cos(y)*tan(z) + '
'21*pow(sin(x), 5)*pow(tan(z), 2) + '
'35*pow(sin(x), 4)*pow(cos(y), 3) + '
'105*pow(sin(x), 4)*pow(cos(y), 2)*tan(z) + '
'105*pow(sin(x), 4)*cos(y)*pow(tan(z), 2) + '
'35*pow(sin(x), 4)*pow(tan(z), 3) + '
'35*pow(sin(x), 3)*pow(cos(y), 4) + '
'140*pow(sin(x), 3)*pow(cos(y), 3)*tan(z) + '
'210*pow(sin(x), 3)*pow(cos(y), 2)*pow(tan(z), 2) + '
'140*pow(sin(x), 3)*cos(y)*pow(tan(z), 3) + '
'35*pow(sin(x), 3)*pow(tan(z), 4) + '
'21*pow(sin(x), 2)*pow(cos(y), 5) + '
'105*pow(sin(x), 2)*pow(cos(y), 4)*tan(z) + '
'210*pow(sin(x), 2)*pow(cos(y), 3)*pow(tan(z), 2) + '
'210*pow(sin(x), 2)*pow(cos(y), 2)*pow(tan(z), 3) + '
'105*pow(sin(x), 2)*cos(y)*pow(tan(z), 4) + '
'21*pow(sin(x), 2)*pow(tan(z), 5) + '
'7*sin(x)*pow(cos(y), 6) + '
'42*sin(x)*pow(cos(y), 5)*tan(z) + '
'105*sin(x)*pow(cos(y), 4)*pow(tan(z), 2) + '
'140*sin(x)*pow(cos(y), 3)*pow(tan(z), 3) + '
'105*sin(x)*pow(cos(y), 2)*pow(tan(z), 4) + '
'42*sin(x)*cos(y)*pow(tan(z), 5) + '
'7*sin(x)*pow(tan(z), 6) + '
'pow(cos(y), 7) + '
'7*pow(cos(y), 6)*tan(z) + '
'21*pow(cos(y), 5)*pow(tan(z), 2) + '
'35*pow(cos(y), 4)*pow(tan(z), 3) + '
'35*pow(cos(y), 3)*pow(tan(z), 4) + '
'21*pow(cos(y), 2)*pow(tan(z), 5) + '
'7*cos(y)*pow(tan(z), 6) + '
'pow(tan(z), 7);\n'
'}\n'
'double test2(double x, double y, double z) {\n'
' return cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n'
'}\n'
)
assert result[1][0] == "file.h"
assert result[1][1] == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'double test1(double x, double y, double z);\n'
'double test2(double x, double y, double z);\n'
'#endif\n'
)
def test_loops_c():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C", "file", header=False, empty=False)
assert f1 == 'file.c'
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void matrix_vector(double *A, int m, int n, double *x, double *y) {\n'
' for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
' }\n'
' for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = y[i] + %(rhs)s;\n'
' }\n'
' }\n'
'}\n'
)
assert (code == expected %{'rhs': 'A[i*n + j]*x[j]'} or
code == expected %{'rhs': 'A[j + i*n]*x[j]'} or
code == expected %{'rhs': 'x[j]*A[i*n + j]'} or
code == expected %{'rhs': 'x[j]*A[j + i*n]'})
assert f2 == 'file.h'
assert interface == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'void matrix_vector(double *A, int m, int n, double *x, double *y);\n'
'#endif\n'
)
def test_dummy_loops_c():
from sympy.tensor import IndexedBase, Idx
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void test_dummies(int m_%(mno)i, double *x, double *y) {\n'
' for (int i_%(ino)i=0; i_%(ino)i<m_%(mno)i; i_%(ino)i++){\n'
' y[i_%(ino)i] = x[i_%(ino)i];\n'
' }\n'
'}\n'
) % {'ino': i.label.dummy_index, 'mno': m.dummy_index}
r = Routine('test_dummies', Eq(y[i], x[i]))
c = CCodeGen()
code = get_string(c.dump_c, [r])
assert code == expected
def test_partial_loops_c():
# check that loop boundaries are determined by Idx, and array strides
# determined by shape of IndexedBase object.
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m,o,p = symbols('n m o p', integer=True)
A = IndexedBase('A', shape=(m, p))
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', (o, m - 5)) # Note: bounds are inclusive
j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "C", "file", header=False, empty=False)
assert f1 == 'file.c'
expected = (
'#include "file.h"\n'
'#include <math.h>\n'
'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y) {\n'
' for (int i=o; i<%(upperi)s; i++){\n'
' y[i] = 0;\n'
' }\n'
' for (int i=o; i<%(upperi)s; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = y[i] + %(rhs)s;\n'
' }\n'
' }\n'
'}\n'
) % {'upperi': m - 4, 'rhs': '%(rhs)s'}
assert (code == expected %{'rhs': 'A[i*p + j]*x[j]'} or
code == expected %{'rhs': 'A[j + i*p]*x[j]'} or
code == expected %{'rhs': 'x[j]*A[i*p + j]'} or
code == expected %{'rhs': 'x[j]*A[j + i*p]'})
assert f2 == 'file.h'
assert interface == (
'#ifndef PROJECT__FILE__H\n'
'#define PROJECT__FILE__H\n'
'void matrix_vector(double *A, int m, int n, int o, int p, double *x, double *y);\n'
'#endif\n'
)
def test_output_arg_c():
from sympy import sin, cos, Equality
x, y, z = symbols("x,y,z")
r = Routine("foo", [Equality(y, sin(x)), cos(x)])
c = CCodeGen()
result = c.write([r], "test", header=False, empty=False)
assert result[0][0] == "test.c"
expected = (
'#include "test.h"\n'
'#include <math.h>\n'
'double foo(double x, double &y) {\n'
' y = sin(x);\n'
' return cos(x);\n'
'}\n'
)
assert result[0][1] == expected
def test_empty_f_code():
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [])
assert source == ""
def test_empty_f_code_with_header():
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [], header=True)
assert source[:82] == (
"!******************************************************************************\n!*"
)
# " Code generated with sympy 0.7.0-git "
assert source[158:] == ( "*\n"
"!* *\n"
"!* See http://www.sympy.org/ for more information. *\n"
"!* *\n"
"!* This file is part of 'project' *\n"
"!******************************************************************************\n"
)
def test_empty_f_header():
code_gen = FCodeGen()
source = get_string(code_gen.dump_h, [])
assert source == ""
def test_simple_f_code():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"test = z*(x + y)\n"
"end function\n"
)
assert source == expected
def test_numbersymbol_f_code():
routine = Routine("test", pi**Catalan)
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test()\n"
"implicit none\n"
"REAL*8, parameter :: Catalan = 0.915965594177219d0\n"
"REAL*8, parameter :: pi = 3.14159265358979d0\n"
"test = pi**Catalan\n"
"end function\n"
)
assert source == expected
def test_f_code_argument_order():
x,y,z = symbols('x,y,z')
expr = x + y
routine = Routine("test", expr, argument_sequence=[z, x, y])
code_gen = FCodeGen()
source = get_string(code_gen.dump_f95, [routine])
expected = (
"REAL*8 function test(z, x, y)\n"
"implicit none\n"
"REAL*8, intent(in) :: z\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"test = x + y\n"
"end function\n"
)
assert source == expected
def test_simple_f_header():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
routine = Routine("test", expr)
code_gen = FCodeGen()
source = get_string(code_gen.dump_h, [routine])
expected = (
"interface\n"
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"end function\n"
"end interface\n"
)
assert source == expected
def test_simple_f_codegen():
x,y,z = symbols('x,y,z')
expr = (x+y)*z
result = codegen(("test", (x+y)*z), "F95", "file", header=False, empty=False)
expected = [
("file.f90",
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"test = z*(x + y)\n"
"end function\n"),
("file.h",
"interface\n"
"REAL*8 function test(x, y, z)\n"
"implicit none\n"
"REAL*8, intent(in) :: x\n"
"REAL*8, intent(in) :: y\n"
"REAL*8, intent(in) :: z\n"
"end function\n"
"end interface\n")
]
assert result == expected
def test_multiple_results_f():
x,y,z = symbols('x,y,z')
expr1 = (x+y)*z
expr2 = (x-y)*z
routine = Routine(
"test",
[expr1,expr2]
)
code_gen = FCodeGen()
raises(CodeGenError, 'get_string(code_gen.dump_h, [routine])')
def test_no_results_f():
raises(ValueError, 'Routine("test", [])')
def test_intrinsic_math_codegen():
# not included: log10
from sympy import (acos, asin, atan, ceiling, cos, cosh, floor, log, ln,
sin, sinh, sqrt, tan, tanh, N, Abs)
x = symbols('x')
name_expr = [
("test_abs", Abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
# ("test_ceil", ceiling(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
# ("test_floor", floor(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test_abs(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_abs = Abs(x)\n'
'end function\n'
'REAL*8 function test_acos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_acos = acos(x)\n'
'end function\n'
'REAL*8 function test_asin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_asin = asin(x)\n'
'end function\n'
'REAL*8 function test_atan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_atan = atan(x)\n'
'end function\n'
'REAL*8 function test_cos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_cos = cos(x)\n'
'end function\n'
'REAL*8 function test_cosh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_cosh = cosh(x)\n'
'end function\n'
'REAL*8 function test_log(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_log = log(x)\n'
'end function\n'
'REAL*8 function test_ln(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_ln = log(x)\n'
'end function\n'
'REAL*8 function test_sin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sin = sin(x)\n'
'end function\n'
'REAL*8 function test_sinh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sinh = sinh(x)\n'
'end function\n'
'REAL*8 function test_sqrt(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_sqrt = sqrt(x)\n'
'end function\n'
'REAL*8 function test_tan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_tan = tan(x)\n'
'end function\n'
'REAL*8 function test_tanh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'test_tanh = tanh(x)\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test_abs(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_acos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_asin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_atan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_cos(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_cosh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_log(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_ln(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sin(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sinh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_sqrt(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_tan(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_tanh(x)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_intrinsic_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
x, y = symbols('x,y')
name_expr = [
("test_atan2", atan2(x,y)),
("test_pow", x**y),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test_atan2(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'test_atan2 = atan2(x, y)\n'
'end function\n'
'REAL*8 function test_pow(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'test_pow = x**y\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test_atan2(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test_pow(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_complicated_codegen_f95():
from sympy import sin, cos, tan, N
x,y,z = symbols('x,y,z')
name_expr = [
("test1", ((sin(x)+cos(y)+tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x+y+z))))))))),
]
result = codegen(name_expr, "F95", "file", header=False, empty=False)
assert result[0][0] == "file.f90"
expected = (
'REAL*8 function test1(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'test1 = sin(x)**7 + 7*sin(x)**6*cos(y) + 7*sin(x)**6*tan(z) + 21*sin(x) &\n'
' **5*cos(y)**2 + 42*sin(x)**5*cos(y)*tan(z) + 21*sin(x)**5*tan(z) &\n'
' **2 + 35*sin(x)**4*cos(y)**3 + 105*sin(x)**4*cos(y)**2*tan(z) + &\n'
' 105*sin(x)**4*cos(y)*tan(z)**2 + 35*sin(x)**4*tan(z)**3 + 35*sin( &\n'
' x)**3*cos(y)**4 + 140*sin(x)**3*cos(y)**3*tan(z) + 210*sin(x)**3* &\n'
' cos(y)**2*tan(z)**2 + 140*sin(x)**3*cos(y)*tan(z)**3 + 35*sin(x) &\n'
' **3*tan(z)**4 + 21*sin(x)**2*cos(y)**5 + 105*sin(x)**2*cos(y)**4* &\n'
' tan(z) + 210*sin(x)**2*cos(y)**3*tan(z)**2 + 210*sin(x)**2*cos(y) &\n'
' **2*tan(z)**3 + 105*sin(x)**2*cos(y)*tan(z)**4 + 21*sin(x)**2*tan &\n'
' (z)**5 + 7*sin(x)*cos(y)**6 + 42*sin(x)*cos(y)**5*tan(z) + 105* &\n'
' sin(x)*cos(y)**4*tan(z)**2 + 140*sin(x)*cos(y)**3*tan(z)**3 + 105 &\n'
' *sin(x)*cos(y)**2*tan(z)**4 + 42*sin(x)*cos(y)*tan(z)**5 + 7*sin( &\n'
' x)*tan(z)**6 + cos(y)**7 + 7*cos(y)**6*tan(z) + 21*cos(y)**5*tan( &\n'
' z)**2 + 35*cos(y)**4*tan(z)**3 + 35*cos(y)**3*tan(z)**4 + 21*cos( &\n'
' y)**2*tan(z)**5 + 7*cos(y)*tan(z)**6 + tan(z)**7\n'
'end function\n'
'REAL*8 function test2(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'test2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))\n'
'end function\n'
)
assert result[0][1] == expected
assert result[1][0] == "file.h"
expected = (
'interface\n'
'REAL*8 function test1(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'end function\n'
'end interface\n'
'interface\n'
'REAL*8 function test2(x, y, z)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(in) :: y\n'
'REAL*8, intent(in) :: z\n'
'end function\n'
'end interface\n'
)
assert result[1][1] == expected
def test_loops():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n,m', integer=True)
A, x, y = map(IndexedBase, 'Axy')
i = Idx('i', m)
j = Idx('j', n)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False)
assert f1 == 'file.f90'
expected = (
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = 1, m\n'
' y(i) = 0\n'
'end do\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
) % {'rhs': 'A(i, j)*x(j)'}
assert expected == code
assert f2 == 'file.h'
assert interface == (
'interface\n'
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'end subroutine\n'
'end interface\n'
)
def test_dummy_loops_f95():
from sympy.tensor import IndexedBase, Idx
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'subroutine test_dummies(m_%(mcount)i, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m_%(mcount)i\n'
'REAL*8, intent(in), dimension(1:m_%(mcount)i) :: x\n'
'REAL*8, intent(out), dimension(1:m_%(mcount)i) :: y\n'
'INTEGER*4 :: i_%(icount)i\n'
'do i_%(icount)i = 1, m_%(mcount)i\n'
' y(i_%(icount)i) = x(i_%(icount)i)\n'
'end do\n'
'end subroutine\n'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
r = Routine('test_dummies', Eq(y[i], x[i]))
c = FCodeGen()
code = get_string(c.dump_f95, [r])
assert code == expected
def test_loops_InOut():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
i,j,n,m = symbols('i,j,n,m', integer=True)
A,x,y = symbols('A,x,y')
A = IndexedBase(A)[Idx(i, m), Idx(j, n)]
x = IndexedBase(x)[Idx(j, n)]
y = IndexedBase(y)[Idx(i, m)]
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y, y + A*x)), "F95", "file", header=False, empty=False)
assert f1 == 'file.f90'
expected = (
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(inout), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = 1, m\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
)
assert (code == expected % {'rhs': 'A(i, j)*x(j)'} or
code == expected % {'rhs': 'x(j)*A(i, j)'})
assert f2 == 'file.h'
assert interface == (
'interface\n'
'subroutine matrix_vector(A, m, n, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'REAL*8, intent(in), dimension(1:m, 1:n) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(inout), dimension(1:m) :: y\n'
'end subroutine\n'
'end interface\n'
)
def test_partial_loops_f():
# check that loop boundaries are determined by Idx, and array strides
# determined by shape of IndexedBase object.
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m,o,p = symbols('n m o p', integer=True)
A = IndexedBase('A', shape=(m, p))
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', (o, m - 5)) # Note: bounds are inclusive
j = Idx('j', n) # dimension n corresponds to bounds (0, n - 1)
(f1, code), (f2, interface) = codegen(
('matrix_vector', Eq(y[i], A[i, j]*x[j])), "F95", "file", header=False, empty=False)
expected = (
'subroutine matrix_vector(A, m, n, o, p, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'INTEGER*4, intent(in) :: n\n'
'INTEGER*4, intent(in) :: o\n'
'INTEGER*4, intent(in) :: p\n'
'REAL*8, intent(in), dimension(1:m, 1:p) :: A\n'
'REAL*8, intent(in), dimension(1:n) :: x\n'
'REAL*8, intent(out), dimension(1:%(iup-ilow)s) :: y\n'
'INTEGER*4 :: i\n'
'INTEGER*4 :: j\n'
'do i = %(ilow)s, %(iup)s\n'
' y(i) = 0\n'
'end do\n'
'do i = %(ilow)s, %(iup)s\n'
' do j = 1, n\n'
' y(i) = y(i) + %(rhs)s\n'
' end do\n'
'end do\n'
'end subroutine\n'
) % {
'rhs': 'A(i, j)*x(j)',
'iup': str(m - 4),
'ilow': str(1+o),
'iup-ilow': str(m - 4 -o)
}
assert expected == code
def test_output_arg_f():
from sympy import sin, cos, Equality
x, y, z = symbols("x,y,z")
r = Routine("foo", [Equality(y, sin(x)), cos(x)])
c = FCodeGen()
result = c.write([r], "test", header=False, empty=False)
assert result[0][0] == "test.f90"
assert result[0][1] == (
'REAL*8 function foo(x, y)\n'
'implicit none\n'
'REAL*8, intent(in) :: x\n'
'REAL*8, intent(out) :: y\n'
'y = sin(x)\n'
'foo = cos(x)\n'
'end function\n'
)
def test_inline_function():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n,m = symbols('n m', integer=True)
A, x, y = map(IndexedBase, 'Axy')
i = Idx('i', m)
j = Idx('j', n)
p = FCodeGen()
func = implemented_function('func', Lambda(n, n*(n+1)))
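    # implemented_function attaches a callable implementation to 'func', which lets
    # the code generator inline n*(n + 1) as (1 + x(i))*x(i) in the output below.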
routine = Routine('test_inline', Eq(y[i], func(x[i])))
code = get_string(p.dump_f95, [routine])
expected = (
'subroutine test_inline(m, x, y)\n'
'implicit none\n'
'INTEGER*4, intent(in) :: m\n'
'REAL*8, intent(in), dimension(1:m) :: x\n'
'REAL*8, intent(out), dimension(1:m) :: y\n'
'INTEGER*4 :: i\n'
'do i = 1, m\n'
' y(i) = (1 + x(i))*x(i)\n'
'end do\n'
'end subroutine\n'
)
assert code == expected
def test_check_case():
x, X = symbols('x,X')
raises(CodeGenError, "codegen(('test', x*X), 'f95', 'prefix')")
def test_check_case_false_positive():
# The upper case/lower case exception should not be triggered by Sympy
# objects that differ only because of assumptions. (It may be useful to
# have a check for that as well, but here we only want to test against
# false positives with respect to case checking.)
x1 = symbols('x')
x2 = symbols('x', my_assumption=True)
try:
codegen(('test', x1*x2), 'f95', 'prefix')
except CodeGenError, e:
if e.args[0][0:21] == "Fortran ignores case.":
raise AssertionError("This exception should not be raised!")
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities that replicate `Estimator.model_fn` over GPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import replicate_model_fn
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.export import export_output
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import gradient_descent
class DNNClassifierIntegrationTest(test_util.TensorFlowTestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def test_complete_flow(self):
n_classes = 3
input_dimension = 2
batch_size = 12
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
categorical_data = np.random.random_integers(
0, len(x_data), size=len(x_data))
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data,
'categories': categorical_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data,
'categories': categorical_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data,
'categories': categorical_data},
batch_size=batch_size,
shuffle=False)
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,)),
feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'categories',
vocabulary_list=np.linspace(
0., len(x_data), len(x_data), dtype=np.int64)), 1)
]
estimator = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
def optimizer_fn():
return optimizers.get_optimizer_instance('Adagrad', learning_rate=0.05)
estimator = estimator_lib.Estimator(
model_fn=replicate_model_fn.replicate_model_fn(
estimator.model_fn,
optimizer_fn,
devices=['/gpu:0', '/gpu:1', '/gpu:2']),
model_dir=estimator.model_dir,
config=estimator.config,
params=estimator.params)
num_steps = 10
estimator.train(train_input_fn, steps=num_steps)
scores = estimator.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops_lib.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in estimator.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
class ReplicateModelTest(test_util.TensorFlowTestCase):
def model_fn(self, mode, features, labels, params):
c = variable_scope.get_variable(
'c',
initializer=constant_op.constant(10, dtype=dtypes.float64),
dtype=dtypes.float64)
predictions = math_ops.multiply(features, c)
loss = None
if mode is not model_fn_lib.ModeKeys.PREDICT:
loss = losses.absolute_difference(
labels=labels,
predictions=predictions,
reduction=losses.Reduction.SUM)
loss = math_ops.reduce_sum(loss)
metrics = {
'accuracy': metrics_lib.accuracy(labels, predictions),
'auc': metrics_lib.auc(labels, predictions)
}
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops=metrics,
predictions={'probabilities': predictions},
train_op=control_flow_ops.no_op()) # This train_op isn't actually used.
def optimizer_fn(self, params):
return gradient_descent.GradientDescentOptimizer(params['learning_rate'])
@property
def params(self):
params = {}
params['learning_rate'] = 1.0
return params
def test_train(self):
features = np.array([[1.0], [2.0]])
labels = np.array([[1.0], [2.0]])
with self.test_session() as session:
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)
session.run(variables.global_variables_initializer())
# loss = feature * c - label
total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)
self.assertEqual(total_loss, session.run(estimator_spec.loss))
# loss' of c is 3.
# new value of c = 10 - learning rate * 3 = 7.0.
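# Walkthrough of that arithmetic (derived from the model_fn above): each
# tower holds one example, so the summed loss is |1*10 - 1| + |2*10 - 2| =
# 9 + 18 = 27 and d(loss)/dc = 1 + 2 = 3.  With learning_rate 1.0, one
# gradient-descent step gives c = 10 - 1.0 * 3 = 7.0, which is what the
# train_op run below is expected to produce.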
session.run(estimator_spec.train_op)
with variable_scope.variable_scope('', reuse=True):
c = variable_scope.get_variable('c', dtype=dtypes.float64)
self.assertEqual(7.0, session.run(c))
def test_train_spec_with_optimizer_without_params(self):
def optimizer_fn_without_params():
return gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
features = np.array([[1.0], [2.0]])
labels = np.array([[1.0], [2.0]])
with self.test_session() as session: # pylint: disable=unused-variable
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn,
optimizer_fn_without_params,
devices=['/gpu:0', '/gpu:1'])
# This call is going to fail if `replicated_model_fn` is still passing
# `params` inside `optimizer_fn`, even though the latter doesn't take any:
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)
del estimator_spec
def test_eval(self):
features = np.array([[0.01], [0.002]])
labels = np.array([[0.01], [0.02]])
with self.test_session() as session:
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, self.params)
session.run(variables.local_variables_initializer())
session.run(variables.global_variables_initializer())
accuracy, a = estimator_spec.eval_metric_ops['accuracy']
auc, b = estimator_spec.eval_metric_ops['auc']
session.run([a, b])
accuracy = session.run(accuracy)
auc = session.run(auc)
# loss[i] = features[i] * 10 - labels[i].
# Accuracy is 0.0 (no match) in the first tower.
# Accuracy is 1.0 (match) in the second tower, since the feature
# times weight "c" happened to be equal to the label.
total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02))
self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)
self.assertEqual(0, auc)
self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)
def test_predict(self):
features = np.array([[0.01], [0.002]])
labels = np.array([[0.01], [0.02]])
with self.test_session() as session:
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.PREDICT, self.params)
session.run(variables.global_variables_initializer())
self.assertAllClose({
'probabilities': np.array([[0.1], [0.02]])
}, session.run(estimator_spec.predictions))
def test_train_single_tower(self):
features = np.array([[1.0], [2.0]])
labels = np.array([[1.0], [2.0]])
with self.test_session() as session:
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn)
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)
session.run(variables.global_variables_initializer())
# loss = feature * c - label
total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)
self.assertEqual(total_loss, session.run(estimator_spec.loss))
# loss' of c is 3.
# new value of c = 10 - learning rate * 3 = 7.0.
session.run(estimator_spec.train_op)
with variable_scope.variable_scope('', reuse=True):
c = variable_scope.get_variable('c', dtype=dtypes.float64)
self.assertEqual(7.0, session.run(c))
def test_eval_single_tower(self):
features = np.array([[0.01], [0.002]])
labels = np.array([[0.01], [0.02]])
with self.test_session() as session:
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn, devices=['/gpu:0'])
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, self.params)
session.run(variables.local_variables_initializer())
session.run(variables.global_variables_initializer())
accuracy, a = estimator_spec.eval_metric_ops['accuracy']
auc, b = estimator_spec.eval_metric_ops['auc']
session.run([a, b])
accuracy = session.run(accuracy)
auc = session.run(auc)
# With a single tower, both examples run on the same device: the first
# example does not match (prediction 0.1 vs label 0.01), while the second
# matches, since the feature times weight "c" equals the label (0.02).
total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02))
self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)
self.assertEqual(0, auc)
self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)
def test_predict_single_tower(self):
features = np.array([[0.01], [0.002]])
labels = np.array([[0.01], [0.02]])
with self.test_session() as session:
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn, devices=['/gpu:0'])
estimator_spec = replicated_model_fn(
features, labels, model_fn_lib.ModeKeys.PREDICT, self.params)
session.run(variables.global_variables_initializer())
self.assertAllClose({
'probabilities': np.array([[0.1], [0.02]])
}, session.run(estimator_spec.predictions))
class GetLossTowersTest(test_util.TensorFlowTestCase):
def model_fn(self, mode, features, labels, params):
c = variable_scope.get_variable(
'c',
initializer=constant_op.constant(0.25, dtype=dtypes.float64),
dtype=dtypes.float64)
predictions = math_ops.add(np.array([0.1, 0.2, 0.3, features[0]]), c)
labels = np.array([0.1, 0.2, 0.3, labels[0]])
loss = losses.absolute_difference(
labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)
return model_fn_lib.EstimatorSpec(mode=mode, loss=math_ops.reduce_sum(loss))
def test_gradients_are_computed(self):
with self.test_session() as session:
tower_specs = replicate_model_fn._get_loss_towers(
self.model_fn,
mode=None,
features=[[0.6], [1.6]],
labels=[[0.6], [0.6]],
params=None,
config=None,
devices=['/gpu:0', '/gpu:1'],
local_ps_device='/gpu:0',
name_scope_pattern='test_tower_{}')
session.run(variables.global_variables_initializer())
self.assertEqual(len(tower_specs), 2)
self.assertEqual('/device:GPU:0', tower_specs[0].loss.device)
self.assertEqual('Sum:0', tower_specs[0].loss.name)
self.assertEqual(1.0, session.run(tower_specs[0].loss))
self.assertEqual('/device:GPU:1', tower_specs[1].loss.device)
self.assertEqual('test_tower_1/Sum:0', tower_specs[1].loss.name)
# The input batch for the second tower had a loss that is 1.0
# bigger: 0.6 vs 1.6.
self.assertEqual(2.0, session.run(tower_specs[1].loss))
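# Per-tower arithmetic, from the model_fn above: predictions are
# [0.1, 0.2, 0.3, feature] + 0.25 and labels are [0.1, 0.2, 0.3, label], so
# tower 0 sums to 3 * 0.25 + |0.6 + 0.25 - 0.6| = 1.0 while tower 1 sums to
# 3 * 0.25 + |1.6 + 0.25 - 0.6| = 2.0.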
self.assertEqual(1, len(variables.global_variables()))
self.assertEqual(1, len(variables.trainable_variables()))
with variable_scope.variable_scope('', reuse=True):
c = variable_scope.get_variable('c', dtype=dtypes.float64)
self.assertEqual(0.25, session.run(c))
class SplitBatchTest(test_util.TensorFlowTestCase):
def evaluate_shards(self, first_list, second_list):
evaluate_items = lambda x: x.eval()
return list(map(evaluate_items, first_list)), list(
map(evaluate_items, second_list))
def test_simple_half_split(self):
with self.test_session() as session: # pylint: disable=unused-variable
features = [0.0, 1.0, 2.0, 3.0]
labels = [10.0, 11.0, 12.0, 13.0]
feature_shards, label_shards = replicate_model_fn._split_batch(
features, labels, 2, device='/gpu:0')
feature_shards, label_shards = self.evaluate_shards(
feature_shards, label_shards)
self.assertAllEqual([[0.0, 1.0], [2.0, 3.0]], feature_shards)
self.assertAllEqual([[10.0, 11.0], [12.0, 13.0]], label_shards)
def test_to_each_their_own(self):
with self.test_session() as session: # pylint: disable=unused-variable
features = [0.0, 1.0, 2.0, 3.0]
labels = [10.0, 11.0, 12.0, 13.0]
feature_shards, label_shards = replicate_model_fn._split_batch(
features, labels, 4, device='/gpu:0')
feature_shards, label_shards = self.evaluate_shards(
feature_shards, label_shards)
self.assertAllEqual([[0.0], [1.0], [2.0], [3.0]], feature_shards)
self.assertAllEqual([[10.0], [11.0], [12.0], [13.0]], label_shards)
def test_one_batch(self):
with self.test_session() as session: # pylint: disable=unused-variable
features = [0.0, 1.0, 2.0, 3.0]
labels = [10.0, 11.0, 12.0, 13.0]
feature_shards, label_shards = replicate_model_fn._split_batch(
features, labels, 1, device='/gpu:0')
feature_shards, label_shards = self.evaluate_shards(
feature_shards, label_shards)
self.assertAllEqual([[0.0, 1.0, 2.0, 3.0]], feature_shards)
self.assertAllEqual([[10.0, 11.0, 12.0, 13.0]], label_shards)
def test_half_split_in_dictionary(self):
with self.test_session() as session: # pylint: disable=unused-variable
features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}
labels = [10.0, 11.0, 12.0, 13.0]
feature_shards, label_shards = replicate_model_fn._split_batch(
features, labels, 2, device='/gpu:0')
self.assertAllEqual([0.0, 1.0], feature_shards[0]['first'].eval())
self.assertAllEqual([4.0, 5.0], feature_shards[0]['second'].eval())
self.assertAllEqual([2.0, 3.0], feature_shards[1]['first'].eval())
self.assertAllEqual([6.0, 7.0], feature_shards[1]['second'].eval())
self.assertAllEqual([10.0, 11.0], label_shards[0].eval())
self.assertAllEqual([12.0, 13.0], label_shards[1].eval())
def test_one_batch_in_dictionary(self):
with self.test_session() as session: # pylint: disable=unused-variable
features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}
labels = [10.0, 11.0, 12.0, 13.0]
feature_shards, label_shards = replicate_model_fn._split_batch(
features, labels, 1, device='/gpu:0')
self.assertAllEqual([0.0, 1.0, 2.0, 3.0],
feature_shards[0]['first'].eval())
self.assertAllEqual([4.0, 5.0, 6.0, 7.0],
feature_shards[0]['second'].eval())
self.assertAllEqual([10.0, 11.0, 12.0, 13.0], label_shards[0].eval())
def test_feature_and_label_dictionaries(self):
with self.test_session() as session: # pylint: disable=unused-variable
features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}
labels = {'first': [10.0, 11.0], 'second': [12.0, 13.0]}
feature_shards, label_shards = replicate_model_fn._split_batch(
features, labels, 2, device='/gpu:0')
self.assertAllEqual([0.0, 1.0], feature_shards[0]['first'].eval())
self.assertAllEqual([4.0, 5.0], feature_shards[0]['second'].eval())
self.assertAllEqual([2.0, 3.0], feature_shards[1]['first'].eval())
self.assertAllEqual([6.0, 7.0], feature_shards[1]['second'].eval())
self.assertAllEqual([10.0], label_shards[0]['first'].eval())
self.assertAllEqual([12.0], label_shards[0]['second'].eval())
self.assertAllEqual([11.0], label_shards[1]['first'].eval())
self.assertAllEqual([13.0], label_shards[1]['second'].eval())
class TrainSpecTest(test_util.TensorFlowTestCase):
expected_predictions = {}
def create_estimator_spec(self, loss):
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.TRAIN,
loss=loss,
train_op=loss, # Not used; currently required.
predictions=self.expected_predictions)
def create_constant_loss(self, loss_value):
return constant_op.constant(loss_value, dtype=dtypes.float64)
def test_example(self):
with self.test_session() as session:
tower_losses = list(map(self.create_constant_loss, [2, 4, 6]))
tower_specs = list(map(self.create_estimator_spec, tower_losses))
expected_train_op = tower_losses[1]
estimator_spec = replicate_model_fn._train_spec(
tower_specs, expected_train_op, aggregation_device='/gpu:0')
self.assertEqual(expected_train_op, estimator_spec.train_op)
self.assertEqual(2 + 4 + 6, session.run(estimator_spec.loss))
self.assertEqual(self.expected_predictions, estimator_spec.predictions)
class EvalSpecTest(test_util.TensorFlowTestCase):
def create_estimator_spec(self, loss, metrics):
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.EVAL, loss=loss, eval_metric_ops=metrics)
def create_constant_loss(self, loss_value):
return constant_op.constant(loss_value, dtype=dtypes.float64)
def create_eval_metrics(self, noise):
predictions = np.array([0.1, 0.2, 0.3, 0.6 + noise])
labels = np.array([0.1, 0.2, 0.3, 0.6])
metrics = {
'accuracy': metrics_lib.accuracy(labels, predictions),
'auc': metrics_lib.auc(labels, predictions)
}
return metrics
def test_example(self):
with self.test_session() as session:
tower_losses = map(self.create_constant_loss, [2, 4, 6])
tower_metrics = map(self.create_eval_metrics, [0, 0.2, 0.3])
tower_specs = [
self.create_estimator_spec(l, m)
for l, m in zip(tower_losses, tower_metrics)
]
session.run(variables.local_variables_initializer())
estimator_spec = replicate_model_fn._eval_spec(
tower_specs, aggregation_device='/device:GPU:0')
accuracy, a = estimator_spec.eval_metric_ops['accuracy']
auc, b = estimator_spec.eval_metric_ops['auc']
self.assertEqual('/device:CPU:0', accuracy.device)
self.assertEqual('/device:CPU:0', auc.device)
session.run([a, b])
accuracy, auc = session.run([accuracy, auc])
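# Expected accuracy: each of the three towers scores four predictions, and
# only the last one (0.6 + noise) can miss the 0.6 label, so the towers with
# noise 0.2 and 0.3 each contribute a single mismatch -> (12 - 2) / 12.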
self.assertNear((12 - 2) / 12, accuracy, 0.01)
self.assertEqual(0, auc)
self.assertEqual(2 + 4 + 6, session.run(estimator_spec.loss))
def test_handles_single_tower(self):
with self.test_session() as session:
tower_losses = map(self.create_constant_loss, [5])
tower_metrics = map(self.create_eval_metrics, [0.2])
tower_specs = [
self.create_estimator_spec(l, m)
for l, m in zip(tower_losses, tower_metrics)
]
session.run(variables.local_variables_initializer())
estimator_spec = replicate_model_fn._eval_spec(
tower_specs, aggregation_device='/device:GPU:0')
accuracy, a = estimator_spec.eval_metric_ops['accuracy']
auc, b = estimator_spec.eval_metric_ops['auc']
self.assertEqual('/device:CPU:0', accuracy.device)
self.assertEqual('/device:CPU:0', auc.device)
session.run([a, b])
accuracy = session.run(accuracy)
auc = session.run(auc)
self.assertNear((4 - 1) / 4, accuracy, 0.01)
self.assertEqual(0, auc)
self.assertEqual(5, session.run(estimator_spec.loss))
class PredictSpecTest(test_util.TensorFlowTestCase):
def model_fn(self, mode, features, labels, params):
c = variable_scope.get_variable(
'c',
initializer=constant_op.constant(0.25, dtype=dtypes.float64),
dtype=dtypes.float64)
predictions = math_ops.add(np.array([features[0], features[0]]), c)
return model_fn_lib.EstimatorSpec(
mode=model_fn_lib.ModeKeys.PREDICT,
predictions={
'probabilities': predictions
})
def test_example(self):
with self.test_session() as session:
tower_specs = replicate_model_fn._get_loss_towers(
self.model_fn,
mode=None,
features=[[0.1], [0.2]],
labels=[[], []],
params=None,
config=None,
devices=['/gpu:0', '/gpu:1'],
local_ps_device='/gpu:0',
)
session.run(variables.global_variables_initializer())
estimator_spec = replicate_model_fn._predict_spec(
tower_specs, aggregation_device='/gpu:0')
self.assertEqual('/device:GPU:0',
estimator_spec.predictions['probabilities'].device)
self.assertAllClose({
'probabilities': np.array([0.35, 0.35, 0.45, 0.45])
}, session.run(estimator_spec.predictions))
class ReduceMetricVariablesTest(test_util.TensorFlowTestCase):
def create_metric_variable(self, initial_value, name):
return variable_scope.variable(
initial_value,
trainable=False,
collections=[ops_lib.GraphKeys.METRIC_VARIABLES],
validate_shape=True,
name=name)
def create_tower_metrics(self, tower_id):
with variable_scope.variable_scope('', reuse=(tower_id != 0)):
self.create_metric_variable(1.3 * (tower_id + 1), 'total')
self.create_metric_variable(2.3 * (tower_id + 1), 'count')
self.create_metric_variable(
np.array([3.3, 3.5, 3.7]) * (tower_id + 1), 'total')
def test_example(self):
with self.test_session() as session:
for tower_id in range(3):
self.create_tower_metrics(tower_id)
session.run(
variables.variables_initializer(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
session.run(
replicate_model_fn._reduce_metric_variables(number_of_towers=3))
# 1st tower = 1.3, 2.3, [3.3, 3.5, 3.7]
# 2nd tower = 2.6, 4.6, [6.6, 7.0, 7.4]
# 3rd tower = 3.9, 6.9, [9.9, 10.5, 11.1]
# Reduced = 7.8, 13.8, [19.8, 21.0, 22.2]
# Towers are accumulated in the first tower.
local_metrics = session.run(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))
self.assertNear(7.8, local_metrics[0], 0.01)
self.assertNear(13.8, local_metrics[1], 0.01)
self.assertAllClose([19.8, 21., 22.2], local_metrics[2], 0.01)
self.assertNear(0.0, local_metrics[3], 0.01)
self.assertNear(0.0, local_metrics[4], 0.01)
self.assertAllClose([0.0, 0.0, 0.0], local_metrics[5], 0.01)
self.assertNear(0.0, local_metrics[6], 0.01)
self.assertNear(0.0, local_metrics[7], 0.01)
self.assertAllClose([0.0, 0.0, 0.0], local_metrics[8], 0.01)
def test_reduce_is_idempotent(self):
with self.test_session() as session:
for tower_id in range(3):
self.create_tower_metrics(tower_id)
session.run(
variables.variables_initializer(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
for _ in range(20):
session.run(
replicate_model_fn._reduce_metric_variables(number_of_towers=3))
local_metrics = session.run(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))
self.assertNear(7.8, local_metrics[0], 0.01)
self.assertNear(13.8, local_metrics[1], 0.01)
self.assertAllClose([19.8, 21., 22.2], local_metrics[2], 0.01)
self.assertNear(0.0, local_metrics[3], 0.01)
self.assertNear(0.0, local_metrics[4], 0.01)
self.assertAllClose([0.0, 0.0, 0.0], local_metrics[5], 0.01)
self.assertNear(0.0, local_metrics[6], 0.01)
self.assertNear(0.0, local_metrics[7], 0.01)
self.assertAllClose([0.0, 0.0, 0.0], local_metrics[8], 0.01)
def test_handles_single_tower(self):
with self.test_session() as session:
self.create_tower_metrics(0)
session.run(
variables.variables_initializer(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
session.run(
replicate_model_fn._reduce_metric_variables(number_of_towers=1))
local_metrics = session.run(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))
self.assertNear(1.3, local_metrics[0], 0.01)
self.assertNear(2.3, local_metrics[1], 0.01)
self.assertAllClose([3.3, 3.5, 3.7], local_metrics[2], 0.01)
def test_doesnt_accept_uneven_number_of_variables(self):
with self.test_session() as session:
for tower_id in range(3):
self.create_tower_metrics(tower_id)
self.create_metric_variable(-1.0, 'oddball')
session.run(
variables.variables_initializer(
ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))
with self.assertRaisesRegexp(ValueError, ''):
session.run(
replicate_model_fn._reduce_metric_variables(number_of_towers=3))
class MergeExportOutputsTest(test_util.TensorFlowTestCase):
def optimizer_fn(self):
return gradient_descent.GradientDescentOptimizer(1.0)
def model_fn(self, mode, features, labels, params):
c = variable_scope.get_variable(
'c',
initializer=constant_op.constant(10, dtype=dtypes.float64),
dtype=dtypes.float64)
predictions = {'probabilities': math_ops.multiply(features, c)}
loss = losses.absolute_difference(
labels=labels,
predictions=predictions['probabilities'],
reduction=losses.Reduction.SUM)
metrics = {
'accuracy': metrics_lib.accuracy(labels, predictions['probabilities']),
'auc': metrics_lib.auc(labels, predictions['probabilities'])
}
tensor_string_repr = str(features)
classes = constant_op.constant(
re.search('(split_inputs/split:[0-9])', tensor_string_repr).group(1),
dtype=dtypes.string)
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(predictions),
'classification_output':
export_output.ClassificationOutput(predictions['probabilities'],
classes),
'classification_scores':
export_output.ClassificationOutput(
scores=predictions['probabilities']),
'classification_classes':
export_output.ClassificationOutput(classes=classes),
'regression_output':
export_output.RegressionOutput(predictions['probabilities']),
}
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=math_ops.reduce_sum(loss),
eval_metric_ops=metrics,
predictions=predictions,
train_op=loss, # This train_op isn't actually used.
export_outputs=export_outputs)
def replicate_estimator_spec(self, session):
features = np.array([0.01, 0.002])
labels = np.array([0.01, 0.02])
replicated_model_fn = replicate_model_fn.replicate_model_fn(
self.model_fn, self.optimizer_fn, devices=['/gpu:0', '/gpu:1'])
estimator_spec = replicated_model_fn(features, labels,
model_fn_lib.ModeKeys.PREDICT, {})
session.run(variables.global_variables_initializer())
return estimator_spec
def test_merge_predict_output(self):
with self.test_session() as session:
estimator_spec = self.replicate_estimator_spec(session)
self.assertAllClose(
{
'probabilities': np.array([0.1, 0.02])
},
session.run(estimator_spec.export_outputs[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].outputs))
def test_merge_classification_output_scores_classes(self):
with self.test_session() as session:
estimator_spec = self.replicate_estimator_spec(session)
self.assertAllClose(
[0.1, 0.02],
session.run(
estimator_spec.export_outputs['classification_output'].scores))
self.assertAllEqual(
[b'split_inputs/split:0', b'split_inputs/split:1'],
session.run(
estimator_spec.export_outputs['classification_output'].classes))
def test_merge_classification_output_scores(self):
with self.test_session() as session:
estimator_spec = self.replicate_estimator_spec(session)
self.assertAllClose(
[0.1, 0.02],
session.run(
estimator_spec.export_outputs['classification_scores'].scores))
self.assertEqual(
None, estimator_spec.export_outputs['classification_scores'].classes)
def test_merge_classification_output_classes(self):
with self.test_session() as session:
estimator_spec = self.replicate_estimator_spec(session)
self.assertAllEqual(
[b'split_inputs/split:0', b'split_inputs/split:1'],
session.run(
estimator_spec.export_outputs['classification_classes'].classes))
self.assertEqual(
None, estimator_spec.export_outputs['classification_classes'].scores)
def test_merge_regression_output(self):
with self.test_session() as session:
estimator_spec = self.replicate_estimator_spec(session)
self.assertAllClose(
[0.1, 0.02],
session.run(estimator_spec.export_outputs['regression_output'].value))
class GetLocalDevicesTest(test_util.TensorFlowTestCase):
def test_there_is_at_least_a_cpu(self):
self.assertTrue(replicate_model_fn._get_local_devices('CPU'))
def test_there_is_no_xpu(self):
self.assertFalse(
replicate_model_fn._get_local_devices('XPU')) # XPU doesn't exist.
def test_whether_there_is_a_gpu(self):
self.assertEqual(
len(replicate_model_fn._get_local_devices('GPU')),
test.is_gpu_available())
class LocalDeviceSetterTest(test_util.TensorFlowTestCase):
def test_vars_are_on_ps_but_ops_are_on_workers(self):
local_device_setter = replicate_model_fn._local_device_setter(
ps_device='/device:GPU:3', worker_device='/device:GPU:2')
with ops_lib.device(local_device_setter):
c = variables.Variable(0.01)
self.assertEqual('/device:GPU:3', c.device)
cc = variables.Variable(0.02)
self.assertEqual('/device:GPU:3', cc.device)
ccc = variables.Variable(0.03)
self.assertEqual('/device:GPU:3', ccc.device)
c_op = array_ops.concat(c, axis=0)
self.assertEqual('/device:GPU:2', c_op.device)
cc_op = array_ops.concat(cc, axis=0)
self.assertEqual('/device:GPU:2', cc_op.device)
class ComputeSumWithDevicePlacementTest(test_util.TensorFlowTestCase):
def test_vectors(self):
with self.test_session() as session:
total = replicate_model_fn._compute_sum_on_device(
[1.0, 2.0, 3.0, 4.0], device='/device:GPU:0', name='test_sum')
self.assertEqual('/device:GPU:0', total.device)
self.assertEqual('test_sum', total.op.name)
self.assertEqual(10.0, session.run(total))
def test_tensors(self):
with self.test_session() as session:
total = replicate_model_fn._compute_sum_on_device(
[[1.0, 2.0], [3.0, 4.0]], device='/device:GPU:0', name='test_sum')
self.assertEqual('/device:GPU:0', total.device)
self.assertEqual('test_sum', total.op.name)
self.assertAllEqual([4.0, 6.0], session.run(total))
def test_indexedslices(self):
with self.test_session() as session:
a = ops_lib.IndexedSlices(
constant_op.constant([1.0, 2.0]), [0, 1],
dense_shape=constant_op.constant([2]))
b = ops_lib.IndexedSlices(constant_op.constant([3.0, 4.0]), [0, 1])
total = replicate_model_fn._compute_sum_on_device(
[a, b], device='/device:GPU:0')
self.assertEqual('/device:GPU:0', total.device)
self.assertAllEqual([4.0, 6.0],
session.run(ops_lib.convert_to_tensor(total)))
def test_indexedslices_higher_dimensions(self):
with self.test_session() as session:
a = ops_lib.IndexedSlices(
constant_op.constant([[1.0, 5.0], [2.0, 6.0]]), [0, 1],
dense_shape=constant_op.constant([2, 4]))
b = ops_lib.IndexedSlices(
constant_op.constant([[3.0, 7.0], [4.0, 8.0]]), [0, 1])
total = replicate_model_fn._compute_sum_on_device(
[a, b], device='/device:GPU:0')
self.assertEqual('/device:GPU:0', total.device)
self.assertAllEqual([[4.0, 12.0], [6.0, 14.0]],
session.run(ops_lib.convert_to_tensor(total)))
def test_indexedslices_some_dont_overlap(self):
with self.test_session() as session:
a = ops_lib.IndexedSlices(
constant_op.constant([1.0, 2.0]), [0, 3],
dense_shape=constant_op.constant([4]))
b = ops_lib.IndexedSlices(constant_op.constant([3.0, 4.0]), [0, 1])
total = replicate_model_fn._compute_sum_on_device(
[a, b], device='/device:GPU:0')
self.assertEqual('/device:GPU:0', total.device)
self.assertAllEqual([4.0, 4.0, 0.0, 2.0],
session.run(ops_lib.convert_to_tensor(total)))
def test_no_name_for_indexslices(self):
a = ops_lib.IndexedSlices(
constant_op.constant([1.0, 2.0]), [0, 1],
dense_shape=constant_op.constant([2]))
b = ops_lib.IndexedSlices(constant_op.constant([3.0, 4.0]), [0, 1])
with self.assertRaisesRegexp(ValueError, ''):
_ = replicate_model_fn._compute_sum_on_device(
[a, b], device='/device:GPU:0', name='cant_name_indexslices')
class ConcatTensorDictsTest(test_util.TensorFlowTestCase):
def test_example(self):
tensor_dicts = [
{
'a': np.array([1.0, 2.0]),
'b': np.array([11.0]),
'c': np.array([21.0]),
},
{
'a': np.array([3.0]),
'b': np.array([12.0, 13.0]),
},
{
'b': np.array([14.0]),
},
]
with self.test_session() as session:
self.assertAllClose({
'a': np.array([1.0, 2.0, 3.0]),
'b': np.array([11.0, 12.0, 13.0, 14.0]),
'c': np.array([21.0]),
}, session.run(replicate_model_fn._concat_tensor_dicts(*tensor_dicts)))
if __name__ == '__main__':
test.main()
|
|
from __future__ import division
import sys
import albow # used for translation update
from pygame import Rect, Surface, image
from pygame.locals import K_RETURN, K_KP_ENTER, K_ESCAPE, K_TAB, KEYDOWN, SRCALPHA
from pygame.mouse import set_cursor
from pygame.cursors import arrow as arrow_cursor
from pygame.transform import rotozoom
from vectors import add, subtract
from utils import frame_rect
import theme
from theme import ThemeProperty, FontProperty
import resource
from numpy import fromstring
from OpenGL import GL, GLU
debug_rect = False
debug_tab = True
root_widget = None
current_cursor = None
def overridable_property(name, doc=None):
"""Creates a property which calls methods get_xxx and set_xxx of
the underlying object to get and set the property value, so that
the property's behaviour may be easily overridden by subclasses."""
getter_name = intern('get_' + name)
setter_name = intern('set_' + name)
return property(
lambda self: getattr(self, getter_name)(),
lambda self, value: getattr(self, setter_name)(value),
None,
doc)
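# Minimal usage sketch (illustrative only; the class and attribute below are
# hypothetical, not part of this module).  A class declares the property once
# and subclasses customise it by overriding the get_/set_ methods it
# delegates to:
#
#   class Lamp(object):
#       brightness = overridable_property('brightness', "Lamp brightness.")
#       def __init__(self):
#           self._brightness = 0
#       def get_brightness(self):
#           return self._brightness
#       def set_brightness(self, value):
#           self._brightness = max(0, value)
#
#   lamp = Lamp()
#   lamp.brightness = -5     # routed through set_brightness, clamped to 0
#   print lamp.brightness    # routed through get_brightness -> prints 0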
def rect_property(name):
def get(self):
return getattr(self._rect, name)
def set(self, value):
r = self._rect
old_size = r.size
setattr(r, name, value)
new_size = r.size
if old_size != new_size:
self._resized(old_size)
return property(get, set)
# noinspection PyPropertyAccess
class Widget(object):
# rect Rect bounds in parent's coordinates
# parent Widget containing widget
# subwidgets [Widget] contained widgets
# focus_switch Widget subwidget to receive key events
# fg_color color or None to inherit from parent
# bg_color color to fill background, or None
# visible boolean
# border_width int width of border to draw around widget, or None
# border_color color or None to use widget foreground color
# tab_stop boolean stop on this widget when tabbing
# anchor string of 'ltrbwh' characters (see parent_resized)
font = FontProperty('font')
fg_color = ThemeProperty('fg_color')
bg_color = ThemeProperty('bg_color')
bg_image = ThemeProperty('bg_image')
scale_bg = ThemeProperty('scale_bg')
border_width = ThemeProperty('border_width')
border_color = ThemeProperty('border_color')
sel_color = ThemeProperty('sel_color')
margin = ThemeProperty('margin')
menu_bar = overridable_property('menu_bar')
is_gl_container = overridable_property('is_gl_container')
tab_stop = False
enter_response = None
cancel_response = None
anchor = 'ltwh'
debug_resize = False
_menubar = None
_visible = True
_is_gl_container = False
redraw_every_event = True
tooltip = None
tooltipText = None
doNotTranslate = False
# 'name' is used to track widgets without parent
name = 'Widget'
def __init__(self, rect=None, **kwds):
if rect and not isinstance(rect, Rect):
raise TypeError("Widget rect not a pygame.Rect")
self._rect = Rect(rect or (0, 0, 100, 100))
#-# Translation live update preparation
self.__lang = albow.translate.getLang()
self.__update_translation = False
self.shrink_wrapped = False
#-#
self.parent = None
self.subwidgets = []
self.focus_switch = None
self.is_modal = False
self.set(**kwds)
self.root = self.get_root()
self.setup_spacings()
#-# Translation live update preparation
@property
def get_update_translation(self):
return self.__update_translation
def set_update_ui(self, v):
if v:
self.font = self.predict_font({})
for widget in self.subwidgets:
widget.set_update_ui(v)
if self.shrink_wrapped:
self.shrink_wrap()
if hasattr(self, 'calc_size'):
self.calc_size()
self.invalidate()
self.__update_translation = v
#-#
def setup_spacings(self):
def new_size(size):
size = float(size * 1000) / float(100)
size = int(size * resource.font_proportion / 1000)
return size
self.margin = new_size(self.margin)
if hasattr(self, 'spacing'):
self.spacing = new_size(self.spacing)
def set(self, **kwds):
for name, value in kwds.iteritems():
if not hasattr(self, name):
raise TypeError("Unexpected keyword argument '%s'" % name)
setattr(self, name, value)
def get_rect(self):
return self._rect
def set_rect(self, x):
old_size = self._rect.size
self._rect = Rect(x)
self._resized(old_size)
resizing_axes = {'h': 'lr', 'v': 'tb'}
resizing_values = {'': [0], 'm': [1], 's': [0, 1]}
def set_resizing(self, axis, value):
chars = self.resizing_axes[axis]
anchor = self.anchor
for c in chars:
anchor = anchor.replace(c, '')
for i in self.resizing_values[value]:
anchor += chars[i]
self.anchor = anchor + value
def _resized(self, (old_width, old_height)):
new_width, new_height = self._rect.size
dw = new_width - old_width
dh = new_height - old_height
if dw or dh:
self.resized(dw, dh)
def resized(self, dw, dh):
if self.debug_resize:
print "Widget.resized:", self, "by", (dw, dh), "to", self.size
for widget in self.subwidgets:
widget.parent_resized(dw, dh)
def parent_resized(self, dw, dh):
debug_resize = self.debug_resize or getattr(self.parent, 'debug_resize', False)
if debug_resize:
print "Widget.parent_resized:", self, "by", (dw, dh)
left, top, width, height = self._rect
move = False
resize = False
anchor = self.anchor
if dw:
factors = [1, 1, 1] # left, width, right
if 'r' in anchor:
factors[2] = 0
if 'w' in anchor:
factors[1] = 0
if 'l' in anchor:
factors[0] = 0
if any(factors):
resize = factors[1]
move = factors[0] or factors[2]
#print "lwr", factors
left += factors[0] * dw / sum(factors)
width += factors[1] * dw / sum(factors)
#left = (left + width) + factors[2] * dw / sum(factors) - width
if dh:
factors = [1, 1, 1] # bottom, height, top
if 't' in anchor:
factors[2] = 0
if 'h' in anchor:
factors[1] = 0
if 'b' in anchor:
factors[0] = 0
if any(factors):
resize = factors[1]
move = factors[0] or factors[2]
#print "bht", factors
top += factors[2] * dh / sum(factors)
height += factors[1] * dh / sum(factors)
#top = (top + height) + factors[0] * dh / sum(factors) - height
if resize:
if debug_resize:
print "Widget.parent_resized: changing rect to", (left, top, width, height)
self.rect = Rect((left, top, width, height))
elif move:
if debug_resize:
print "Widget.parent_resized: moving to", (left, top)
self._rect.topleft = (left, top)
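# Worked examples of the anchor handling above, traced from the code, for a
# parent that grows horizontally by dw:
#   anchor 'ltwh' (the default): 'l' and 'w' zero the left and width factors,
#     so all of dw is attributed to the right side and the widget keeps its
#     position and size.
#   anchor 'lr': only the width factor survives, so the widget stretches by
#     the full dw and keeps tracking the parent's right edge.
#   anchor 'rw': only the left factor survives, so the widget moves right by
#     dw at a fixed width, staying pinned to the parent's right edge.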
rect = property(get_rect, set_rect)
left = rect_property('left')
right = rect_property('right')
top = rect_property('top')
bottom = rect_property('bottom')
width = rect_property('width')
height = rect_property('height')
size = rect_property('size')
topleft = rect_property('topleft')
topright = rect_property('topright')
bottomleft = rect_property('bottomleft')
bottomright = rect_property('bottomright')
midleft = rect_property('midleft')
midright = rect_property('midright')
midtop = rect_property('midtop')
midbottom = rect_property('midbottom')
center = rect_property('center')
centerx = rect_property('centerx')
centery = rect_property('centery')
def get_visible(self):
return self._visible
def set_visible(self, x):
self._visible = x
visible = overridable_property('visible')
def add(self, arg, index=None):
if arg:
if isinstance(arg, Widget):
if index is not None:
arg.set_parent(self, index)
else:
arg.set_parent(self)
else:
for item in arg:
self.add(item)
def add_centered(self, widget):
w, h = self.size
widget.center = w // 2, h // 2
self.add(widget)
def remove(self, widget):
if widget in self.subwidgets:
widget.set_parent(None)
def set_parent(self, parent, index=None):
if parent is not self.parent:
if self.parent:
self.parent._remove(self)
self.parent = parent
if parent:
parent._add(self, index)
def all_parents(self):
widget = self
parents = []
while widget.parent:
parents.append(widget.parent)
widget = widget.parent
return parents
def _add(self, widget, index=None):
if index is not None:
self.subwidgets.insert(index, widget)
else:
self.subwidgets.append(widget)
if hasattr(widget, "idleevent"):
#print "Adding idle handler for ", widget
self.root.add_idle_handler(widget)
def _remove(self, widget):
if hasattr(widget, "idleevent"):
#print "Removing idle handler for ", widget
self.root.remove_idle_handler(widget)
self.subwidgets.remove(widget)
if self.focus_switch is widget:
self.focus_switch = None
def draw_all(self, surface):
if self.visible:
surf_rect = surface.get_rect()
bg_image = self.bg_image
if bg_image:
assert isinstance(bg_image, Surface)
if self.scale_bg:
bg_width, bg_height = bg_image.get_size()
width, height = self.size
if width > bg_width or height > bg_height:
hscale = width / bg_width
vscale = height / bg_height
bg_image = rotozoom(bg_image, 0.0, max(hscale, vscale))
r = bg_image.get_rect()
r.center = surf_rect.center
surface.blit(bg_image, r)
else:
bg = self.bg_color
if bg:
surface.fill(bg)
self.draw(surface)
bw = self.border_width
if bw:
bc = self.border_color or self.fg_color
frame_rect(surface, bc, surf_rect, bw)
for widget in self.subwidgets:
sub_rect = widget.rect
if debug_rect:
print "Widget: Drawing subwidget %s of %s with rect %s" % (
widget, self, sub_rect)
sub_rect = surf_rect.clip(sub_rect)
if sub_rect.width > 0 and sub_rect.height > 0:
try:
sub = surface.subsurface(sub_rect)
except ValueError as e:
if str(e) == "subsurface rectangle outside surface area":
self.diagnose_subsurface_problem(surface, widget)
else:
raise
else:
widget.draw_all(sub)
self.draw_over(surface)
def diagnose_subsurface_problem(self, surface, widget):
mess = "Widget %s %s outside parent surface %s %s" % (
widget, widget.rect, self, surface.get_rect())
sys.stderr.write("%s\n" % mess)
surface.fill((255, 0, 0), widget.rect)
def draw(self, surface):
pass
def draw_over(self, surface):
pass
def find_widget(self, pos):
for widget in self.subwidgets[::-1]:
if widget.visible:
r = widget.rect
if r.collidepoint(pos):
return widget.find_widget(subtract(pos, r.topleft))
return self
def handle_mouse(self, name, event):
self.augment_mouse_event(event)
self.call_handler(name, event)
self.setup_cursor(event)
def mouse_down(self, event):
self.call_parent_handler("mouse_down", event)
def mouse_up(self, event):
self.call_parent_handler("mouse_up", event)
def augment_mouse_event(self, event):
event.dict['local'] = self.global_to_local(event.pos)
def setup_cursor(self, event):
global current_cursor
cursor = self.get_cursor(event) or arrow_cursor
if cursor is not current_cursor:
set_cursor(*cursor)
current_cursor = cursor
def dispatch_key(self, name, event):
if self.visible:
if event.type == KEYDOWN:
menubar = self._menubar
if menubar and menubar.handle_command_key(event):
return
widget = self.focus_switch
if widget:
widget.dispatch_key(name, event)
else:
self.call_handler(name, event)
else:
self.call_parent_handler(name, event)
def get_focus(self):
widget = self
while 1:
focus = widget.focus_switch
if not focus:
break
widget = focus
return widget
def notify_attention_loss(self):
widget = self
while 1:
if widget.is_modal:
break
parent = widget.parent
if not parent:
break
focus = parent.focus_switch
if focus and focus is not widget:
self.root.notMove = False
focus.dispatch_attention_loss()
widget = parent
def dispatch_attention_loss(self):
widget = self
while widget:
widget.attention_lost()
widget = widget.focus_switch
def attention_lost(self):
pass
def handle_command(self, name, *args):
method = getattr(self, name, None)
if method:
return method(*args)
else:
parent = self.next_handler()
if parent:
return parent.handle_command(name, *args)
def next_handler(self):
if not self.is_modal:
return self.parent
def call_handler(self, name, *args):
method = getattr(self, name, None)
if method:
return method(*args)
else:
return 'pass'
def call_parent_handler(self, name, *args):
parent = self.next_handler()
if parent:
parent.call_handler(name, *args)
def global_to_local(self, p):
return subtract(p, self.local_to_global_offset())
def local_to_global(self, p):
return add(p, self.local_to_global_offset())
def local_to_global_offset(self):
d = self.topleft
parent = self.parent
if parent:
d = add(d, parent.local_to_global_offset())
return d
def key_down(self, event):
k = event.key
#print "Widget.key_down:", k ###
if k == K_RETURN or k == K_KP_ENTER:
if self.enter_response is not None:
self.dismiss(self.enter_response)
return
elif k == K_ESCAPE:
self.root.fix_sticky_ctrl()
if self.cancel_response is not None:
self.dismiss(self.cancel_response)
return
elif k == K_TAB:
self.tab_to_next()
return
self.call_parent_handler('key_down', event)
def key_up(self, event):
self.call_parent_handler('key_up', event)
def is_inside(self, container):
widget = self
while widget:
if widget is container:
return True
widget = widget.parent
return False
@property
def is_hover(self):
return self.root.hover_widget is self
def present(self, centered=True):
#print "Widget: presenting with rect", self.rect
if self.root is None:
self.root = self.get_root()
if "ControlPanel" not in str(self):
self.root.notMove = True
if centered:
self.center = self.root.center
self.root.add(self)
try:
self.root.run_modal(self)
self.dispatch_attention_loss()
finally:
self.root.remove(self)
#print "Widget.present: returning", self.modal_result
if "ControlPanel" not in str(self):
self.root.notMove = False
return self.modal_result
def dismiss(self, value=True):
self.root.notMove = False
self.modal_result = value
def get_root(self):
return root_widget
def get_top_widget(self):
top = self
while top.parent and not top.is_modal:
top = top.parent
return top
def focus(self):
parent = self.next_handler()
if parent:
parent.focus_on(self)
def focus_on(self, subwidget):
old_focus = self.focus_switch
if old_focus is not subwidget:
if old_focus:
old_focus.dispatch_attention_loss()
self.focus_switch = subwidget
self.focus()
def has_focus(self):
return self.is_modal or (self.parent and self.parent.focused_on(self))
def focused_on(self, widget):
return self.focus_switch is widget and self.has_focus()
def focus_chain(self):
result = []
widget = self
while widget:
result.append(widget)
widget = widget.focus_switch
return result
def shrink_wrap(self):
contents = self.subwidgets
if contents:
rects = [widget.rect for widget in contents]
#rmax = Rect.unionall(rects) # broken in PyGame 1.7.1
rmax = rects.pop()
for r in rects:
rmax = rmax.union(r)
self._rect.size = add(rmax.topleft, rmax.bottomright)
#-# Translation live update preparation
self.shrink_wrapped = True
#-#
def invalidate(self):
if self.root:
self.root.bonus_draw_time = False
@staticmethod
def get_cursor(event):
return arrow_cursor
def predict(self, kwds, name):
try:
return kwds[name]
except KeyError:
return theme.root.get(self.__class__, name)
def predict_attr(self, kwds, name):
try:
return kwds[name]
except KeyError:
return getattr(self, name)
def init_attr(self, kwds, name):
try:
return kwds.pop(name)
except KeyError:
return getattr(self, name)
def predict_font(self, kwds, name='font'):
return kwds.get(name) or theme.root.get_font(self.__class__, name)
def get_margin_rect(self):
r = Rect((0, 0), self.size)
d = -2 * self.margin
r.inflate_ip(d, d)
return r
def set_size_for_text(self, width, nlines=1):
if width is not None:
font = self.font
d = 2 * self.margin
if isinstance(width, basestring):
width, height = font.size(width)
width += d + 2
else:
height = font.size("X")[1]
self.size = (width, height * nlines + d)
def tab_to_first(self):
chain = self.get_tab_order()
if chain:
chain[0].focus()
def tab_to_next(self):
top = self.get_top_widget()
chain = top.get_tab_order()
try:
i = chain.index(self)
except ValueError:
return
target = chain[(i + 1) % len(chain)]
target.focus()
def get_tab_order(self):
result = []
self.collect_tab_order(result)
return result
def collect_tab_order(self, result):
if self.visible:
if self.tab_stop:
result.append(self)
for child in self.subwidgets:
child.collect_tab_order(result)
# def tab_to_first(self, start = None):
# if debug_tab:
# print "Enter Widget.tab_to_first:", self ###
# print "...start =", start ###
# if not self.visible:
# if debug_tab: print "...invisible" ###
# self.tab_to_next_in_parent(start)
# elif self.tab_stop:
# if debug_tab: print "...stopping here" ###
# self.focus()
# else:
# if debug_tab: print "...tabbing to next" ###
# self.tab_to_next(start or self)
# if debug_tab: print "Exit Widget.tab_to_first:", self ###
#
# def tab_to_next(self, start = None):
# if debug_tab:
# print "Enter Widget.tab_to_next:", self ###
# print "...start =", start ###
# sub = self.subwidgets
# if sub:
# if debug_tab: print "...tabbing to first subwidget" ###
# sub[0].tab_to_first(start or self)
# else:
# if debug_tab: print "...tabbing to next in parent" ###
# self.tab_to_next_in_parent(start)
# if debug_tab: print "Exit Widget.tab_to_next:", self ###
#
# def tab_to_next_in_parent(self, start):
# if debug_tab:
# print "Enter Widget.tab_to_next_in_parent:", self ###
# print "...start =", start ###
# parent = self.parent
# if parent and not self.is_modal:
# if debug_tab: print "...telling parent to tab to next" ###
# parent.tab_to_next_after(self, start)
# else:
# if self is not start:
# if debug_tab: print "...wrapping back to first" ###
# self.tab_to_first(start)
# if debug_tab: print "Exit Widget.tab_to_next_in_parent:", self ###
#
# def tab_to_next_after(self, last, start):
# if debug_tab:
# print "Enter Widget.tab_to_next_after:", self, last ###
# print "...start =", start ###
# sub = self.subwidgets
# i = sub.index(last) + 1
# if debug_tab: print "...next index =", i, "of", len(sub) ###
# if i < len(sub):
# if debug_tab: print "...tabbing there" ###
# sub[i].tab_to_first(start)
# else:
# if debug_tab: print "...tabbing to next in parent" ###
# self.tab_to_next_in_parent(start)
# if debug_tab: print "Exit Widget.tab_to_next_after:", self, last ###
def inherited(self, attribute):
value = getattr(self, attribute)
if value is not None:
return value
else:
parent = self.next_handler()
if parent:
return parent.inherited(attribute)
def __contains__(self, event):
r = Rect(self._rect)
r.left = 0
r.top = 0
p = self.global_to_local(event.pos)
return r.collidepoint(p)
def get_mouse(self):
return self.root.get_mouse_for(self)
def get_menu_bar(self):
return self._menubar
def set_menu_bar(self, menubar):
if menubar is not self._menubar:
if self._menubar:
self.remove(self._menubar)
self._menubar = menubar
if menubar:
if menubar.width == 0:
menubar.width = self.width
menubar.anchor = 'lr'
self.add(menubar)
def get_is_gl_container(self):
return self._is_gl_container
def set_is_gl_container(self, x):
self._is_gl_container = x
def gl_draw_all(self, root, offset):
if not self.visible:
return
#from OpenGL import GL, GLU
rect = self.rect.move(offset)
if self.is_gl_container:
self.gl_draw_self(root, offset)
suboffset = rect.topleft
for subwidget in self.subwidgets:
subwidget.gl_draw_all(root, suboffset)
else:
try:
surface = Surface(self.size, SRCALPHA)
except:
#size error?
return
self.draw_all(surface)
data = image.tostring(surface, 'RGBA', 1)
w, h = root.size
GL.glViewport(0, 0, w, h)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GLU.gluOrtho2D(0, w, 0, h)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
GL.glRasterPos2i(max(rect.left, 0), max(h - rect.bottom, 0))
GL.glPushAttrib(GL.GL_COLOR_BUFFER_BIT)
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glDrawPixels(self.width, self.height,
GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, fromstring(data, dtype='uint8'))
GL.glPopAttrib()
GL.glFlush()
def gl_draw_self(self, root, offset):
pass
|
|
"""Public project views"""
import operator
import os
import json
import logging
import mimetypes
import md5
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.decorators.cache import cache_control
from django.views.generic import ListView, DetailView
from django.utils.datastructures import SortedDict
from django.views.decorators.cache import cache_page
from taggit.models import Tag
import requests
from .base import ProjectOnboardMixin
from readthedocs.builds.constants import LATEST
from readthedocs.builds.filters import VersionSlugFilter
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project, ImportedFile
from readthedocs.search.indexes import PageIndex
from readthedocs.search.views import LOG_TEMPLATE
log = logging.getLogger(__name__)
search_log = logging.getLogger(__name__ + '.search')
mimetypes.add_type("application/epub+zip", ".epub")
class ProjectIndex(ListView):
"""List view of public :py:cls:`Project` instances"""
model = Project
def get_queryset(self):
queryset = Project.objects.public(self.request.user)
if self.kwargs.get('tag'):
self.tag = get_object_or_404(Tag, slug=self.kwargs.get('tag'))
queryset = queryset.filter(tags__name__in=[self.tag.slug])
else:
self.tag = None
if self.kwargs.get('username'):
self.user = get_object_or_404(User, username=self.kwargs.get('username'))
queryset = queryset.filter(user=self.user)
else:
self.user = None
return queryset
def get_context_data(self, **kwargs):
context = super(ProjectIndex, self).get_context_data(**kwargs)
context['person'] = self.user
context['tag'] = self.tag
return context
project_index = ProjectIndex.as_view()
class ProjectDetailView(ProjectOnboardMixin, DetailView):
"""Display project onboard steps"""
model = Project
slug_url_kwarg = 'project_slug'
def get_queryset(self):
return Project.objects.protected(self.request.user)
def get_context_data(self, **kwargs):
context = super(ProjectDetailView, self).get_context_data(**kwargs)
project = self.get_object()
context['versions'] = Version.objects.public(
user=self.request.user, project=project)
context['filter'] = VersionSlugFilter(self.request.GET,
queryset=context['versions'])
protocol = 'http'
if self.request.is_secure():
protocol = 'https'
version_slug = project.get_default_version()
context['badge_url'] = "%s://%s%s?version=%s" % (
protocol,
settings.PRODUCTION_DOMAIN,
reverse('project_badge', args=[project.slug]),
project.get_default_version(),
)
context['site_url'] = "{url}?badge={version}".format(
url=project.get_docs_url(version_slug),
version=version_slug)
return context
def _badge_return(redirect, url):
if redirect:
return HttpResponseRedirect(url)
else:
response = requests.get(url)
http_response = HttpResponse(response.content,
content_type="image/svg+xml")
http_response['Cache-Control'] = 'no-cache'
http_response['Etag'] = md5.new(url).hexdigest()
return http_response
@cache_control(no_cache=True)
def project_badge(request, project_slug, redirect=True):
"""Return a sweet badge for the project"""
version_slug = request.GET.get('version', LATEST)
style = request.GET.get('style', 'flat')
try:
version = Version.objects.public(request.user).get(
project__slug=project_slug, slug=version_slug)
except Version.DoesNotExist:
url = (
'https://img.shields.io/badge/docs-unknown%20version-yellow.svg?style={style}'
.format(style=style))
return _badge_return(redirect, url)
version_builds = version.builds.filter(type='html', state='finished').order_by('-date')
if not version_builds.exists():
url = (
'https://img.shields.io/badge/docs-no%20builds-yellow.svg?style={style}'
.format(style=style))
return _badge_return(redirect, url)
last_build = version_builds[0]
if last_build.success:
color = 'brightgreen'
else:
color = 'red'
url = 'https://img.shields.io/badge/docs-%s-%s.svg?style=%s' % (
version.slug.replace('-', '--'), color, style)
return _badge_return(redirect, url)
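# For example (derived from the format string above), a passing build of the
# "latest" version with the default "flat" style resolves to:
# https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat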
def project_downloads(request, project_slug):
"""A detail view for a project with various dataz"""
project = get_object_or_404(Project.objects.protected(request.user), slug=project_slug)
versions = Version.objects.public(user=request.user, project=project)
version_data = SortedDict()
for version in versions:
data = version.get_downloads()
# Don't show ones that have no downloads.
if data:
version_data[version] = data
return render_to_response(
'projects/project_downloads.html',
{
'project': project,
'version_data': version_data,
'versions': versions,
},
context_instance=RequestContext(request),
)
def project_download_media(request, project_slug, type_, version_slug):
"""
Download a specific piece of media.
Perform an auth check if serving in private mode.
.. warning:: This is linked directly from the HTML pages.
It should only care about the Version permissions,
not the actual Project permissions.
"""
version = get_object_or_404(
Version.objects.public(user=request.user),
project__slug=project_slug,
slug=version_slug,
)
privacy_level = getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public')
if privacy_level == 'public' or settings.DEBUG:
path = os.path.join(settings.MEDIA_URL, type_, project_slug, version_slug,
'%s.%s' % (project_slug, type_.replace('htmlzip', 'zip')))
return HttpResponseRedirect(path)
else:
# Get relative media path
path = (version.project
.get_production_media_path(
type_=type_, version_slug=version_slug)
.replace(settings.PRODUCTION_ROOT, '/prod_artifacts'))
content_type, encoding = mimetypes.guess_type(path)
content_type = content_type or 'application/octet-stream'
response = HttpResponse(content_type=content_type)
if encoding:
response["Content-Encoding"] = encoding
response['X-Accel-Redirect'] = path
# Include version in filename; this fixes a long-standing bug
filename = "%s-%s.%s" % (project_slug, version_slug, path.split('.')[-1])
response['Content-Disposition'] = 'filename=%s' % filename
return response
def search_autocomplete(request):
"""Return a json list of project names"""
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
queryset = (Project.objects.public(request.user).filter(name__icontains=term)[:20])
ret_list = []
for project in queryset:
ret_list.append({
'label': project.name,
'value': project.slug,
})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, content_type='text/javascript')
def version_autocomplete(request, project_slug):
"""Return a json list of version names"""
queryset = Project.objects.public(request.user)
get_object_or_404(queryset, slug=project_slug)
versions = Version.objects.public(request.user)
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
version_queryset = versions.filter(slug__icontains=term)[:20]
names = version_queryset.values_list('slug', flat=True)
json_response = json.dumps(list(names))
return HttpResponse(json_response, content_type='text/javascript')
def version_filter_autocomplete(request, project_slug):
queryset = Project.objects.public(request.user)
project = get_object_or_404(queryset, slug=project_slug)
versions = Version.objects.public(request.user)
version_filter = VersionSlugFilter(request.GET, queryset=versions)
resp_format = request.GET.get('format', 'json')
if resp_format == 'json':
names = version_filter.qs.values_list('slug', flat=True)
json_response = json.dumps(list(names))
return HttpResponse(json_response, content_type='text/javascript')
elif resp_format == 'html':
return render_to_response(
'core/version_list.html',
{
'project': project,
'versions': versions,
'filter': version_filter,
},
context_instance=RequestContext(request),
)
else:
return HttpResponse(status=400)
def file_autocomplete(request, project_slug):
"""Return a json list of file names"""
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
queryset = ImportedFile.objects.filter(project__slug=project_slug, path__icontains=term)[:20]
ret_list = []
for filename in queryset:
ret_list.append({
'label': filename.path,
'value': filename.path,
})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, content_type='text/javascript')
def elastic_project_search(request, project_slug):
"""Use elastic search to search in a project"""
queryset = Project.objects.protected(request.user)
project = get_object_or_404(queryset, slug=project_slug)
version_slug = request.GET.get('version', LATEST)
query = request.GET.get('q', None)
if query:
user = ''
if request.user.is_authenticated():
user = request.user
log.info(LOG_TEMPLATE.format(
user=user,
project=project or '',
type='inproject',
version=version_slug or '',
language='',
msg=query or '',
))
if query:
kwargs = {}
body = {
"query": {
"bool": {
"should": [
{"match": {"title": {"query": query, "boost": 10}}},
{"match": {"headers": {"query": query, "boost": 5}}},
{"match": {"content": {"query": query}}},
]
}
},
"highlight": {
"fields": {
"title": {},
"headers": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path"],
"filter": {
"and": [
{"term": {"project": project_slug}},
{"term": {"version": version_slug}},
]
},
"size": 50 # TODO: Support pagination.
}
# Add routing to optimize search by hitting the right shard.
kwargs['routing'] = project_slug
results = PageIndex().search(body, **kwargs)
else:
results = {}
if results:
# pre and post 1.0 compat
for num, hit in enumerate(results['hits']['hits']):
for key, val in hit['fields'].items():
if isinstance(val, list):
results['hits']['hits'][num]['fields'][key] = val[0]
return render_to_response(
'search/elastic_project_search.html',
{
'project': project,
'query': query,
'results': results,
},
context_instance=RequestContext(request),
)
def project_versions(request, project_slug):
"""Project version list view
Shows the available versions and lets the user choose which ones to build.
"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
versions = Version.objects.public(user=request.user, project=project, only_active=False)
active_versions = versions.filter(active=True)
inactive_versions = versions.filter(active=False)
inactive_filter = VersionSlugFilter(request.GET, queryset=inactive_versions)
active_filter = VersionSlugFilter(request.GET, queryset=active_versions)
# If there's a wiped query string, check the string against the versions
# list and display a success message. Deleting directories doesn't know how
# to fail. :)
wiped = request.GET.get('wipe', '')
wiped_version = versions.filter(slug=wiped)
if wiped and wiped_version.count():
messages.success(request, 'Version wiped: ' + wiped)
return render_to_response(
'projects/project_version_list.html',
{
'inactive_filter': inactive_filter,
'active_filter': active_filter,
'project': project,
},
context_instance=RequestContext(request)
)
def project_analytics(request, project_slug):
"""Have a analytics API placeholder"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
analytics_cache = cache.get('analytics:%s' % project_slug)
if analytics_cache:
analytics = json.loads(analytics_cache)
else:
try:
resp = requests.get(
'{host}/api/v1/index/1/heatmap/'.format(host=settings.GROK_API_HOST),
params={'project': project.slug, 'days': 7, 'compare': True}
)
analytics = resp.json()
cache.set('analytics:%s' % project_slug, resp.content, 1800)
except requests.exceptions.RequestException:
analytics = None
if analytics:
page_list = list(reversed(sorted(analytics['page'].items(),
key=operator.itemgetter(1))))
version_list = list(reversed(sorted(analytics['version'].items(),
key=operator.itemgetter(1))))
else:
page_list = []
version_list = []
full = request.GET.get('full')
if not full:
page_list = page_list[:20]
version_list = version_list[:20]
return render_to_response(
'projects/project_analytics.html',
{
'project': project,
'analytics': analytics,
'page_list': page_list,
'version_list': version_list,
'full': full,
},
context_instance=RequestContext(request)
)
def project_embed(request, project_slug):
"""Have a content API placeholder"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
version = project.versions.get(slug=LATEST)
files = version.imported_files.order_by('path')
return render_to_response(
'projects/project_embed.html',
{
'project': project,
'files': files,
'settings': {
'GROK_API_HOST': settings.GROK_API_HOST,
'URI': request.build_absolute_uri(location='/').rstrip('/')
}
},
context_instance=RequestContext(request)
)
|
|
import numpy as np
from PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import widget
import orangecanvas.resources as resources
import sys
import os
from crystalpy.diffraction.DiffractionSetup import DiffractionSetup
from crystalpy.diffraction.Diffraction import Diffraction
from crystalpy.diffraction.GeometryType import BraggDiffraction, BraggTransmission, LaueDiffraction, LaueTransmission
from crystalpy.util.PolarizedPhotonBunch import PolarizedPhotonBunch
class OWCrystal(widget.OWWidget):
name = "Crystal"
id = "orange.widgets.dataCrystal"
description = "Application to compute..."
icon = "icons/Crystal.png"
author = "create_widget.py"
maintainer_email = "cappelli@esrf.fr"
priority = 25
category = ""
keywords = ["oasyscrystalpy", "crystalpy", "Crystal"]
# the widget takes in a collection of Photon objects and
# sends out an object of the same type made up of scattered photons.
inputs = [{"name": "photon bunch",
"type": PolarizedPhotonBunch,
"handler": "_set_input",
"doc": ""}]
outputs = [{"name": "photon bunch",
"type": PolarizedPhotonBunch,
"doc": "transfer diffraction results"},
]
want_main_area = False
GEOMETRY_TYPE = Setting(0) # Bragg diffraction
CRYSTAL_NAME = Setting(0) # Si
THICKNESS = Setting(0.01) # centimeters
MILLER_H = Setting(1) # int
MILLER_K = Setting(1) # int
MILLER_L = Setting(1) # int
ASYMMETRY_ANGLE = Setting(0.0) # degrees
AZIMUTHAL_ANGLE = Setting(90.0) # degrees
INCLINATION_ANGLE = Setting(45.0) # degrees
DUMP_TO_FILE = Setting(1) # Yes
FILE_NAME = Setting("crystal.dat")
def __init__(self):
super().__init__()
self._input_available = False
# Define a tuple of crystals to choose from.
self.crystal_names = ("Si", "Ge", "Diamond")
box0 = gui.widgetBox(self.controlArea, " ", orientation="horizontal")
# widget buttons: compute, set defaults, help
gui.button(box0, self, "Compute", callback=self.compute)
gui.button(box0, self, "Defaults", callback=self.defaults)
gui.button(box0, self, "Help", callback=self.get_doc)
box = gui.widgetBox(self.controlArea, " ", orientation="vertical")
idx = -1
# widget index 0
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "GEOMETRY_TYPE",
label=self.unitLabels()[idx], addSpace=True,
items=["Bragg diffraction", "Bragg transmission", "Laue diffraction", "Laue Transmission"],
orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
# widget index 1
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "CRYSTAL_NAME",
label=self.unitLabels()[idx], addSpace=True,
items=['Si', 'Ge', 'Diamond'],
orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
# widget index 2
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "THICKNESS",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 3
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "MILLER_H",
label=self.unitLabels()[idx], addSpace=True,
valueType=int, validator=QIntValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 4
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "MILLER_K",
label=self.unitLabels()[idx], addSpace=True,
valueType=int, validator=QIntValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 5
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "MILLER_L",
label=self.unitLabels()[idx], addSpace=True,
valueType=int, validator=QIntValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 6
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "ASYMMETRY_ANGLE",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 7
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "AZIMUTHAL_ANGLE",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 8
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "INCLINATION_ANGLE",
label=self.unitLabels()[idx], addSpace=True,
                     valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
# widget index 9
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "DUMP_TO_FILE",
label=self.unitLabels()[idx], addSpace=True,
items=["No", "Yes"],
orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
# widget index 10
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "FILE_NAME",
label=self.unitLabels()[idx], addSpace=True)
self.show_at(self.unitFlags()[idx], box1)
self.process_showers()
print("Crystal: Crystal initialized.\n")
gui.rubber(self.controlArea)
def _set_input(self, photon_bunch):
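        """Store the incoming photon bunch and trigger the computation right away."""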
if photon_bunch is not None:
self._input_available = True
self.incoming_bunch = photon_bunch
self.compute()
def unitLabels(self):
return ["Geometry type", "Crystal name", "Thickness [cm]", "Miller H", "Miller K", "Miller L",
"Asymmetry angle [deg]", "Azimuthal angle [deg]",
"Inclination angle [deg]", "Dump to file", "File name"]
def unitFlags(self):
return ["True", "True", "True", "True", "True", "True",
"True", "True",
"True", "True", "self.DUMP_TO_FILE == 1"]
def compute(self):
if not self._input_available:
raise Exception("Crystal: Input data not available!\n")
        # Translate CRYSTAL_NAME (int) into a crystal name (string).
CRYSTAL_NAME = self.crystal_names[self.CRYSTAL_NAME]
outgoing_bunch = OWCrystal.calculate_external_Crystal(GEOMETRY_TYPE=self.GEOMETRY_TYPE,
CRYSTAL_NAME=CRYSTAL_NAME,
THICKNESS=self.THICKNESS,
MILLER_H=self.MILLER_H,
MILLER_K=self.MILLER_K,
MILLER_L=self.MILLER_L,
ASYMMETRY_ANGLE=self.ASYMMETRY_ANGLE,
AZIMUTHAL_ANGLE=self.AZIMUTHAL_ANGLE,
incoming_bunch=self.incoming_bunch,
INCLINATION_ANGLE=self.INCLINATION_ANGLE,
DUMP_TO_FILE=self.DUMP_TO_FILE,
FILE_NAME=self.FILE_NAME)
self.send("photon bunch", outgoing_bunch)
print("Crystal: The results were sent to the viewer.\n")
def defaults(self):
self.resetSettings()
self.compute()
return
def get_doc(self):
print("Crystal: help pressed.\n")
home_doc = resources.package_dirname("orangecontrib.oasyscrystalpy") + "/doc_files/"
filename1 = os.path.join(home_doc, 'CrystalActive'+'.txt')
print("Crystal: Opening file %s\n" % filename1)
if sys.platform == 'darwin':
command = "open -a TextEdit "+filename1+" &"
elif sys.platform == 'linux':
command = "gedit "+filename1+" &"
else:
raise Exception("Crystal: sys.platform did not yield an acceptable value!\n")
os.system(command)
@staticmethod
def calculate_external_Crystal(GEOMETRY_TYPE,
CRYSTAL_NAME,
THICKNESS,
MILLER_H,
MILLER_K,
MILLER_L,
ASYMMETRY_ANGLE,
AZIMUTHAL_ANGLE,
incoming_bunch,
INCLINATION_ANGLE,
DUMP_TO_FILE,
FILE_NAME="tmp.dat"):
# Create a GeometryType object:
# Bragg diffraction = 0
# Bragg transmission = 1
# Laue diffraction = 2
# Laue transmission = 3
if GEOMETRY_TYPE == 0:
GEOMETRY_TYPE_OBJECT = BraggDiffraction()
elif GEOMETRY_TYPE == 1:
GEOMETRY_TYPE_OBJECT = BraggTransmission()
elif GEOMETRY_TYPE == 2:
GEOMETRY_TYPE_OBJECT = LaueDiffraction()
elif GEOMETRY_TYPE == 3:
GEOMETRY_TYPE_OBJECT = LaueTransmission()
else:
raise Exception("Crystal: The geometry type could not be interpreted!\n")
# Create a diffraction setup.
# At this stage I translate angles in radians, energy in eV and all other values in SI units.
print("Crystal: Creating a diffraction setup...\n")
diffraction_setup = DiffractionSetup(geometry_type=GEOMETRY_TYPE_OBJECT, # GeometryType object
crystal_name=str(CRYSTAL_NAME), # string
thickness=float(THICKNESS) * 1e-2, # meters
miller_h=int(MILLER_H), # int
miller_k=int(MILLER_K), # int
miller_l=int(MILLER_L), # int
asymmetry_angle=float(ASYMMETRY_ANGLE) / 180 * np.pi, # radians
azimuthal_angle=float(AZIMUTHAL_ANGLE) / 180 * np.pi) # radians
# incoming_photons=incoming_bunch)
# Create a Diffraction object.
diffraction = Diffraction()
# Create a PolarizedPhotonBunch object holding the results of the diffraction calculations.
print("Crystal: Calculating the outgoing photons...\n")
outgoing_bunch = diffraction.calculateDiffractedPolarizedPhotonBunch(diffraction_setup,
incoming_bunch,
INCLINATION_ANGLE)
# Check that the result of the calculation is indeed a PolarizedPhotonBunch object.
if not isinstance(outgoing_bunch, PolarizedPhotonBunch):
raise Exception("Crystal: Expected PolarizedPhotonBunch as a result, found {}!\n".format(type(outgoing_bunch)))
# Dump data to file if requested.
        if DUMP_TO_FILE == 1:  # "Yes" in the combo box
            print("Crystal: Writing data in {file}...\n".format(file=FILE_NAME))
            with open(FILE_NAME, "w") as file:
                try:
                    file.write("#S 1 photon bunch\n"
                               "#N 8\n"
                               "#L Energy [eV] Vx Vy Vz S0 S1 S2 S3\n")
                    file.write(outgoing_bunch.toString())
                    print("File written to disk: %s" % FILE_NAME)
                except Exception:
                    raise Exception("Crystal: The data could not be dumped onto the specified file!\n")
return outgoing_bunch
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWCrystal()
w.show()
    app.exec_()  # PyQt4 exposes exec_(), not exec()
w.saveSettings()
|
|
#! /usr/bin/env python
# Macro to scale histograms of all Pt-hard bins, using xsec and ntrials from AliAnalysisTaskPWGJEQA.
# This script expects files X/AnalysisResultsPtHardX.root, and will output scaled histograms
# to the same file, in a new output list with suffix "Scaled". The script will automatically loop over
# all output lists, subject to some simple criteria that covers basic use cases (can be adapted as needed).
#
# There is an option "bRemoveOutliers" (off by default) to remove outliers from certain histograms. The features are
# currently hard-coded below so you will need to modify the code as needed. This feature is adapted from code of Raymond Ehlers.
#
# To get scale factors from a reference file, use the option "-f".
#
# Author: James Mulligan (james.mulligan@yale.edu)
#
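# Example usage (illustrative), run from the directory containing the per-bin subdirectories 1/, 2/, ...:
#   python scalePtHardHistos.py                    # compute scale factors from the PWGJEQA task
#   python scalePtHardHistos.py -f reference.root  # read scale factors from a reference file
#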
import ROOT
import argparse
import ctypes
###################################################################################
# Main function
def scalePtHardHistos(referenceFile):
PtHardBins = 20
ptHardLo = [ 5, 7, 9, 12, 16, 21, 28, 36, 45, 57, 70, 85, 99, 115, 132, 150, 169, 190, 212, 235 ]
ptHardHi = [ 7, 9, 12, 16, 21, 28, 36, 45, 57, 70, 85, 99, 115, 132, 150, 169, 190, 212, 235, -1 ]
# Option to remove outliers from specified histograms (see below)
bRemoveOutliers = False
# Get a list of all the output list names, in order to scale all of them
f = ROOT.TFile("1/AnalysisResultsPtHard1.root", "READ")
qaListKeys = f.GetListOfKeys()
qaListNames = []
eventList = ""
for key in qaListKeys:
name = key.GetName()
# Get all relevant lists for which scaling should be done
if referenceFile:
if "PWGJEQA" in name or "Jet" in name or "Emcal" in name: # For the case of using reference scale factors, we want essentially all user tasks
qaListNames.append(name)
else:
if "PWGJEQA" in name: # For the case of computing the scale factors, we want only the PWGJEQA task
qaListNames.append(name)
# Get a list that has the event histograms
if "PWGJEQA" in name or "JetPerformance" in name:
eventList = name
# Note: In the case of embedding, we assume that we only need the number of accepted events, and that internal event selection
# is activated, in which case the event count can be read from any task that has the internal event selection applied
print "Using " + eventList + " for event list."
f.Close()
# Create histogram of NEvents accepted and NEvents acc+rej, as a function of pT-hard bin
hNEventsAcc = ROOT.TH1F("hNEventsAcc", "hNEventsAccepted", PtHardBins+1, 0, PtHardBins+1)
hNEventsTot = ROOT.TH1F("hNEventsTot", "hNEventsTotal", PtHardBins+1, 0, PtHardBins+1)
nEventsAccSum = 0
for bin in range(0,PtHardBins):
hNEventsAcc.GetXaxis().SetBinLabel(bin+1, "%d-%d" % (ptHardLo[bin],ptHardHi[bin]))
hNEventsTot.GetXaxis().SetBinLabel(bin+1, "%d-%d" % (ptHardLo[bin],ptHardHi[bin]))
for bin in range(0,PtHardBins):
GetNEvents(eventList, bin, hNEventsTot, bAcceptedEventsOnly=False)
nEventsAcc = GetNEvents(eventList, bin, hNEventsAcc, bAcceptedEventsOnly=True)
nEventsAccSum += nEventsAcc
nEventsAccAvg = nEventsAccSum/PtHardBins
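  # Used below to normalize each pT-hard bin to the same effective number of accepted events.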
# If a reference file is provided, get the scale factors from there, and scale histos
if referenceFile:
print "Scaling from reference file: " + referenceFile
for bin in range(0,PtHardBins):
# Open ref file and get scale factor for given bin
refFile = ROOT.TFile(referenceFile, "READ")
scaleFactorHist = refFile.Get("hScaleFactor")
scaleFactor = scaleFactorHist.GetBinContent(bin+1)
refFile.Close()
# Open input file and get relevant lists
inputFile = "{0}/AnalysisResultsPtHard{0}.root".format(bin+1)
print("Scaling Pt-hard bin %d" % (bin+1))
f = ROOT.TFile(inputFile, "UPDATE")
# Scale further to account for a different number of events in each pT-hard bin
hNEventsAccepted = f.Get("hNEventsAcc") # Get NEvents histos from file, since we undo the entry after writing it
nEventsAcc = hNEventsAccepted.GetBinContent(bin+1)
eventScaleFactor = nEventsAccAvg/nEventsAcc
print "nEventsAcc: {0}".format(nEventsAcc)
print "scaleFactor: {0}".format(scaleFactor)
print "eventScaleFactor: {0}".format(eventScaleFactor)
for qaListName in qaListNames:
qaList = f.Get(qaListName)
# Now, scale all the histograms
print "Scaling list: " + qaList.GetName()
for obj in qaList:
ScaleAllHistograms(obj, scaleFactor * eventScaleFactor, f, bRemoveOutliers)
# Write the histograms to file
qaList.Write("%sScaled" % qaListName, ROOT.TObject.kSingleKey)
f.Close()
# If no reference file is provided, compute the scale factors and write them, and scale the histos
else:
hXSecPerEvent = ROOT.TH1F("hXSecPerEvent", "hXSecPerEvent", PtHardBins+1, 0, PtHardBins+1)
hNTrialsPerEvent = ROOT.TH1F("hNTrialsPerEvent", "hNTrialsPerEvent", PtHardBins+1, 0, PtHardBins+1)
hScaleFactor = ROOT.TH1F("hScaleFactor", "hScaleFactor", PtHardBins+1, 0, PtHardBins+1)
for bin in range(0,PtHardBins):
# Label histograms
hXSecPerEvent.GetXaxis().SetBinLabel(bin+1, "%d-%d" % (ptHardLo[bin],ptHardHi[bin]))
hNTrialsPerEvent.GetXaxis().SetBinLabel(bin+1, "%d-%d" % (ptHardLo[bin],ptHardHi[bin]))
hScaleFactor.GetXaxis().SetBinLabel(bin+1, "%d-%d" % (ptHardLo[bin],ptHardHi[bin]))
for bin in range(0,PtHardBins):
# Open input file and get relevant lists
inputFile = "{0}/AnalysisResultsPtHard{0}.root".format(bin+1)
print("Scaling Pt-hard bin %d" % (bin+1))
f = ROOT.TFile(inputFile, "UPDATE")
qaList = f.Get(qaListNames[0])
print "Computing scaling factors with list: " + qaList.GetName()
hXsecPtHard = qaList.FindObject("hXsec")
hTrialsPtHard = qaList.FindObject("hNtrials")
hNEventsTotal = f.Get("hNEventsTot") # Get NEvents histos from file, since we undo the entry after writing it
hNEventsAccepted = f.Get("hNEventsAcc")
# Compute: scale factor = xsec per event / trials per event
nEventsTot = hNEventsTotal.GetBinContent(bin+1)
nEventsAcc = hNEventsAccepted.GetBinContent(bin+1)
xsec = hXsecPtHard.GetBinContent(1) / hXsecPtHard.GetEntries()
trials = 1.*hTrialsPtHard.GetBinContent(1) / nEventsTot
scaleFactor = xsec/trials
eventScaleFactor = nEventsAccAvg/nEventsAcc # also scale to account that there are different number of events in each Pt-hard bin
print "nEventsAcc: {0}".format(nEventsAcc)
print "nEventsTot: {0}".format(nEventsTot)
print "scaleFactor: {0}".format(scaleFactor)
print "eventScaleFactor: {0}".format(eventScaleFactor)
hXSecPerEvent.Fill(bin+0.5, xsec)
hNTrialsPerEvent.Fill(bin+0.5, trials)
hScaleFactor.Fill(bin+0.5, scaleFactor)
# Now, scale all the histograms
print "Scaling list: " + qaList.GetName()
for obj in qaList:
ScaleAllHistograms(obj, scaleFactor * eventScaleFactor, f, bRemoveOutliers)
# Write the histograms to file
hXSecPerEvent.Write()
hXSecPerEvent.Reset()
hNTrialsPerEvent.Write()
hNTrialsPerEvent.Reset()
hScaleFactor.Write()
hScaleFactor.Reset()
qaList.Write("%sScaled" % qaListNames[0], ROOT.TObject.kSingleKey)
f.Close()
###################################################################################
# Given event list name eventList, pT-hard bin number, and histogram hNEvents of appropriate form, this function fills
# the number of events (accepted events only if bAcceptedEventsOnly=True, otherwise all events)
def GetNEvents(eventList, bin, hNEvents, bAcceptedEventsOnly = True):
  if bin == 0:
    if bAcceptedEventsOnly:
      print("Getting accepted number of events...")
    else:
      print("Getting total (acc+rej) number of events...")
inputFile = "{0}/AnalysisResultsPtHard{0}.root".format(bin+1)
f = ROOT.TFile(inputFile, "UPDATE")
qaList = f.Get(eventList)
nEvents = 0
# Look for the EventCutOutput from AliEventCuts, and if it doesn't exist, look for histo fHistEventCount
eventCutList = qaList.FindObject("EventCutOutput")
if eventCutList:
hNEventsPtHard = eventCutList.FindObject("fCutStats")
if bAcceptedEventsOnly:
nEvents = hNEventsPtHard.GetBinContent(16)
else:
nEvents = hNEventsPtHard.GetBinContent(1)
    if bin == 0:
      print("from EventCutOutput.")
else:
hNEventsPtHard = qaList.FindObject("fHistEventCount")
if bAcceptedEventsOnly:
nEvents = hNEventsPtHard.GetBinContent(1)
else:
nEvents = hNEventsPtHard.GetBinContent(1) + hNEventsPtHard.GetBinContent(2)
    if bin == 0:
      print("from fHistEventCount.")
hNEvents.Fill(bin+0.5, nEvents)
hNEvents.Write()
hNEvents.Fill(bin+0.5,-nEvents) # undo the entry, since we will later hadd the histograms
f.Close()
return nEvents
###################################################################################
# Function to iterate recursively through an object to scale all TH1/TH2/THnSparse
def ScaleAllHistograms(obj, scaleFactor, f, bRemoveOutliers=False):
if obj.InheritsFrom(ROOT.TProfile.Class()):
print("TProfile %s not scaled..." % obj.GetName())
elif obj.InheritsFrom(ROOT.TH3.Class()):
obj.Sumw2()
if bRemoveOutliers:
name = obj.GetName()
if "JESshiftEMCal" in name or "ResponseMatrixEMCal" in name or "hNEFVsPtEMCal" in name:
removeOutliers(obj, 2.)
obj.Scale(scaleFactor)
print("TH3 %s was scaled..." % obj.GetName())
elif obj.InheritsFrom(ROOT.TH2.Class()):
obj.Sumw2()
obj.Scale(scaleFactor)
print("TH2 %s was scaled..." % obj.GetName())
elif obj.InheritsFrom(ROOT.TH1.Class()):
obj.Sumw2()
obj.Scale(scaleFactor)
print("TH1 %s was scaled..." % obj.GetName())
elif obj.InheritsFrom(ROOT.THnSparse.Class()):
obj.Sumw2()
obj.Scale(scaleFactor)
print("THnSparse %s was scaled..." % obj.GetName())
else:
print("Not a histogram!")
    print(obj.GetName())
for subobj in obj:
ScaleAllHistograms(subobj, scaleFactor, f, bRemoveOutliers)
###################################################################################
# Function to remove outliers from a TH3 (i.e. truncate the spectrum), based on projecting to the y-axis
# It truncates the 3D histogram based on when the 1D projection 4-bin moving average has been above
# "limit" for "nBinsThreshold" bins.
def removeOutliers(hist, limit):
print "Performing outlier removal on {}".format(hist.GetName())
histToCheck = hist.ProjectionY("{histName}_projBefore".format(histName = hist.GetName()))
#plotHist(hist, "./{}_{}.pdf".format(hist.GetName(), "Before"))
#plotHist(histToCheck, "./{}.pdf".format(histToCheck.GetName()), "hist E", True)
# Check with moving average
foundAboveLimit = False
cutLimitReached = False
# The cut index is where we decided cut on that row
cutIndex = -1
nBinsBelowLimitAfterLimit = 0
# n bins that are below threshold before all bins are cut
nBinsThreshold = 4
(preMean, preMedian) = GetHistMeanAndMedian(histToCheck)
for index in range(0, histToCheck.GetNcells()):
print("---------")
avg = MovingAverage(histToCheck, index = index, numberOfCountsBelowIndex = 2, numberOfCountsAboveIndex = 2)
print("Index: {0}, Avg: {1}, BinContent: {5}, foundAboveLimit: {2}, cutIndex: {3}, cutLimitReached: {4}".format(index, avg, foundAboveLimit, cutIndex, cutLimitReached, histToCheck.GetBinContent(index)))
if avg > limit:
foundAboveLimit = True
if not cutLimitReached:
if foundAboveLimit and avg <= limit:
if cutIndex == -1:
cutIndex = index
nBinsBelowLimitAfterLimit += 1
if nBinsBelowLimitAfterLimit != 0 and avg > limit:
# Reset
cutIndex = -1
nBinsBelowLimitAfterLimit = 0
if nBinsBelowLimitAfterLimit > nBinsThreshold:
cutLimitReached = True
# Do not perform removal here because then we miss values between the avg going below
# the limit and crossing the nBinsThreshold
print("Hist checked: {0}, cut index: {1}".format(histToCheck.GetName(), cutIndex))
# Use on both TH1 and TH2 since we don't start removing immediately, but instead only after the limit
if cutLimitReached:
print "Removing outliers"
# Check for values above which they should be removed by translating the global index
x = ctypes.c_int(0)
y = ctypes.c_int(0)
z = ctypes.c_int(0)
for index in range(0, hist.GetNcells()):
# Get the bin x, y, z from the global bin
hist.GetBinXYZ(index, x, y, z)
if y.value >= cutIndex:
if hist.GetBinContent(index) > 1e-3:
print("Cutting for index {}. y bin {}. Cut index: {}".format(index, y, cutIndex))
hist.SetBinContent(index, 0)
hist.SetBinError(index, 0)
else:
print "Hist {} did not have any outliers to cut".format(hist.GetName())
# Check the mean and median
# Use another temporary hist
histToCheck = hist.ProjectionY("{histName}_projAfter".format(histName = hist.GetName()))
(postMean, postMedian) = GetHistMeanAndMedian(histToCheck)
print "Pre outliers removal mean: {}, median: {}".format(preMean, preMedian)
print "Post outliers removal mean: {}, median: {}".format(postMean, postMedian)
#plotHist(hist, "./{}_{}.pdf".format(hist.GetName(), "After"))
#plotHist(histToCheck, "./{}.pdf".format(histToCheck.GetName()), "hist E", True)
def GetHistMeanAndMedian(hist):
# Median
# See: https://root-forum.cern.ch/t/median-of-histogram/7626/5
x = ctypes.c_double(0)
q = ctypes.c_double(0.5)
# Apparently needed to be safe(?)
hist.ComputeIntegral()
hist.GetQuantiles(1, x, q)
mean = hist.GetMean()
return (mean, x.value)
def MovingAverage(hist, index, numberOfCountsBelowIndex = 0, numberOfCountsAboveIndex = 2):
"""
# [-2, 2] includes -2, -1, 0, 1, 2
"""
# Check inputs
if numberOfCountsBelowIndex < 0 or numberOfCountsAboveIndex < 0:
print "Moving average number of counts above or below must be >= 0. Please check the values!"
count = 0.
average = 0.
for i in range(index - numberOfCountsBelowIndex, index + numberOfCountsAboveIndex + 1):
# Avoid going over histogram limits
if i < 0 or i >= hist.GetNcells():
continue
#print("Adding {}".format(hist.GetBinContent(i)))
average += hist.GetBinContent(i)
count += 1
#if count != (numberOfCountsBelowIndex + numberOfCountsAboveIndex + 1):
# print("Count: {}, summed: {}".format(count, (numberOfCountsBelowIndex + numberOfCountsAboveIndex + 1)))
#exit(0)
return average / count
########################################################################################################
# Plot basic histogram ##############################################################################
########################################################################################################
def plotHist(h, outputFilename, drawOptions = "", setLogy = False):
c = ROOT.TCanvas("c","c: hist",600,450)
c.cd()
if setLogy:
c.SetLogy()
h.Draw(drawOptions)
c.SaveAs(outputFilename)
c.Close()
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
print("Executing scalePtHardHistos.py...")
# Define arguments
parser = argparse.ArgumentParser(description="Scale pT-hard bins")
parser.add_argument("-f", "--referenceFile", action="store",
type=str, metavar="referenceFile",
default="",
help="Reference file to get pT-hard scale factors from")
# Parse the arguments
args = parser.parse_args()
scalePtHardHistos(args.referenceFile)
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import os
import re
import struct
import subprocess
import sys
import tempfile
import test_format
BUNDLE_SIZE = 32
def CreateElfContent(bits, text_segment):
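  # Build a minimal ELF image in memory: an ELF header, a single PT_LOAD (r-x)
  # program header, and the given text segment placed at file offset p_offset.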
e_ident = {
32: '\177ELF\1',
64: '\177ELF\2'}[bits]
e_machine = {
32: 3,
64: 62}[bits]
e_phoff = 256
e_phnum = 1
e_phentsize = 0
elf_header_fmt = {
32: '<16sHHIIIIIHHHHHH',
64: '<16sHHIQQQIHHHHHH'}[bits]
elf_header = struct.pack(
elf_header_fmt,
e_ident, 0, e_machine, 0, 0, e_phoff, 0, 0, 0,
e_phentsize, e_phnum, 0, 0, 0)
p_type = 1 # PT_LOAD
p_flags = 5 # r-x
p_filesz = len(text_segment)
p_memsz = p_filesz
p_vaddr = 0
p_offset = 512
p_align = 0
p_paddr = 0
pheader_fmt = {
32: '<IIIIIIII',
64: '<IIQQQQQQ'}[bits]
pheader_fields = {
32: (p_type, p_offset, p_vaddr, p_paddr,
p_filesz, p_memsz, p_flags, p_align),
64: (p_type, p_flags, p_offset, p_vaddr,
p_paddr, p_filesz, p_memsz, p_align)}[bits]
pheader = struct.pack(pheader_fmt, *pheader_fields)
result = elf_header
assert len(result) <= e_phoff
result += '\0' * (e_phoff - len(result))
result += pheader
assert len(result) <= p_offset
result += '\0' * (p_offset - len(result))
result += text_segment
return result
def RunRdfaValidator(options, data):
# Add nops to make it bundle-sized.
data += (-len(data) % BUNDLE_SIZE) * '\x90'
assert len(data) % BUNDLE_SIZE == 0
# TODO(shcherbina): get rid of custom prefix once
# https://code.google.com/p/nativeclient/issues/detail?id=3631
# is actually fixed.
tmp = tempfile.NamedTemporaryFile(prefix='tmprdfa_', mode='wb', delete=False)
try:
tmp.write(CreateElfContent(options.bits, data))
tmp.close()
proc = subprocess.Popen([options.rdfaval, tmp.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
assert stdout == '', stdout
return_code = proc.wait()
finally:
tmp.close()
os.remove(tmp.name)
# Remove the carriage return characters that we get on Windows.
stderr = stderr.replace('\r', '')
return return_code, stderr
def ParseRdfaMessages(stdout):
"""Get (offset, message) pairs from rdfa validator output.
Args:
stdout: Output of rdfa validator as string.
Yields:
Pairs (offset, message).
"""
for line in stdout.split('\n'):
line = line.strip()
if line == '':
continue
if re.match(r"(Valid|Invalid)\.$", line):
continue
m = re.match(r'([0-9a-f]+): (.*)$', line, re.IGNORECASE)
assert m is not None, "can't parse line '%s'" % line
offset = int(m.group(1), 16)
message = m.group(2)
if not message.startswith('warning - '):
yield offset, message
def CheckValidJumpTargets(options, data_chunks):
"""
Check that the validator infers valid jump targets correctly.
This test checks that the validator identifies instruction boundaries and
  superinstructions correctly. In order to do that, it appends a jump at the end
  of the given code, targeting each byte offset in turn. The jump should be valid
  if and only if it targets a boundary between data chunks.
Note that the same chunks as in RunRdfaWithNopPatching are used, but here they
play a different role. In RunRdfaWithNopPatching the partitioning into chunks
is only relevant when the whole snippet is invalid. Here, on the other hand,
we only care about valid snippets, and we use chunks to mark valid jump
targets.
Args:
options: Options as produced by optparse.
data_chunks: List of strings containing binary data. Each such chunk is
expected to correspond to indivisible instruction or superinstruction.
Returns:
None.
"""
data = ''.join(data_chunks)
# Add nops to make it bundle-sized.
data += (-len(data) % BUNDLE_SIZE) * '\x90'
assert len(data) % BUNDLE_SIZE == 0
# Since we check validity of jump target by adding jump and validating
# resulting piece, we rely on validity of original snippet.
return_code, _ = RunRdfaValidator(options, data)
assert return_code == 0, 'Can only validate jump targets on valid snippet'
valid_jump_targets = set()
pos = 0
for data_chunk in data_chunks:
valid_jump_targets.add(pos)
pos += len(data_chunk)
valid_jump_targets.add(pos)
for i in range(pos + 1):
# Encode JMP with 32-bit relative target.
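    # The rel32 displacement is measured from the end of the 5-byte jump, which is
    # appended at offset len(data); targeting offset i gives displacement i - (len(data) + 5).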
jump = '\xe9' + struct.pack('<i', i - (len(data) + 5))
return_code, _ = RunRdfaValidator(options, data + jump)
if return_code == 0:
assert i in valid_jump_targets, (
'Offset 0x%x was reported valid jump target' % i)
else:
assert i not in valid_jump_targets, (
'Offset 0x%x was reported invalid jump target' % i)
class RdfaTestRunner(test_format.TestRunner):
SECTION_NAME = 'rdfa_output'
def CommandLineOptions(self, parser):
parser.add_option('--rdfaval', default='validator_test',
help='Path to the ncval validator executable')
def GetSectionContent(self, options, sections):
data_chunks = list(test_format.ParseHex(sections['hex']))
return_code, stdout = RunRdfaValidator(options, ''.join(data_chunks))
result = ''.join('%x: %s\n' % (offset, message)
for offset, message in ParseRdfaMessages(stdout))
result += 'return code: %d\n' % return_code
if return_code == 0:
print(' Checking jump targets...')
CheckValidJumpTargets(options, data_chunks)
return result
def main(argv):
RdfaTestRunner().Run(argv)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import range
from builtins import object
import ast
import base64
import datetime
import json
import logging
import sys
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from django.urls import reverse
from enum import Enum
from TCLIService.ttypes import TSessionHandle, THandleIdentifier, TOperationState, TOperationHandle, TOperationType
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document, Document2
from desktop.redaction import global_redaction_engine
from librdbms.server import dbms as librdbms_dbms
from useradmin.models import User
from beeswax.design import HQLdesign
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _, gettext_lazy as _t
else:
from django.utils.translation import ugettext as _, ugettext_lazy as _t
LOG = logging.getLogger(__name__)
QUERY_SUBMISSION_TIMEOUT = datetime.timedelta(0, 60 * 60) # 1 hour
# Constants for DB fields, hue ini
BEESWAX = 'beeswax'
HIVE_SERVER2 = 'hiveserver2'
QUERY_TYPES = (HQL, IMPALA, RDBMS, SPARK, HPLSQL) = list(range(5))
class QueryHistory(models.Model):
"""
Holds metadata about all queries that have been executed.
"""
class STATE(Enum):
submitted = 0
running = 1
available = 2
failed = 3
expired = 4
SERVER_TYPE = ((BEESWAX, 'Beeswax'), (HIVE_SERVER2, 'Hive Server 2'),
(librdbms_dbms.MYSQL, 'MySQL'), (librdbms_dbms.POSTGRESQL, 'PostgreSQL'),
(librdbms_dbms.SQLITE, 'sqlite'), (librdbms_dbms.ORACLE, 'oracle'))
owner = models.ForeignKey(User, on_delete=models.CASCADE, db_index=True)
query = models.TextField()
last_state = models.IntegerField(db_index=True)
has_results = models.BooleanField(default=False) # If true, this query will eventually return tabular results.
submission_date = models.DateTimeField(auto_now_add=True)
  # In the case of multiple statements in a query, these are the ids of the currently running statement
  # (aka secret); only a query in the "submitted" state is allowed to have no server_id
server_id = models.CharField(max_length=1024, null=True)
server_guid = models.CharField(max_length=1024, null=True, default=None)
statement_number = models.SmallIntegerField(default=0) # The index of the currently running statement
operation_type = models.SmallIntegerField(null=True)
modified_row_count = models.FloatField(null=True)
log_context = models.CharField(max_length=1024, null=True)
server_host = models.CharField(max_length=128, help_text=_('Host of the query server.'), default='')
server_port = models.PositiveIntegerField(help_text=_('Port of the query server.'), default=10000)
server_name = models.CharField(max_length=128, help_text=_('Name of the query server.'), default='')
server_type = models.CharField(max_length=128, help_text=_('Type of the query server.'), default=BEESWAX, choices=SERVER_TYPE)
query_type = models.SmallIntegerField(help_text=_('Type of the query.'), default=HQL, choices=((HQL, 'HQL'), (IMPALA, 'IMPALA')))
# Some queries (like read/create table) don't have a design
design = models.ForeignKey('SavedQuery', on_delete=models.CASCADE, to_field='id', null=True)
notify = models.BooleanField(default=False) # Notify on completion
is_redacted = models.BooleanField(default=False)
extra = models.TextField(default='{}') # Json fields for extra properties
is_cleared = models.BooleanField(default=False)
class Meta(object):
ordering = ['-submission_date']
@staticmethod
def build(*args, **kwargs):
return HiveServerQueryHistory(*args, **kwargs)
def get_full_object(self):
return HiveServerQueryHistory.objects.get(id=self.id)
@staticmethod
def get(id):
return HiveServerQueryHistory.objects.get(id=id)
@staticmethod
def get_type_name(query_type):
if query_type == IMPALA:
return 'impala'
elif query_type == RDBMS:
return 'rdbms'
elif query_type == SPARK:
return 'spark'
else:
return 'beeswax'
def get_query_server_config(self):
from beeswax.server.dbms import get_query_server_config
query_server = get_query_server_config(QueryHistory.get_type_name(self.query_type))
query_server.update({
'server_name': self.server_name,
# 'server_host': self.server_host, # Always use the live server configuration as the session is currently tied to the connection
# 'server_port': int(self.server_port),
'server_type': self.server_type,
})
return query_server
def get_current_statement(self):
if self.design is not None:
design = self.design.get_design()
return design.get_query_statement(self.statement_number)
else:
return self.query
def refresh_design(self, hql_query):
# Refresh only HQL query part
query = self.design.get_design()
query.hql_query = hql_query
self.design.data = query.dumps()
self.query = hql_query
def is_finished(self):
is_statement_finished = not self.is_running()
if self.design is not None:
design = self.design.get_design()
return is_statement_finished and self.statement_number + 1 == design.statement_count # Last statement
else:
return is_statement_finished
def is_running(self):
return self.last_state in (QueryHistory.STATE.running.value, QueryHistory.STATE.submitted.value)
def is_success(self):
return self.last_state in (QueryHistory.STATE.available.value,)
def is_failure(self):
return self.last_state in (QueryHistory.STATE.expired.value, QueryHistory.STATE.failed.value)
def is_expired(self):
return self.last_state in (QueryHistory.STATE.expired.value,)
def set_to_running(self):
self.last_state = QueryHistory.STATE.running.value
def set_to_failed(self):
self.last_state = QueryHistory.STATE.failed.value
def set_to_available(self):
self.last_state = QueryHistory.STATE.available.value
def set_to_expired(self):
self.last_state = QueryHistory.STATE.expired.value
def save(self, *args, **kwargs):
"""
Override `save` to optionally mask out the query from being saved to the
database. This is because if the beeswax database contains sensitive
information like personally identifiable information, that information
could be leaked into the Hue database and logfiles.
"""
if global_redaction_engine.is_enabled():
redacted_query = global_redaction_engine.redact(self.query)
if self.query != redacted_query:
self.query = redacted_query
self.is_redacted = True
super(QueryHistory, self).save(*args, **kwargs)
def update_extra(self, key, val):
extra = json.loads(self.extra)
extra[key] = val
self.extra = json.dumps(extra)
def get_extra(self, key):
return json.loads(self.extra).get(key)
def make_query_context(type, info):
"""
``type`` is one of "table" and "design", and ``info`` is the table name or design id.
Returns a value suitable for GET param.
"""
if type == 'table':
return "%s:%s" % (type, info)
elif type == 'design':
# Use int() to validate that info is a number
return "%s:%s" % (type, int(info))
LOG.error("Invalid query context type: %s" % (type,))
return '' # Empty string is safer than None
class HiveServerQueryHistory(QueryHistory):
# Map from (thrift) server state
STATE_MAP = {
TOperationState.INITIALIZED_STATE: QueryHistory.STATE.submitted,
TOperationState.RUNNING_STATE: QueryHistory.STATE.running,
TOperationState.FINISHED_STATE: QueryHistory.STATE.available,
TOperationState.CANCELED_STATE: QueryHistory.STATE.failed,
TOperationState.CLOSED_STATE: QueryHistory.STATE.expired,
TOperationState.ERROR_STATE: QueryHistory.STATE.failed,
TOperationState.UKNOWN_STATE: QueryHistory.STATE.failed,
TOperationState.PENDING_STATE: QueryHistory.STATE.submitted,
}
node_type = HIVE_SERVER2
class Meta(object):
proxy = True
def get_handle(self):
secret, guid = HiveServerQueryHandle.get_decoded(self.server_id, self.server_guid)
return HiveServerQueryHandle(secret=secret,
guid=guid,
has_result_set=self.has_results,
operation_type=self.operation_type,
modified_row_count=self.modified_row_count)
def save_state(self, new_state):
self.last_state = new_state.value
self.save()
@classmethod
def is_canceled(self, res):
return res.operationState in (TOperationState.CANCELED_STATE, TOperationState.CLOSED_STATE)
class SavedQuery(models.Model):
"""
  Stores the queries that people have saved or submitted.
Note that this used to be called QueryDesign. Any references to 'design'
probably mean a SavedQuery.
"""
DEFAULT_NEW_DESIGN_NAME = _('My saved query')
AUTO_DESIGN_SUFFIX = _(' (new)')
TYPES = QUERY_TYPES
TYPES_MAPPING = {'beeswax': HQL, 'hql': HQL, 'impala': IMPALA, 'rdbms': RDBMS, 'spark': SPARK, 'hplsql': HPLSQL}
type = models.IntegerField(null=False)
owner = models.ForeignKey(User, on_delete=models.CASCADE, db_index=True)
# Data is a json of dictionary. See the beeswax.design module.
data = models.TextField(max_length=65536)
name = models.CharField(max_length=80)
desc = models.TextField(max_length=1024)
mtime = models.DateTimeField(auto_now=True)
  # An auto design is a place-holder for things users submit but have not saved.
  # We still want to store them as designs so that users can save them later.
is_auto = models.BooleanField(default=False, db_index=True)
is_trashed = models.BooleanField(default=False, db_index=True, verbose_name=_t('Is trashed'),
help_text=_t('If this query is trashed.'))
is_redacted = models.BooleanField(default=False)
doc = GenericRelation(Document, related_query_name='hql_doc')
class Meta(object):
ordering = ['-mtime']
def get_design(self):
try:
return HQLdesign.loads(self.data)
except ValueError:
# data is empty
pass
def clone(self, new_owner=None):
if new_owner is None:
new_owner = self.owner
design = SavedQuery(type=self.type, owner=new_owner)
design.data = self.data
design.name = self.name
design.desc = self.desc
design.is_auto = self.is_auto
return design
@classmethod
def create_empty(cls, app_name, owner, data):
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = SavedQuery(owner=owner, type=query_type)
design.name = SavedQuery.DEFAULT_NEW_DESIGN_NAME
design.desc = ''
if global_redaction_engine.is_enabled():
design.data = global_redaction_engine.redact(data)
else:
design.data = data
design.is_auto = True
design.save()
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
design.doc.get().add_to_history()
return design
@staticmethod
def get(id, owner=None, type=None):
"""
get(id, owner=None, type=None) -> SavedQuery object
Checks that the owner and type match (when given).
May raise PopupException (type/owner mismatch).
May raise SavedQuery.DoesNotExist.
"""
try:
design = SavedQuery.objects.get(id=id)
except SavedQuery.DoesNotExist as err:
      msg = _('Cannot retrieve query id %(id)s.') % {'id': id}
      LOG.error(msg)
      raise err
if owner is not None and design.owner != owner:
msg = _('Query id %(id)s does not belong to user %(user)s.') % {'id': id, 'user': owner}
LOG.error(msg)
raise PopupException(msg)
if type is not None and design.type != type:
msg = _('Type mismatch for design id %(id)s (owner %(owner)s) - Expected %(expected_type)s, got %(real_type)s.') % \
{'id': id, 'owner': owner, 'expected_type': design.type, 'real_type': type}
LOG.error(msg)
raise PopupException(msg)
return design
def __str__(self):
return '%s %s' % (self.name, self.owner)
def get_query_context(self):
try:
return make_query_context('design', self.id)
except:
LOG.exception('failed to make query context')
return ""
def get_absolute_url(self):
return reverse(QueryHistory.get_type_name(self.type) + ':execute_design', kwargs={'design_id': self.id})
def save(self, *args, **kwargs):
"""
Override `save` to optionally mask out the query from being saved to the
database. This is because if the beeswax database contains sensitive
information like personally identifiable information, that information
could be leaked into the Hue database and logfiles.
"""
if global_redaction_engine.is_enabled():
data = json.loads(self.data)
try:
query = data['query']['query']
except KeyError:
pass
else:
redacted_query = global_redaction_engine.redact(query)
if query != redacted_query:
data['query']['query'] = redacted_query
self.is_redacted = True
self.data = json.dumps(data)
super(SavedQuery, self).save(*args, **kwargs)
class SessionManager(models.Manager):
def get_session(self, user, application='beeswax', filter_open=True):
try:
q = self.filter(owner=user, application=application).exclude(guid='').exclude(secret='')
if filter_open:
q = q.filter(status_code=0)
return q.latest("last_used")
except Session.DoesNotExist as e:
return None
def get_n_sessions(self, user, n, application='beeswax', filter_open=True):
q = self.filter(owner=user, application=application).exclude(guid='').exclude(secret='')
if filter_open:
q = q.filter(status_code=0)
q = q.order_by("-last_used")
if n > 0:
return q[0:n]
else:
return q
def get_tez_session(self, user, application, n_sessions):
# Get 2 + n_sessions sessions and filter out the busy ones
sessions = Session.objects.get_n_sessions(user, n=2 + n_sessions, application=application)
LOG.debug('%s sessions found' % len(sessions))
if sessions:
# Include trashed documents to keep the query lazy and avoid retrieving all documents
docs = Document2.objects.get_history(doc_type='query-hive', user=user, include_trashed=True)
busy_sessions = set()
# Only check last 40 documents for performance
for doc in docs[:40]:
try:
snippet_data = json.loads(doc.data)['snippets'][0]
except (KeyError, IndexError):
# data might not contain a 'snippets' field or it might be empty
LOG.warning('No snippets in Document2 object of type query-hive')
continue
session_guid = snippet_data.get('result', {}).get('handle', {}).get('session_guid')
status = snippet_data.get('status')
if status in (QueryHistory.STATE.submitted.name, QueryHistory.STATE.running.name):
if session_guid is not None and session_guid not in busy_sessions:
busy_sessions.add(session_guid)
n_busy_sessions = 0
available_sessions = []
for session in sessions:
if session.guid not in busy_sessions:
available_sessions.append(session)
else:
n_busy_sessions += 1
if n_sessions > 0 and n_busy_sessions == n_sessions:
raise Exception('Too many open sessions. Stop a running query before starting a new one')
if available_sessions:
session = available_sessions[0]
else:
session = None # No available session found
return session
class Session(models.Model):
"""
  A session is bound to a user and an application (e.g. Bob with the Impala application).
"""
owner = models.ForeignKey(User, on_delete=models.CASCADE, db_index=True)
status_code = models.PositiveSmallIntegerField() # ttypes.TStatusCode
  secret = models.TextField(max_length=100)
  guid = models.TextField(max_length=100)
server_protocol_version = models.SmallIntegerField(default=0)
last_used = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Last used'))
application = models.CharField(max_length=128, help_text=_t('Application we communicate with.'), default='beeswax')
properties = models.TextField(default='{}')
objects = SessionManager()
def get_handle(self):
secret, guid = self.get_adjusted_guid_secret()
secret, guid = HiveServerQueryHandle.get_decoded(secret=secret, guid=guid)
handle_id = THandleIdentifier(secret=secret, guid=guid)
return TSessionHandle(sessionId=handle_id)
def get_adjusted_guid_secret(self):
secret = self.secret
guid = self.guid
if sys.version_info[0] > 2 and not isinstance(self.secret, bytes) and not isinstance(self.guid, bytes):
      # Only for Python 3: once bytes have been saved, they come back wrapped in a string object
try:
secret = ast.literal_eval(secret)
guid = ast.literal_eval(guid)
except SyntaxError:
pass
return secret, guid
def get_properties(self):
return json.loads(self.properties) if self.properties else {}
def get_formatted_properties(self):
return [dict({'key': key, 'value': value}) for key, value in list(self.get_properties().items())]
def __str__(self):
return '%s %s' % (self.owner, self.last_used)
class QueryHandle(object):
def __init__(self, secret=None, guid=None, operation_type=None,
has_result_set=None, modified_row_count=None, log_context=None, session_guid=None, session_id=None):
self.secret = secret
self.guid = guid
self.operation_type = operation_type
self.has_result_set = has_result_set
self.modified_row_count = modified_row_count
self.log_context = log_context
def is_valid(self):
    return sum([bool(obj) for obj in self.get()]) > 0
def __str__(self):
return '%s %s' % (self.secret, self.guid)
class HiveServerQueryHandle(QueryHandle):
"""
QueryHandle for Hive Server 2.
Store THandleIdentifier base64 encoded in order to be unicode compatible with Django.
Also store session handle if provided.
"""
def __init__(self, **kwargs):
super(HiveServerQueryHandle, self).__init__(**kwargs)
self.secret, self.guid = self.get_encoded()
self.session_guid = kwargs.get('session_guid')
self.session_id = kwargs.get('session_id')
def get(self):
return self.secret, self.guid
def get_rpc_handle(self):
secret, guid = self.get_decoded(self.secret, self.guid)
operation = getattr(
TOperationType,
TOperationType._VALUES_TO_NAMES.get(self.operation_type, 'EXECUTE_STATEMENT')
)
return TOperationHandle(
operationId=THandleIdentifier(guid=guid, secret=secret),
operationType=operation,
hasResultSet=self.has_result_set,
modifiedRowCount=self.modified_row_count
)
@classmethod
def get_decoded(cls, secret, guid):
if sys.version_info[0] > 2:
return base64.b64decode(secret), base64.b64decode(guid)
else:
return base64.decodestring(secret), base64.decodestring(guid)
def get_encoded(self):
if sys.version_info[0] > 2:
return base64.b64encode(self.secret), base64.b64encode(self.guid)
else:
return base64.encodestring(self.secret), base64.encodestring(self.guid)
# Deprecated. Could be removed.
class BeeswaxQueryHandle(QueryHandle):
"""
QueryHandle for Beeswax.
"""
def __init__(self, secret, has_result_set, log_context):
super(BeeswaxQueryHandle, self).__init__(secret=secret,
has_result_set=has_result_set,
log_context=log_context)
def get(self):
return self.secret, None
def get_rpc_handle(self):
return BeeswaxdQueryHandle(id=self.secret, log_context=self.log_context)
# TODO remove
def get_encoded(self):
return self.get(), None
class MetaInstall(models.Model):
"""
Metadata about the installation. Should have at most one row.
"""
installed_example = models.BooleanField(default=False)
@staticmethod
def get():
"""
MetaInstall.get() -> MetaInstall object
    It helps deal with the fact that this table has just one row.
"""
try:
return MetaInstall.objects.get(id=1)
except MetaInstall.DoesNotExist:
return MetaInstall(id=1)
|
|
'''
This module defines classes needed to manipulate c++ types from pythran.
'''
from pythran.config import cfg
class Weak:
"""
Type Qualifier used to represent a weak type
When a weak type is combined with another type, the weak type is suppressed
"""
class Type(object):
"""
A generic type object to be sub-classed
It maintains a set of qualifiers and
a tuple of fields used for type comparison.
    The keyword arguments are used to build the internal representation:
one attribute per key with the associated value
"""
def __init__(self, **kwargs):
        for k, v in kwargs.items():
setattr(self, k, v)
self.qualifiers = self.qualifiers.copy() # avoid sharing
self.fields = tuple(sorted(kwargs.keys()))
self.iscore = False
def isweak(self):
return Weak in self.qualifiers
def all_types(self):
return {self}
def __eq__(self, other):
havesameclass = self.__class__ == other.__class__
if havesameclass:
def same(x, y):
return getattr(self, x) == getattr(other, y)
return all(same(x, y) for x, y in zip(self.fields, other.fields))
else:
return False
def __add__(self, other):
if self.isweak() and not other.isweak():
return other
if other.isweak() and not self.isweak():
return self
if self == other:
return self
if isinstance(other, CombinedTypes) and self in other.types:
return other
return CombinedTypes([self, other])
def __repr__(self):
return self.generate(lambda x: x)
class NamedType(Type):
"""
A generic type object, to hold scalar types and such
>>> NamedType('long long')
long long
"""
def __init__(self, srepr, qualifiers=None):
if qualifiers is None:
qualifiers = set()
super(NamedType, self).__init__(srepr=srepr, qualifiers=qualifiers)
def generate(self, _):
return self.srepr
class PType(Type):
"""
A generic parametric type
"""
prefix = "__ptype{0}"
count = 0
def __init__(self, fun, ptype):
super(PType, self).__init__(fun=fun,
type=ptype,
qualifiers=ptype.qualifiers,
name=PType.prefix.format(PType.count))
PType.count += 1
def generate(self, ctx):
return ctx(self.type).generate(ctx)
def instanciate(self, caller, arguments):
return InstanciatedType(self.fun,
self.name,
arguments,
caller,
self.qualifiers)
class InstanciatedType(Type):
"""
A type instanciated from a parametric type
"""
def __init__(self, fun, name, arguments, caller, qualifiers):
super(InstanciatedType, self).__init__(fun=fun,
name=name,
arguments=arguments,
qualifiers=qualifiers)
if fun == caller:
self.qualifiers.add(Weak)
def generate(self, ctx):
if self.arguments:
args = ", ".join(ctx(arg).generate(ctx) for arg in self.arguments)
template_params = "<{0}>".format(args)
else:
template_params = ""
return "typename {0}::type{1}::{2}".format(self.fun.name,
template_params,
self.name)
class CombinedTypes(Type):
"""
type resulting from the combination of other types
>>> NamedType('long') + NamedType('long')
long
>>> NamedType('long') + NamedType('char')
typename __combined<char,long>::type
"""
def __init__(self, types):
super(CombinedTypes, self).__init__(
types=types,
qualifiers=set.union(*[t.qualifiers for t in types])
)
def __add__(self, other):
if isinstance(other, CombinedTypes):
return CombinedTypes([self, other])
if other in self.types:
return self
if other.isweak() and not self.isweak():
return self
if self == other:
return self
return CombinedTypes([self, other])
def all_types(self):
out = set()
for t in self.types:
out.update(t.all_types())
return out
def generate(self, ctx):
# gather all underlying types and make sure they do not appear twice
mct = cfg.getint('typing', 'max_container_type')
all_types = self.all_types()
def fot0(t):
return isinstance(t, IndexableType)
def fot1(t):
return isinstance(t, ContainerType)
def fit(t):
return not fot0(t) and not fot1(t)
it = filter(fit, all_types)
it = sorted(it, key=lambda t: t.iscore, reverse=True)
ot0 = filter(fot0, all_types)
ot1 = filter(fot1, all_types)
icombined = sorted({ctx(t).generate(ctx) for t in it if t.iscore})
icombined += sorted({ctx(t).generate(ctx) for t in it if not t.iscore})
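        # keep at most `mct` (typing.max_container_type) indexable/container candidates
        # to bound the size of the generated __combined<...> type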
lcombined0 = sorted({ctx(t).generate(ctx) for t in ot0})[-mct:]
lcombined1 = sorted({ctx(t).generate(ctx) for t in ot1})[-mct:]
combined = icombined + lcombined0 + lcombined1
if len(combined) == 1:
return combined[0]
else:
return 'typename __combined<{0}>::type'.format(",".join(combined))
class ArgumentType(Type):
"""
A type to hold function arguments
>>> ArgumentType(4)
typename std::remove_cv<\
typename std::remove_reference<argument_type4>::type>::type
"""
def __init__(self, num, qualifiers=None):
if qualifiers is None:
qualifiers = set()
super(ArgumentType, self).__init__(num=num,
qualifiers=qualifiers)
def generate(self, _):
argtype = "argument_type{0}".format(self.num)
noref = "typename std::remove_reference<{0}>::type".format(argtype)
return "typename std::remove_cv<{0}>::type".format(noref)
class DependentType(Type):
"""
A class to be sub-classed by any type that depends on another type
"""
def __init__(self, of):
super(DependentType, self).__init__(of=of,
qualifiers=of.qualifiers)
class Assignable(DependentType):
"""
A type which can be assigned
It is used to make the difference between
* transient types (e.g. generated from expression template)
* assignable types (typically type of a variable)
>>> Assignable(NamedType("long"))
typename pythonic::assignable<long>::type
"""
def generate(self, ctx):
return 'typename pythonic::assignable<{0}>::type'.format(
self.of.generate(ctx))
class Returnable(DependentType):
"""
A type which can be returned
It is used to make the difference between
* returned types (that cannot hold a reference to avoid dangling reference)
* assignable types (local to a function)
>>> Returnable(NamedType("long"))
typename pythonic::returnable<long>::type
"""
def generate(self, ctx):
return 'typename pythonic::returnable<{0}>::type'.format(
self.of.generate(ctx))
class Lazy(DependentType):
"""
A type which can be a reference
It is used to make a lazy evaluation of numpy expressions
>>> Lazy(NamedType("long"))
typename pythonic::lazy<long>::type
"""
def generate(self, ctx):
return 'typename pythonic::lazy<{0}>::type'.format(
self.of.generate(ctx))
class DeclType(NamedType):
"""
Gather the type of a variable
>>> DeclType("toto")
typename std::remove_cv<\
typename std::remove_reference<decltype(toto)>::type>::type
"""
def generate(self, _):
return ('typename std::remove_cv<'
'typename std::remove_reference<'
'decltype({0})>::type>::type'.format(self.srepr))
class ContentType(DependentType):
'''
Type of the object in a container
>>> ContentType(DeclType('l'))
typename pythonic::types::content_of<typename std::remove_cv<\
typename std::remove_reference<decltype(l)>::type>::type>::type
'''
def generate(self, ctx):
# the content of a container can be inferred directly
if isinstance(self.of, (ListType, SetType, ContainerType)):
return self.of.of.generate(ctx)
return 'typename pythonic::types::content_of<{0}>::type'.format(
ctx(self.of).generate(ctx))
class IteratorContentType(DependentType):
'''
Type of an iterator over the content of a container
>>> IteratorContentType(NamedType('str'))
typename std::remove_cv<typename std::iterator_traits<\
typename std::remove_reference<str>::type::iterator>::value_type>::type
'''
def generate(self, ctx):
iterator_value_type = ctx(self.of).generate(ctx)
return 'typename std::remove_cv<{0}>::type'.format(
'typename std::iterator_traits<{0}>::value_type'.format(
'typename std::remove_reference<{0}>::type::iterator'.format(
iterator_value_type)
)
)
class GetAttr(Type):
'''
Type of a named attribute
>>> GetAttr(NamedType('complex'), 'real')
decltype(pythonic::__builtin__::getattr<pythonic::types::attr::REAL>\
(std::declval<complex>()))
'''
def __init__(self, param, attr):
super(GetAttr, self).__init__(
qualifiers=param.qualifiers,
param=param,
attr=attr)
def generate(self, ctx):
return ('decltype(pythonic::__builtin__::getattr<{}>({}))'
.format('pythonic::types::attr::' + self.attr.upper(),
'std::declval<' + self.param.generate(ctx) + '>()'))
class ReturnType(Type):
'''
Return type of a call with arguments
>>> ReturnType(NamedType('math::cos'), [NamedType('float')])
decltype(std::declval<math::cos>()(std::declval<float>()))
'''
def __init__(self, ftype, args):
args_qualifiers = [arg.qualifiers for arg in args]
super(ReturnType, self).__init__(
qualifiers=ftype.qualifiers.union(*args_qualifiers),
ftype=ftype,
args=args)
def generate(self, ctx):
# the return type of a constructor is obvious
cg = self.ftype.generate(ctx)
cg = 'std::declval<{0}>()'.format(cg)
args = ("std::declval<{0}>()".format(ctx(arg).generate(ctx))
for arg in self.args)
return 'decltype({0}({1}))'.format(cg, ", ".join(args))
class ElementType(Type):
'''
Type of the ith element of a tuple or container
>>> t = TupleType([NamedType('int'), NamedType('str')])
>>> ElementType(1, t)
typename std::tuple_element<1,typename std::remove_reference<\
decltype(pythonic::types::make_tuple(std::declval<int>(), \
std::declval<str>()))>::type>::type
'''
def __init__(self, index, of):
super(ElementType, self).__init__(qualifiers=of.qualifiers,
of=of,
index=index)
def generate(self, ctx):
return 'typename std::tuple_element<{0},{1}>::type'.format(
self.index,
'typename std::remove_reference<{0}>::type'.format(
ctx(self.of).generate(ctx)
)
)
class ListType(DependentType):
'''
Type holding a list of stuff of the same type
>>> ListType(NamedType('int'))
pythonic::types::list<int>
'''
def generate(self, ctx):
return 'pythonic::types::list<{0}>'.format(ctx(self.of).generate(ctx))
class SetType(DependentType):
'''
Type holding a set of stuff of the same type
>>> SetType(NamedType('int'))
pythonic::types::set<int>
'''
def generate(self, ctx):
return 'pythonic::types::set<{0}>'.format(ctx(self.of).generate(ctx))
class TupleType(Type):
'''
    Type holding a tuple of elements of various types
>>> TupleType([NamedType('int'), NamedType('bool')])
decltype(pythonic::types::make_tuple(std::declval<int>(), \
std::declval<bool>()))
'''
def __init__(self, ofs):
if ofs:
qualifiers = set.union(*[of.qualifiers for of in ofs])
else:
qualifiers = set()
super(TupleType, self).__init__(ofs=ofs, qualifiers=qualifiers)
def generate(self, ctx):
elts = (ctx(of).generate(ctx) for of in self.ofs)
telts = ('std::declval<{0}>()'.format(elt) for elt in elts)
return 'decltype(pythonic::types::make_tuple({0}))'.format(
", ".join(telts))
class DictType(Type):
'''
Type holding a dict of stuff of the same key and value type
>>> DictType(NamedType('int'), NamedType('float'))
pythonic::types::dict<int,float>
'''
def __init__(self, of_key, of_value):
super(DictType, self).__init__(
qualifiers=of_key.qualifiers.union(of_value.qualifiers),
of_key=of_key,
of_value=of_value
)
def generate(self, ctx):
return 'pythonic::types::dict<{0},{1}>'.format(
ctx(self.of_key).generate(ctx),
ctx(self.of_value).generate(ctx))
class ContainerType(DependentType):
'''
Type of any container of stuff of the same type
>>> ContainerType(NamedType('int'))
container<typename std::remove_reference<int>::type>
'''
def generate(self, ctx):
return 'container<typename std::remove_reference<{0}>::type>'.format(
ctx(self.of).generate(ctx))
class IndexableType(DependentType):
'''
Type of any container indexed by the same type
>>> IndexableType(NamedType('int'))
indexable<int>
'''
def generate(self, ctx):
return 'indexable<{0}>'.format(ctx(self.of).generate(ctx))
class ExpressionType(Type):
"""
Result type of an operator call.
>>> op = lambda x,y: x + '+' + y
>>> ExpressionType(op, [NamedType('long'), NamedType('int')])
decltype(std::declval<long>()+std::declval<int>())
"""
def __init__(self, op, exprs):
super(ExpressionType, self).__init__(
qualifiers=set.union(*[expr.qualifiers for expr in exprs]),
op=op,
exprs=exprs)
def generate(self, ctx):
texprs = (ctx(expr).generate(ctx) for expr in self.exprs)
return 'decltype({0})'.format(
self.op(*["std::declval<{0}>()".format(t) for t in texprs]))
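# Illustrative sketch: a minimal example of composing the dependent types
# above, assuming NamedType (defined earlier in this module) renders to its
# bare name, and using an identity function as a stand-in for the caching
# context that the real caller supplies as `ctx`.
def _example_type_composition():  # pragma: no cover - illustrative only
    ctx = lambda t: t  # identity stand-in for the real caching context
    lst = ListType(NamedType('long'))
    print(lst.generate(ctx))
    # -> pythonic::types::list<long>
    print(Assignable(lst).generate(ctx))
    # -> typename pythonic::assignable<pythonic::types::list<long>>::type
    print(DictType(NamedType('str'), lst).generate(ctx))
    # -> pythonic::types::dict<str,pythonic::types::list<long>>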
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serve TensorFlow summary data to a web frontend.
This is a simple web server to proxy data from the event_loader to the web, and
serve static web files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as base_logging
import os
import socket
import sys
import tensorflow as tf
from werkzeug import serving
from tensorboard import util
from tensorboard import version
from tensorboard.backend import application
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins.audio import audio_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.distribution import distributions_plugin
from tensorboard.plugins.graph import graphs_plugin
from tensorboard.plugins.histogram import histograms_plugin
from tensorboard.plugins.image import images_plugin
from tensorboard.plugins.profile import profile_plugin
from tensorboard.plugins.projector import projector_plugin
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.text import text_plugin
# TensorBoard flags
tf.flags.DEFINE_string('logdir', '', """logdir specifies the directory where
TensorBoard will look to find TensorFlow event files that it can display.
TensorBoard will recursively walk the directory structure rooted at logdir,
looking for .*tfevents.* files.
You may also pass a comma separated list of log directories, and TensorBoard
will watch each directory. You can also assign names to individual log
directories by putting a colon between the name and the path, as in
tensorboard --logdir=name1:/path/to/logs/1,name2:/path/to/logs/2
""")
tf.flags.DEFINE_string(
    'host', '', 'What host to listen to. Defaults to '
    'serving on all interfaces, set to 127.0.0.1 (localhost) to '
    'disable remote access (also quiets security warnings).')
tf.flags.DEFINE_integer('port', 6006, 'What port to serve TensorBoard on.')
tf.flags.DEFINE_boolean(
'purge_orphaned_data', True, 'Whether to purge data that '
'may have been orphaned due to TensorBoard restarts. '
'Disabling purge_orphaned_data can be used to debug data '
'disappearance.')
tf.flags.DEFINE_integer('reload_interval', 5,
'How often the backend should load '
'more data.')
tf.flags.DEFINE_string('db', "", """\
[Experimental] Sets SQL database URI.
This mode causes TensorBoard to persist experiments to a SQL database. The
following databases are supported:
- sqlite: Use SQLite built in to Python. URI must specify the path of the
database file, which will be created if it doesn't exist. For example:
--db=sqlite3:~/.tensorboard.db
Warning: This feature is a work in progress and only has limited support.
""")
# Inspect Mode flags
tf.flags.DEFINE_boolean('inspect', False, """Use this flag to print out a digest
of your event files to the command line, when no data is shown on TensorBoard or
the data shown looks weird.
Example usages:
tensorboard --inspect --event_file=myevents.out
tensorboard --inspect --event_file=myevents.out --tag=loss
tensorboard --inspect --logdir=mylogdir
tensorboard --inspect --logdir=mylogdir --tag=loss
See tensorflow/python/summary/event_file_inspector.py for more info and
detailed usage.
""")
tf.flags.DEFINE_string(
'tag', '',
'The particular tag to query for. Only used if --inspect is present')
tf.flags.DEFINE_string(
'event_file', '',
'The particular event file to query for. Only used if --inspect is present '
'and --logdir is not specified.')
FLAGS = tf.flags.FLAGS
def create_tb_app(plugins, assets_zip_provider=None):
"""Read the flags, and create a TensorBoard WSGI application.
Args:
plugins: A list of constructor functions for TBPlugin subclasses.
assets_zip_provider: Delegates to TBContext or uses default if None.
Raises:
ValueError: if a logdir is not specified.
Returns:
A new TensorBoard WSGI application.
"""
if not FLAGS.db and not FLAGS.logdir:
raise ValueError('A logdir must be specified when db is not specified. '
'Run `tensorboard --help` for details and examples.')
return application.standard_tensorboard_wsgi(
assets_zip_provider=assets_zip_provider,
db_uri=FLAGS.db,
logdir=os.path.expanduser(FLAGS.logdir),
purge_orphaned_data=FLAGS.purge_orphaned_data,
reload_interval=FLAGS.reload_interval,
plugins=plugins)
def make_simple_server(tb_app, host=None, port=None):
"""Create an HTTP server for TensorBoard.
Args:
tb_app: The TensorBoard WSGI application to create a server for.
host: Indicates the interfaces to bind to ('::' or '0.0.0.0' for all
interfaces, '::1' or '127.0.0.1' for localhost). A blank value ('')
indicates protocol-agnostic all interfaces. If not specified, will
default to the flag value.
port: The port to bind to (0 indicates an unused port selected by the
operating system). If not specified, will default to the flag value.
Returns:
A tuple of (server, url):
server: An HTTP server object configured to host TensorBoard.
url: A best guess at a URL where TensorBoard will be accessible once the
server has been started.
Raises:
socket.error: If a server could not be constructed with the host and port
specified. Also logs an error message.
"""
if host is None:
host = FLAGS.host
if port is None:
port = FLAGS.port
try:
if host:
# The user gave us an explicit host
server = serving.make_server(host, port, tb_app, threaded=True)
if ':' in host and not host.startswith('['):
# Display IPv6 addresses as [::1]:80 rather than ::1:80
final_host = '[{}]'.format(host)
else:
final_host = host
else:
# We've promised to bind to all interfaces on this host. However, we're
# not sure whether that means IPv4 or IPv6 interfaces.
try:
# First try passing in a blank host (meaning all interfaces). This,
# unfortunately, defaults to IPv4 even if no IPv4 interface is available
# (yielding a socket.error).
server = serving.make_server(host, port, tb_app, threaded=True)
except socket.error:
# If a blank host didn't work, we explicitly request IPv6 interfaces.
server = serving.make_server('::', port, tb_app, threaded=True)
final_host = socket.gethostname()
server.daemon_threads = True
except socket.error as socket_error:
if port == 0:
msg = 'TensorBoard unable to find any open port'
else:
msg = (
'TensorBoard attempted to bind to port %d, but it was already in use'
% port)
tf.logging.error(msg)
print(msg)
raise socket_error
final_port = server.socket.getsockname()[1]
tensorboard_url = 'http://%s:%d' % (final_host, final_port)
return server, tensorboard_url
def run_simple_server(tb_app):
"""Run a TensorBoard HTTP server, and print some messages to the console."""
try:
server, url = make_simple_server(tb_app)
except socket.error:
# An error message was already logged
# TODO(@jart): Remove log and throw anti-pattern.
sys.exit(-1)
logger = base_logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL)
logger.setLevel(base_logging.INFO)
logger.info('TensorBoard %s at %s (Press CTRL+C to quit) ',
version.VERSION, url)
try:
server.serve_forever()
finally:
logger.info('')
def main(unused_argv=None):
util.setup_logging()
if FLAGS.inspect:
tf.logging.info('Not bringing up TensorBoard, but inspecting event files.')
event_file = os.path.expanduser(FLAGS.event_file)
efi.inspect(FLAGS.logdir, event_file, FLAGS.tag)
return 0
else:
plugins = [
core_plugin.CorePlugin,
scalars_plugin.ScalarsPlugin,
images_plugin.ImagesPlugin,
audio_plugin.AudioPlugin,
graphs_plugin.GraphsPlugin,
distributions_plugin.DistributionsPlugin,
histograms_plugin.HistogramsPlugin,
projector_plugin.ProjectorPlugin,
text_plugin.TextPlugin,
profile_plugin.ProfilePlugin,
]
tb = create_tb_app(plugins)
run_simple_server(tb)
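# A minimal sketch of embedding TensorBoard programmatically with the helpers
# above. It assumes flags have already been parsed (as tf.app.run() does for
# main) and that --logdir or --db was supplied; the plugin subset is arbitrary.
def _example_programmatic_serving():  # pragma: no cover - illustrative only
  wsgi_app = create_tb_app([core_plugin.CorePlugin,
                            scalars_plugin.ScalarsPlugin])
  server, url = make_simple_server(wsgi_app)
  print('Serving TensorBoard at %s' % url)
  server.serve_forever()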
if __name__ == '__main__':
tf.app.run()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworksOperations:
"""VirtualNetworksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualNetwork":
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.VirtualNetwork",
**kwargs: Any
) -> "_models.VirtualNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetwork')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.VirtualNetwork",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualNetwork"]:
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.VirtualNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_03_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
async def check_ip_address_availability(
self,
resource_group_name: str,
virtual_network_name: str,
ip_address: Optional[str] = None,
**kwargs: Any
) -> "_models.IPAddressAvailabilityResult":
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPAddressAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.IPAddressAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPAddressAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
# Construct URL
url = self.check_ip_address_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if ip_address is not None:
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore
def list_usage(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualNetworkListUsageResult"]:
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.VirtualNetworkListUsageResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
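# A minimal sketch of how the operations above are typically consumed. Both
# arguments are hypothetical: `network_client` is an already authenticated
# management client exposing this class as its `virtual_networks` attribute,
# and `vnet_model` is a VirtualNetwork model instance to create.
async def _example_usage(network_client, vnet_model):  # pragma: no cover
    # begin_* methods return an AsyncLROPoller; awaiting poller.result() waits
    # for the long-running operation to finish and yields the final model.
    poller = await network_client.virtual_networks.begin_create_or_update(
        'my-resource-group', 'my-vnet', vnet_model)
    created = await poller.result()
    # list/list_all return an AsyncItemPaged that is consumed with `async for`.
    async for vnet in network_client.virtual_networks.list_all():
        print(vnet.name)
    # Deletion follows the same poller pattern.
    delete_poller = await network_client.virtual_networks.begin_delete(
        'my-resource-group', 'my-vnet')
    await delete_poller.result()
    return created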
|
|
# Copyright 2013 Lars Butler & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import geomet
def load(source_file):
"""
Converts Esri Json File to GeoJSON.
:param source_file:
Path to a file that contains the Esri JSON data.
:returns:
A GeoJSON `dict` representing the geometry read from the file.
"""
    return loads(json.load(source_file))
def loads(string):
"""
Construct a GeoJSON `dict` from Esri JSON (string/dict).
    :param string:
        The Esri JSON geometry representation, as a JSON string or a `dict`.
    :returns:
        A GeoJSON `dict` representing the given geometry.
"""
if isinstance(string, str):
string = json.loads(string)
if 'rings' in string:
return _esri_to_geojson_convert['rings'](string)
elif 'paths' in string:
return _esri_to_geojson_convert['paths'](string)
elif 'x' in string or 'y' in string:
return _esri_to_geojson_convert['x'](string)
elif 'points' in string:
return _esri_to_geojson_convert['points'](string)
else:
raise geomet.InvalidGeoJSONException('Invalid EsriJSON: %s' % string)
def dump(obj, dest_file, srid=None):
"""
    Converts a GeoJSON `dict` to Esri JSON and writes it to `dest_file`.
"""
return json.dump(dumps(obj, srid=srid), dest_file)
def dumps(obj, srid=None):
"""
    Dump a GeoJSON-like `dict` to an Esri JSON `dict`.
    :param obj:
        The GeoJSON geometry representation
    :param srid:
        The default SRID (WKID) value to use if none is present in the geometry.
"""
if 'type' in obj and \
obj['type'].lower() in _gj_to_esri.keys():
convert = _gj_to_esri[obj['type'].lower()]
return convert(obj, srid=srid)
else:
raise geomet.InvalidGeoJSONException("Invalid GeoJSON type %s" % obj)
def _extract_geojson_srid(obj):
"""
Extracts the SRID code (WKID code) from geojson. If not found, SRID=4326
:returns: Integer
"""
meta_srid = obj.get('meta', {}).get('srid', None)
# Also try to get it from `crs.properties.name`:
crs_srid = obj.get('crs', {}).get('properties', {}).get('name', None)
if crs_srid is not None:
# Shave off the EPSG: prefix to give us the SRID:
crs_srid = crs_srid.replace('EPSG:', '')
if (meta_srid is not None and
crs_srid is not None and
str(meta_srid) != str(crs_srid)):
raise ValueError(
'Ambiguous CRS/SRID values: %s and %s' % (meta_srid, crs_srid)
)
srid = meta_srid or crs_srid
return srid or 4326
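# Illustrative values for the precedence implemented above (inputs made up):
#   _extract_geojson_srid({'meta': {'srid': 3857}})                        -> 3857
#   _extract_geojson_srid({'crs': {'properties': {'name': 'EPSG:3857'}}})  -> '3857'
#   _extract_geojson_srid({})                                              -> 4326 (default)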
def _dump_geojson_point(obj, srid=None):
"""
    Converts a GeoJSON Point to Esri JSON.
"""
coordkey = 'coordinates'
coords = obj[coordkey]
srid = _extract_geojson_srid(obj) or srid
return {'x': coords[0], 'y': coords[1], "spatialReference": {'wkid': srid}}
def _dump_geojson_multipoint(obj, srid=None):
"""
    Converts a GeoJSON MultiPoint to Esri JSON.
"""
coordkey = 'coordinates'
srid = _extract_geojson_srid(obj) or srid
return {"points": obj[coordkey], "spatialReference": {"wkid": srid}}
def _dump_geojson_polyline(obj, srid=None):
"""
    Converts a GeoJSON LineString or MultiLineString to Esri JSON.
"""
coordkey = 'coordinates'
if obj['type'].lower() == 'linestring':
coordinates = [obj[coordkey]]
else:
coordinates = obj[coordkey]
srid = _extract_geojson_srid(obj) or srid
return {"paths": coordinates, "spatialReference": {"wkid": srid}}
def _dump_geojson_polygon(data, srid=None):
"""
    Converts a GeoJSON Polygon or MultiPolygon to Esri JSON.
"""
coordkey = 'coordinates'
coordinates = data[coordkey]
typekey = ([d for d in data if d.lower() == 'type']
or ['type']).pop()
if data[typekey].lower() == "polygon":
coordinates = [coordinates]
part_list = []
for part in coordinates:
part_item = []
for idx, ring in enumerate(part):
if idx:
part_item.append(None)
for coord in ring:
part_item.append(coord)
if part_item:
part_list.append(part_item)
srid = _extract_geojson_srid(data) or srid
return {'rings': part_list, "spatialReference": {"wkid": srid}}
def _to_gj_point(obj):
"""
    Dump an Esri JSON Point to a GeoJSON Point.
    :param dict obj:
        An EsriJSON-like `dict` representing a Point.
    :returns:
        GeoJSON representation of the Esri JSON Point
"""
if obj.get("x", None) is None or \
obj.get("y", None) is None:
return {'type': 'Point', 'coordinates': ()}
return {'type': 'Point', 'coordinates': (obj.get("x"),
obj.get("y"))}
def _to_gj_polygon(obj):
"""
    Dump an EsriJSON-like Polygon object to GeoJSON.
Input parameters and return value are the POLYGON equivalent to
:func:`_to_gj_point`.
"""
def split_part(a_part):
part_list = []
for item in a_part:
if item is None:
if part_list:
yield part_list
part_list = []
else:
part_list.append((item[0], item[1]))
if part_list:
yield part_list
part_json = [list(split_part(part))
for part in obj['rings']]
return {'type': 'MultiPolygon', 'coordinates': part_json}
def _to_gj_multipoint(data):
"""
    Dump an EsriJSON-like MultiPoint object to a GeoJSON `dict`.
Input parameters and return value are the MULTIPOINT equivalent to
:func:`_to_gj_point`.
:returns: `dict`
"""
    return {'type': 'MultiPoint', 'coordinates': [pt for pt in data['points']]}
def _to_gj_polyline(data):
"""
    Dump an EsriJSON-like Polyline object to a GeoJSON MultiLineString `dict`.
    Input parameters and return value are the MULTILINESTRING equivalent to
    :func:`_to_gj_point`.
"""
return {'type': 'MultiLineString', 'coordinates': [
[((pt[0], pt[1]) if pt else None) for pt in part] for part in data["paths"]]}
_esri_to_geojson_convert = {
"x": _to_gj_point,
"y": _to_gj_point,
"points": _to_gj_multipoint,
"rings": _to_gj_polygon,
"paths": _to_gj_polyline}
_gj_to_esri = {
"point": _dump_geojson_point,
"multipoint": _dump_geojson_multipoint,
"linestring": _dump_geojson_polyline,
"multilinestring": _dump_geojson_polyline,
"polygon": _dump_geojson_polygon,
"multipolygon": _dump_geojson_polygon}
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
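# A small illustration of the point above (values are made up): with repeated
# indices, numpy fancy assignment applies only the last update for a given
# index, whereas the loop accumulates all of them.
#   ref = np.zeros(3); ref[[1, 1]] += np.array([2., 3.])                     # ref[1] == 3.0
#   ref = np.zeros(3); _NumpyAdd(ref, np.array([1, 1]), np.array([2., 3.]))  # ref[1] == 5.0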
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.VariableV1(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; just test that there are no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import IStructure, Structure, IMolecule, \
StructureError, Molecule
from pymatgen.core.lattice import Lattice
import random
import warnings
import os
class IStructureTest(PymatgenTest):
def setUp(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
self.lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = IStructure(self.lattice, ["Si"] * 2, coords)
self.assertEqual(len(self.struct), 2,
"Wrong number of sites in structure!")
self.assertTrue(self.struct.is_ordered)
self.assertTrue(self.struct.ntypesp == 1)
coords = list()
coords.append([0, 0, 0])
coords.append([0., 0, 0.0000001])
self.assertRaises(StructureError, IStructure, self.lattice,
["Si"] * 2, coords, True)
self.propertied_structure = IStructure(
self.lattice, ["Si"] * 2, coords,
site_properties={'magmom': [5, -5]})
def test_bad_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.5, 0.75])
self.assertRaises(StructureError, IStructure, self.lattice,
["Si"] * 3, coords, validate_proximity=True)
        # These shouldn't raise an error
IStructure(self.lattice, ["Si"] * 2, coords[:2], True)
IStructure(self.lattice, ["Si"], coords[:1], True)
def test_volume_and_density(self):
self.assertAlmostEqual(self.struct.volume, 40.04, 2, "Volume wrong!")
self.assertAlmostEqual(self.struct.density, 2.33, 2,
"Incorrect density")
def test_specie_init(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Specie('O', -2): 1.0},
{Specie('Mg', 2): 0.8}], coords)
self.assertEqual(s.composition.formula, 'Mg0.8 O1')
def test_get_sorted_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, ["O", "Li"], coords,
site_properties={'charge': [-2, 1]})
sorted_s = s.get_sorted_structure()
self.assertEqual(sorted_s[0].species_and_occu, Composition("Li"))
self.assertEqual(sorted_s[1].species_and_occu, Composition("O"))
self.assertEqual(sorted_s[0].charge, 1)
self.assertEqual(sorted_s[1].charge, -2)
s = IStructure(self.lattice, ["Se", "C", "Se", "C"],
[[0] * 3, [0.5] * 3, [0.25] * 3, [0.75] * 3])
self.assertEqual([site.specie.symbol
for site in s.get_sorted_structure()],
["C", "C", "Se", "Se"])
def test_fractional_occupations(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{'O': 1.0}, {'Mg': 0.8}],
coords)
self.assertEqual(s.composition.formula, 'Mg0.8 O1')
self.assertFalse(s.is_ordered)
def test_get_distance(self):
self.assertAlmostEqual(self.struct.get_distance(0, 1), 2.35, 2,
"Distance calculated wrongly!")
pt = [0.9, 0.9, 0.8]
self.assertAlmostEqual(self.struct[0].distance_from_point(pt),
1.50332963784, 2,
"Distance calculated wrongly!")
def test_as_dict(self):
si = Specie("Si", 4)
mn = Element("Mn")
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, [{si: 0.5, mn: 0.5}, {si: 0.5}],
coords)
self.assertIn("lattice", struct.as_dict())
self.assertIn("sites", struct.as_dict())
d = self.propertied_structure.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 5)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Specie('O', -2,
properties={"spin": 3}): 1.0},
{Specie('Mg', 2,
properties={"spin": 2}): 0.8}],
coords, site_properties={'magmom': [5, -5]})
d = s.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 5)
self.assertEqual(d['sites'][0]['species'][0]['properties']['spin'], 3)
def test_from_dict(self):
d = self.propertied_structure.as_dict()
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
d = {'lattice': {'a': 3.8401979337, 'volume': 40.044794644251596,
'c': 3.8401979337177736, 'b': 3.840198994344244,
'matrix': [[3.8401979337, 0.0, 0.0],
[1.9200989668, 3.3257101909, 0.0],
[0.0, -2.2171384943, 3.1355090603]],
'alpha': 119.9999908639842, 'beta': 90.0,
'gamma': 60.000009137322195},
'sites': [{'properties': {'magmom': 5}, 'abc': [0.0, 0.0, 0.0],
'occu': 1.0, 'species': [{'occu': 1.0,
'oxidation_state': -2,
'properties': {'spin': 3},
'element': 'O'}],
'label': 'O2-', 'xyz': [0.0, 0.0, 0.0]},
{'properties': {'magmom': -5},
'abc': [0.75, 0.5, 0.75],
'occu': 0.8, 'species': [{'occu': 0.8,
'oxidation_state': 2,
'properties': {'spin': 2},
'element': 'Mg'}],
'label': 'Mg2+:0.800',
'xyz': [3.8401979336749994, 1.2247250003039056e-06,
2.351631795225]}]}
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
self.assertEqual(s[0].specie.spin, 3)
self.assertEqual(type(s), IStructure)
def test_site_properties(self):
site_props = self.propertied_structure.site_properties
self.assertEqual(site_props['magmom'], [5, -5])
self.assertEqual(self.propertied_structure[0].magmom, 5)
self.assertEqual(self.propertied_structure[1].magmom, -5)
def test_copy(self):
new_struct = self.propertied_structure.copy(site_properties={'charge':
[2, 3]})
self.assertEqual(new_struct[0].magmom, 5)
self.assertEqual(new_struct[1].magmom, -5)
self.assertEqual(new_struct[0].charge, 2)
self.assertEqual(new_struct[1].charge, 3)
coords = list()
coords.append([0, 0, 0])
coords.append([0., 0, 0.0000001])
structure = IStructure(self.lattice, ["O", "Si"], coords,
site_properties={'magmom': [5, -5]})
new_struct = structure.copy(site_properties={'charge': [2, 3]},
sanitize=True)
self.assertEqual(new_struct[0].magmom, -5)
self.assertEqual(new_struct[1].magmom, 5)
self.assertEqual(new_struct[0].charge, 3)
self.assertEqual(new_struct[1].charge, 2)
self.assertAlmostEqual(new_struct.volume, structure.volume)
def test_interpolate(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 10)
for s in int_s:
self.assertIsNotNone(s, "Interpolation Failed!")
self.assertEqual(int_s[0].lattice, s.lattice)
self.assertArrayEqual(int_s[1][1].frac_coords, [0.725, 0.5, 0.725])
badlattice = [[1, 0.00, 0.00], [0, 1, 0.00], [0.00, 0, 1]]
struct2 = IStructure(badlattice, ["Si"] * 2, coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si", "Fe"], coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
# Test autosort feature.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s1.pop(0)
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2.pop(2)
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[0].frac_coords, s[0].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
# Make sure autosort has no effect on simpler interpolations,
# and with shuffled sites.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2[0] = "Fe", [0.01, 0.01, 0.01]
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[1].frac_coords, s[1].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
self.assertArrayAlmostEqual(s1[3].frac_coords, s[3].frac_coords)
def test_interpolate_lattice(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
l2 = Lattice.from_lengths_and_angles([3,4,4], [100,100,70])
struct2 = IStructure(l2, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 2, interpolate_lattices=True)
self.assertArrayAlmostEqual(struct.lattice.abc,
int_s[0].lattice.abc)
self.assertArrayAlmostEqual(struct.lattice.angles,
int_s[0].lattice.angles)
self.assertArrayAlmostEqual(struct2.lattice.abc,
int_s[2].lattice.abc)
self.assertArrayAlmostEqual(struct2.lattice.angles,
int_s[2].lattice.angles)
int_angles = [(a + struct2.lattice.angles[i]) / 2
for i, a in enumerate(struct.lattice.angles)]
self.assertArrayAlmostEqual(int_angles,
int_s[1].lattice.angles)
def test_get_primitive_structure(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(fcc_ag.get_primitive_structure()), 1)
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
bcc_li = IStructure(Lattice.cubic(4.09), ["Li"] * 2, coords)
bcc_prim = bcc_li.get_primitive_structure()
self.assertEqual(len(bcc_prim), 1)
self.assertAlmostEqual(bcc_prim.lattice.alpha, 109.47122, 3)
coords = [[0] * 3, [0.5] * 3, [0.25] * 3, [0.26] * 3]
s = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(s.get_primitive_structure()), 4)
def test_primitive_cell_site_merging(self):
l = Lattice.cubic(10)
coords = [[0, 0, 0], [0, 0, 0.5],
[0, 0, 0.26], [0, 0, 0.74]]
sp = ['Ag', 'Ag', 'Be', 'Be']
s = Structure(l, sp, coords)
dm = s.get_primitive_structure().distance_matrix
self.assertArrayAlmostEqual(dm, [[0, 2.5], [2.5, 0]])
def test_primitive_on_large_supercell(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = Structure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
fcc_ag.make_supercell([2, 2, 2])
fcc_ag_prim = fcc_ag.get_primitive_structure()
self.assertEqual(len(fcc_ag_prim), 1)
self.assertAlmostEqual(fcc_ag_prim.volume, 17.10448225)
def test_primitive_positions(self):
coords = [[0, 0, 0], [0.3, 0.35, 0.45]]
s = Structure(Lattice.from_parameters(1,2,3,50,66,88), ["Ag"] * 2, coords)
a = [[-1,2,-3], [3,2,-4], [1,0,-1]]
b = [[4, 0, 0], [1, 1, 0], [3, 0, 1]]
c = [[2, 0, 0], [1, 3, 0], [1, 1, 1]]
for sc_matrix in [c]:
sc = s.copy()
sc.make_supercell(sc_matrix)
prim = sc.get_primitive_structure(0.01)
self.assertEqual(len(prim), 2)
self.assertAlmostEqual(prim.distance_matrix[0,1], 1.0203432356739286)
def test_primitive_structure_volume_check(self):
l = Lattice.tetragonal(10, 30)
coords = [[0.5, 0.8, 0], [0.5, 0.2, 0],
[0.5, 0.8, 0.333], [0.5, 0.5, 0.333],
[0.5, 0.5, 0.666], [0.5, 0.2, 0.666]]
s = IStructure(l, ["Ag"] * 6, coords)
sprim = s.get_primitive_structure(tolerance=0.1)
self.assertEqual(len(sprim), 6)
def test_get_all_neighbors_and_get_neighbors(self):
s = self.struct
nn = s.get_neighbors_in_shell(s[0].frac_coords, 2, 4,
include_index=True)
self.assertEqual(len(nn), 47)
self.assertEqual(nn[0][-1], 0)
r = random.uniform(3, 6)
all_nn = s.get_all_neighbors(r, True)
for i in range(len(s)):
self.assertEqual(len(all_nn[i]), len(s.get_neighbors(s[i], r)))
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
s = Structure(Lattice.cubic(1), ['Li'], [[0,0,0]])
s.make_supercell([2,2,2])
self.assertEqual(sum(map(len, s.get_all_neighbors(3))), 976)
def test_get_all_neighbors_outside_cell(self):
s = Structure(Lattice.cubic(2), ['Li', 'Li', 'Li', 'Si'],
[[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3])
all_nn = s.get_all_neighbors(0.2, True)
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
self.assertEqual(list(map(len, all_nn)), [2, 2, 2, 0])
def test_get_dist_matrix(self):
ans = [[0., 2.3516318],
[2.3516318, 0.]]
self.assertArrayAlmostEqual(self.struct.distance_matrix, ans)
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr"]:
s = self.struct.to(fmt=fmt)
self.assertIsNotNone(s)
ss = IStructure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(
ss.lattice.lengths_and_angles,
self.struct.lattice.lengths_and_angles, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords, self.struct.frac_coords)
self.assertIsInstance(ss, IStructure)
self.struct.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.struct.to(filename="Si_testing.yaml")
self.assertTrue(os.path.exists("Si_testing.yaml"))
s = Structure.from_file("Si_testing.yaml")
self.assertEqual(s, self.struct)
os.remove("Si_testing.yaml")
class StructureTest(PymatgenTest):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.structure = Structure(lattice, ["Si", "Si"], coords)
def test_mutable_sequence_methods(self):
s = self.structure
s[0] = "Fe"
self.assertEqual(s.formula, "Fe1 Si1")
s[0] = "Fe", [0.5, 0.5, 0.5]
self.assertEqual(s.formula, "Fe1 Si1")
self.assertArrayAlmostEqual(s[0].frac_coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("Si"))
self.assertArrayAlmostEqual(s[0].frac_coords, [0.75, 0.5, 0.75])
s[0] = {"Mn": 0.5}
self.assertEqual(s.formula, "Mn0.5 Fe1")
del s[1]
self.assertEqual(s.formula, "Mn0.5")
s[0] = "Fe", [0.9, 0.9, 0.9], {"magmom": 5}
self.assertEqual(s.formula, "Fe1")
self.assertEqual(s[0].magmom, 5)
def test_non_hash(self):
self.assertRaises(TypeError, dict, [(self.structure, 1)])
def test_sort(self):
s = self.structure
s[0] = "F"
s.sort()
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
s.sort(key=lambda site: site.species_string)
self.assertEqual(s[0].species_string, "F")
self.assertEqual(s[1].species_string, "Si")
s.sort(key=lambda site: site.species_string, reverse=True)
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
def test_append_insert_remove_replace(self):
s = self.structure
s.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "Si2 O1")
self.assertTrue(s.ntypesp == 2)
self.assertTrue(s.symbol_set == ("Si", "O"))
self.assertTrue(s.indices_from_symbol("Si") == (0,2))
self.assertTrue(s.indices_from_symbol("O") == (1,))
del s[2]
self.assertEqual(s.formula, "Si1 O1")
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
s.append("N", [0.25, 0.25, 0.25])
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
self.assertTrue(s.symbol_set == ("Si", "O", "N"))
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
self.assertTrue(s.indices_from_symbol("N") == (2,))
s[0] = "Ge"
self.assertEqual(s.formula, "Ge1 N1 O1")
self.assertTrue(s.symbol_set == ("Ge", "O", "N"))
s.replace_species({"Ge": "Si"})
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
s.replace_species({"Si": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.5 Ge0.5 N1 O1")
#this should change the .5Si .5Ge sites to .75Si .25Ge
s.replace_species({"Ge": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.75 Ge0.25 N1 O1")
# In this case, s.ntypesp is ambiguous.
# For the time being, we raise AttributeError.
with self.assertRaises(AttributeError):
s.ntypesp
s.remove_species(["Si"])
self.assertEqual(s.formula, "Ge0.25 N1 O1")
s.remove_sites([1, 2])
self.assertEqual(s.formula, "Ge0.25")
def test_add_site_property(self):
s = self.structure
s.add_site_property("charge", [4.1, -5])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[1].charge, -5)
s.add_site_property("magmom", [3, 2])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[0].magmom, 3)
def test_propertied_structure(self):
#Make sure that site properties are set to None for missing values.
s = self.structure
s.add_site_property("charge", [4.1, -5])
s.append("Li", [0.3, 0.3 ,0.3])
self.assertEqual(len(s.site_properties["charge"]), 3)
def test_perturb(self):
d = 0.1
pre_perturbation_sites = self.structure.sites[:]
self.structure.perturb(distance=d)
post_perturbation_sites = self.structure.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_oxidation_states(self):
oxidation_states = {"Si": -4}
self.structure.add_oxidation_state_by_element(oxidation_states)
for site in self.structure:
for k in site.species_and_occu.keys():
self.assertEqual(k.oxi_state, oxidation_states[k.symbol],
"Wrong oxidation state assigned!")
oxidation_states = {"Fe": 2}
self.assertRaises(ValueError,
self.structure.add_oxidation_state_by_element,
oxidation_states)
self.structure.add_oxidation_state_by_site([2, -4])
self.assertEqual(self.structure[0].specie.oxi_state, 2)
self.assertRaises(ValueError,
self.structure.add_oxidation_state_by_site,
[1])
def test_remove_oxidation_states(self):
co_elem = Element("Co")
o_elem = Element("O")
co_specie = Specie("Co", 2)
o_specie = Specie("O", -2)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice.cubic(10)
s_elem = Structure(lattice, [co_elem, o_elem], coords)
s_specie = Structure(lattice, [co_specie, o_specie], coords)
s_specie.remove_oxidation_states()
self.assertEqual(s_elem, s_specie, "Oxidation state remover "
"failed")
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
self.structure.apply_operation(op)
self.assertArrayAlmostEqual(
self.structure.lattice.matrix,
[[0.000000, 3.840198, 0.000000],
[-3.325710, 1.920099, 0.000000],
[2.217138, -0.000000, 3.135509]], 5)
def test_apply_strain(self):
s = self.structure
initial_coord = s[1].coords
s.apply_strain(0.01)
self.assertAlmostEqual(
s.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(s[1].coords, initial_coord * 1.01)
a1, b1, c1 = s.lattice.abc
s.apply_strain([0.1, 0.2, 0.3])
a2, b2, c2 = s.lattice.abc
self.assertAlmostEqual(a2 / a1, 1.1)
self.assertAlmostEqual(b2 / b1, 1.2)
self.assertAlmostEqual(c2 / c1, 1.3)
def test_scale_lattice(self):
initial_coord = self.structure[1].coords
self.structure.scale_lattice(self.structure.volume * 1.01 ** 3)
self.assertArrayAlmostEqual(
self.structure.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(self.structure[1].coords,
initial_coord * 1.01)
def test_translate_sites(self):
self.structure.translate_sites([0, 1], [0.5, 0.5, 0.5],
frac_coords=True)
self.assertArrayEqual(self.structure.frac_coords[0],
[0.5, 0.5, 0.5])
self.structure.translate_sites([0], [0.5, 0.5, 0.5],
frac_coords=False)
self.assertArrayAlmostEqual(self.structure.cart_coords[0],
[3.38014845, 1.05428585, 2.06775453])
self.structure.translate_sites([0], [0.5, 0.5, 0.5],
frac_coords=True, to_unit_cell=False)
self.assertArrayAlmostEqual(self.structure.frac_coords[0],
[1.00187517, 1.25665291, 1.15946374])
def test_make_supercell(self):
self.structure.make_supercell([2, 1, 1])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell([[1, 0, 0], [2, 1, 0], [0, 0, 1]])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell(2)
self.assertEqual(self.structure.formula, "Si32")
self.assertArrayAlmostEqual(self.structure.lattice.abc,
[15.360792, 35.195996, 7.680396], 5)
def test_disordered_supercell_primitive_cell(self):
l = Lattice.cubic(2)
f = [[0.5, 0.5, 0.5]]
sp = [{'Si': 0.54738}]
s = Structure(l, sp, f)
#this supercell often breaks things
s.make_supercell([[0,-1,1],[-1,1,0],[1,1,1]])
self.assertEqual(len(s.get_primitive_structure()), 1)
def test_another_supercell(self):
#this is included because for some reason the old algo was failing on it
s = self.structure.copy()
s.make_supercell([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
self.assertEqual(s.formula, "Si32")
s = self.structure.copy()
s.make_supercell([[0, 2, 0], [1, 0, 0], [0, 0, 1]])
self.assertEqual(s.formula, "Si4")
def test_to_from_dict(self):
d = self.structure.as_dict()
s2 = Structure.from_dict(d)
self.assertEqual(type(s2), Structure)
def test_propertied_structure_mod(self):
prop_structure = Structure(
self.structure.lattice, ["Si"] * 2, self.structure.frac_coords,
site_properties={'magmom': [5, -5]})
prop_structure.append("C", [0.25, 0.25, 0.25])
d = prop_structure.as_dict()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
s2 = Structure.from_dict(d)
self.assertEqual(len(w), 1)
self.assertEqual(
str(w[0].message),
'Not all sites have property magmom. Missing values are set '
'to None.')
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr", "yaml"]:
s = self.structure.to(fmt=fmt)
self.assertIsNotNone(s)
ss = Structure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(
ss.lattice.lengths_and_angles,
self.structure.lattice.lengths_and_angles, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords,
self.structure.frac_coords)
self.assertIsInstance(ss, Structure)
self.structure.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.structure.to(filename="structure_testing.json")
self.assertTrue(os.path.exists("structure_testing.json"))
s = Structure.from_file("structure_testing.json")
self.assertEqual(s, self.structure)
os.remove("structure_testing.json")
def test_from_spacegroup(self):
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1.formula, "Li8 O4")
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1, s2)
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]],
site_properties={"charge": [1, -2]})
self.assertEqual(sum(s2.site_properties["charge"]), 0)
s = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
self.assertEqual(s.formula, "Cs1 Cl1")
self.assertRaises(ValueError, Structure.from_spacegroup,
"Pm-3m", Lattice.tetragonal(1, 3), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
def test_merge_sites(self):
species = [{'Ag': 0.5}, {'Cl': 0.25}, {'Cl': 0.1},
{'Ag': 0.5}, {'F': 0.15}, {'F': 0.1}]
coords = [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5],
[0, 0, 0], [0.5, 0.5, 1.501], [0.5, 0.5, 1.501]]
s = Structure(Lattice.cubic(1), species, coords)
s.merge_sites()
self.assertEqual(s[0].specie.symbol, 'Ag')
self.assertEqual(s[1].species_and_occu,
Composition({'Cl': 0.35, 'F': 0.25}))
self.assertArrayAlmostEqual(s[1].frac_coords, [.5, .5, .5005])
class IMoleculeTest(PymatgenTest):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.coords = coords
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_bad_molecule(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
[-0.513360, 0.889165, -0.36301]]
self.assertRaises(StructureError, Molecule,
["C", "H", "H", "H", "H", "H"], coords,
validate_proximity=True)
def test_get_angle_dihedral(self):
self.assertAlmostEqual(self.mol.get_angle(1, 0, 2), 109.47122144618737)
self.assertAlmostEqual(self.mol.get_angle(3, 1, 2), 60.00001388659683)
self.assertAlmostEqual(self.mol.get_dihedral(0, 1, 2, 3),
- 35.26438851071765)
coords = list()
coords.append([0, 0, 0])
coords.append([0, 0, 1])
coords.append([0, 1, 1])
coords.append([1, 1, 1])
self.mol2 = Molecule(["C", "O", "N", "S"], coords)
self.assertAlmostEqual(self.mol2.get_dihedral(0, 1, 2, 3), -90)
def test_get_covalent_bonds(self):
self.assertEqual(len(self.mol.get_covalent_bonds()), 4)
def test_properties(self):
self.assertEqual(len(self.mol), 5)
self.assertTrue(self.mol.is_ordered)
self.assertEqual(self.mol.formula, "H4 C1")
def test_repr_str(self):
ans = """Full Formula (H4 C1)
Reduced Formula: H4C
Charge = 0, Spin Mult = 1
Sites (5)
0 C 0.000000 0.000000 0.000000
1 H 0.000000 0.000000 1.089000
2 H 1.026719 0.000000 -0.363000
3 H -0.513360 -0.889165 -0.363000
4 H -0.513360 0.889165 -0.363000"""
self.assertEqual(self.mol.__str__(), ans)
ans = """Molecule Summary
Site: C (0.0000, 0.0000, 0.0000)
Site: H (0.0000, 0.0000, 1.0890)
Site: H (1.0267, 0.0000, -0.3630)
Site: H (-0.5134, -0.8892, -0.3630)
Site: H (-0.5134, 0.8892, -0.3630)"""
self.assertEqual(repr(self.mol), ans)
def test_site_properties(self):
propertied_mol = Molecule(["C", "H", "H", "H", "H"], self.coords,
site_properties={'magmom':
[0.5, -0.5, 1, 2, 3]})
self.assertEqual(propertied_mol[0].magmom, 0.5)
self.assertEqual(propertied_mol[1].magmom, -0.5)
def test_get_boxed_structure(self):
s = self.mol.get_boxed_structure(9, 9, 9)
# C atom should be in center of box.
self.assertArrayAlmostEqual(s[4].frac_coords,
[0.50000001, 0.5, 0.5])
self.assertArrayAlmostEqual(s[1].frac_coords,
[0.6140799, 0.5, 0.45966667])
self.assertRaises(ValueError, self.mol.get_boxed_structure, 1, 1, 1)
s2 = self.mol.get_boxed_structure(5, 5, 5, (2, 3, 4))
self.assertEqual(len(s2), 24 * 5)
self.assertEqual(s2.lattice.abc, (10, 15, 20))
def test_get_distance(self):
self.assertAlmostEqual(self.mol.get_distance(0, 1), 1.089)
def test_get_neighbors(self):
nn = self.mol.get_neighbors(self.mol[0], 1)
self.assertEqual(len(nn), 0)
nn = self.mol.get_neighbors(self.mol[0], 2)
self.assertEqual(len(nn), 4)
def test_get_neighbors_in_shell(self):
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 0, 1)
self.assertEqual(len(nn), 1)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 2, 0.1)
self.assertEqual(len(nn), 0)
def test_get_dist_matrix(self):
ans = [[0.0, 1.089, 1.08899995636, 1.08900040717, 1.08900040717],
[1.089, 0.0, 1.77832952654, 1.7783298026, 1.7783298026],
[1.08899995636, 1.77832952654, 0.0, 1.77833003783,
1.77833003783],
[1.08900040717, 1.7783298026, 1.77833003783, 0.0, 1.77833],
[1.08900040717, 1.7783298026, 1.77833003783, 1.77833, 0.0]]
self.assertArrayAlmostEqual(self.mol.distance_matrix, ans)
def test_break_bond(self):
(mol1, mol2) = self.mol.break_bond(0, 1)
self.assertEqual(mol1.formula, "H3 C1")
self.assertEqual(mol2.formula, "H1")
def test_prop(self):
self.assertEqual(self.mol.charge, 0)
self.assertEqual(self.mol.spin_multiplicity, 1)
self.assertEqual(self.mol.nelectrons, 10)
self.assertArrayAlmostEqual(self.mol.center_of_mass, [0, 0, 0])
self.assertRaises(ValueError, Molecule, ["C", "H", "H", "H", "H"],
self.coords, charge=1, spin_multiplicity=1)
mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertEqual(mol.spin_multiplicity, 2)
self.assertEqual(mol.nelectrons, 9)
#Triplet O2
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]],
spin_multiplicity=3)
self.assertEqual(mol.spin_multiplicity, 3)
def test_equal(self):
mol = IMolecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertNotEqual(mol, self.mol)
def test_get_centered_molecule(self):
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]],
spin_multiplicity=3)
centered = mol.get_centered_molecule()
self.assertArrayAlmostEqual(centered.center_of_mass, [0, 0, 0])
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = IMolecule.from_dict(d)
self.assertEqual(type(mol2), IMolecule)
propertied_mol = Molecule(["C", "H", "H", "H", "H"], self.coords,
charge=1,
site_properties={'magmom':
[0.5, -0.5, 1, 2, 3]})
d = propertied_mol.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 0.5)
mol = Molecule.from_dict(d)
self.assertEqual(propertied_mol, mol)
self.assertEqual(mol[0].magmom, 0.5)
self.assertEqual(mol.formula, "H4 C1")
self.assertEqual(mol.charge, 1)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03", "yaml"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = IMolecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, IMolecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
self.mol.to(filename="CH4_testing.yaml")
self.assertTrue(os.path.exists("CH4_testing.yaml"))
mol = Molecule.from_file("CH4_testing.yaml")
self.assertEqual(self.mol, mol)
os.remove("CH4_testing.yaml")
class MoleculeTest(PymatgenTest):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_mutable_sequence_methods(self):
s = self.mol
s[1] = ("F", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "H3 C1 F1")
self.assertArrayAlmostEqual(s[1].coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("H"))
self.assertArrayAlmostEqual(s[0].coords,
[-0.513360, 0.889165, -0.363000])
del s[1]
self.assertEqual(s.formula, "H2 C1 F1")
s[3] = "N", [0,0,0], {"charge": 4}
self.assertEqual(s.formula, "H2 N1 F1")
self.assertEqual(s[3].charge, 4)
def test_insert_remove_append(self):
mol = self.mol
mol.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol.formula, "H4 C1 O1")
del mol[2]
self.assertEqual(mol.formula, "H3 C1 O1")
mol.set_charge_and_spin(0)
self.assertEqual(mol.spin_multiplicity, 2)
mol.append("N", [0.25, 0.25, 0.25])
self.assertEqual(mol.formula, "H3 C1 N1 O1")
self.assertRaises(TypeError, dict, [(mol, 1)])
mol.remove_sites([0, 1])
self.assertEqual(mol.formula, "H3 N1")
def test_translate_sites(self):
self.mol.translate_sites([0, 1], [0.5, 0.5, 0.5])
self.assertArrayEqual(self.mol.cart_coords[0],
[0.5, 0.5, 0.5])
def test_replace(self):
self.mol[0] = "Ge"
self.assertEqual(self.mol.formula, "Ge1 H4")
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.5 Ge0.5 H4")
#this should change the .5Si .5Ge sites to .75Si .25Ge
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.75 Ge0.25 H4")
d = 0.1
pre_perturbation_sites = self.mol.sites[:]
self.mol.perturb(distance=d)
post_perturbation_sites = self.mol.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_site_property(self):
self.mol.add_site_property("charge", [4.1, -2, -2, -2, -2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[1].charge, -2)
self.mol.add_site_property("magmom", [3, 2, 2, 2, 2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[0].magmom, 3)
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = Molecule.from_dict(d)
self.assertEqual(type(mol2), Molecule)
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
self.mol.apply_operation(op)
self.assertArrayAlmostEqual(self.mol[2].coords,
[0.000000, 1.026719, -0.363000])
def test_substitute(self):
coords = [[0.000000, 0.000000, 1.08],
[0.000000, 0.000000, 0.000000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
sub = Molecule(["X", "C", "H", "H", "H"], coords)
self.mol.substitute(1, sub)
self.assertAlmostEqual(self.mol.get_distance(0, 4), 1.54)
f = Molecule(["X", "F"], [[0, 0, 0], [0, 0, 1.11]])
self.mol.substitute(2, f)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.35)
oh = Molecule(["X", "O", "H"],
[[0, 0.780362, -.456316], [0, 0, .114079],
[0, -.780362, -.456316]])
self.mol.substitute(1, oh)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.43)
self.mol.substitute(3, "methyl")
self.assertEqual(self.mol.formula, "H7 C3 O1 F1")
coords = [[0.00000, 1.40272, 0.00000],
[0.00000, 2.49029, 0.00000],
[-1.21479, 0.70136, 0.00000],
[-2.15666, 1.24515, 0.00000],
[-1.21479, -0.70136, 0.00000],
[-2.15666, -1.24515, 0.00000],
[0.00000, -1.40272, 0.00000],
[0.00000, -2.49029, 0.00000],
[1.21479, -0.70136, 0.00000],
[2.15666, -1.24515, 0.00000],
[1.21479, 0.70136, 0.00000],
[2.15666, 1.24515, 0.00000]]
benzene = Molecule(["C", "H", "C", "H", "C", "H", "C", "H", "C", "H",
"C", "H"], coords)
benzene.substitute(1, sub)
self.assertEqual(benzene.formula, "H8 C7")
#Carbon attached should be in plane.
self.assertAlmostEqual(benzene[11].coords[2], 0)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = Molecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, Molecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
if __name__ == '__main__':
import unittest
unittest.main()
|
|
import warnings
from functools import update_wrapper, wraps
from unittest import TestCase
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import (
login_required, permission_required, user_passes_test,
)
from django.http import HttpRequest, HttpResponse, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.views.decorators.cache import (
cache_control, cache_page, never_cache,
)
from django.views.decorators.clickjacking import (
xframe_options_deny, xframe_options_exempt, xframe_options_sameorigin,
)
from django.views.decorators.http import (
condition, require_GET, require_http_methods, require_POST, require_safe,
)
from django.views.decorators.vary import vary_on_cookie, vary_on_headers
def fully_decorated(request):
"""Expected __doc__"""
return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
# compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs))
functions = list(reversed(functions))
def _inner(*args, **kwargs):
result = functions[0](*args, **kwargs)
for f in functions[1:]:
result = f(result)
return result
return _inner
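# Illustration (not part of the original module): compose applies the last
# listed callable first, so compose(f, g)(x) == f(g(x)). For example,
# compose(str, len)("abc") evaluates len first and returns "3".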
full_decorator = compose(
# django.views.decorators.http
require_http_methods(["GET"]),
require_GET,
require_POST,
require_safe,
condition(lambda r: None, lambda r: None),
# django.views.decorators.vary
vary_on_headers('Accept-language'),
vary_on_cookie,
# django.views.decorators.cache
cache_page(60 * 15),
cache_control(private=True),
never_cache,
# django.contrib.auth.decorators
# Apply user_passes_test twice to check #9474
user_passes_test(lambda u: True),
login_required,
permission_required('change_world'),
# django.contrib.admin.views.decorators
staff_member_required,
# django.utils.functional
allow_lazy,
lazy,
)
# suppress the deprecation warning of memoize
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
fully_decorated = memoize(fully_decorated, {}, 1)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def test_attributes(self):
"""
Tests that django decorators set certain attributes of the wrapped
function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
Test that the user_passes_test decorator can be applied multiple times
(#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser(object):
pass
class DummyRequest(object):
pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page_new_style(self):
"""
Test that we can call cache_page the new way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'HEAD'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'POST'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'PUT'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'DELETE'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
# For testing method_decorator, a decorator that assumes a single argument.
# We will get a TypeError if there is a mismatch in the number of arguments.
def simple_dec(func):
def wrapper(arg):
return func("test:" + arg)
return wraps(func)(wrapper)
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr = True
return wraps(func)(wrapper)
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr2 = True
return wraps(func)(wrapper)
myattr2_dec_m = method_decorator(myattr2_dec)
class ClsDec(object):
def __init__(self, myattr):
self.myattr = myattr
def __call__(self, f):
def wrapped():
return f() and self.myattr
return update_wrapper(wrapped, f)
class MethodDecoratorTests(TestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test(object):
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
@myattr2_dec
def func():
pass
self.assertEqual(getattr(func, 'myattr', False), True)
self.assertEqual(getattr(func, 'myattr2', False), True)
# Now check method_decorator
class Test(object):
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
self.assertEqual(getattr(Test().method, 'myattr', False), True)
self.assertEqual(getattr(Test().method, 'myattr2', False), True)
self.assertEqual(getattr(Test.method, 'myattr', False), True)
self.assertEqual(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.__name__, 'method')
# Test for argumented decorator
def test_argumented(self):
class Test(object):
@method_decorator(ClsDec(False))
def method(self):
return True
self.assertEqual(Test().method(), False)
def test_descriptors(self):
def original_dec(wrapped):
def _wrapped(arg):
return wrapped(arg)
return _wrapped
method_dec = method_decorator(original_dec)
class bound_wrapper(object):
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __call__(self, arg):
return self.wrapped(arg)
def __get__(self, instance, owner):
return self
class descriptor_wrapper(object):
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __get__(self, instance, owner):
return bound_wrapper(self.wrapped.__get__(instance, owner))
class Test(object):
@method_dec
@descriptor_wrapper
def method(self, arg):
return arg
self.assertEqual(Test().method(1), 1)
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertEqual(resp.get('X-Frame-Options', None), None)
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertEqual(r.get('X-Frame-Options', None), None)
class NeverCacheDecoratorTest(TestCase):
def test_never_cache_decorator(self):
@never_cache
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(
set(r['Cache-Control'].split(', ')),
{'max-age=0', 'no-cache', 'no-store', 'must-revalidate'},
)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
# TODO(jbudorick): Split these constants into coherent modules.
# pylint: disable=W0212
import collections
import logging
import os
import subprocess
DIR_SOURCE_ROOT = os.environ.get('CHECKOUT_SOURCE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, os.pardir)))
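# Illustration (assuming this module lives four directories below the source
# root, e.g. build/android/pylib/constants/): the chained os.pardir hops above
# resolve DIR_SOURCE_ROOT to the checkout root, unless CHECKOUT_SOURCE_ROOT
# overrides it.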
ISOLATE_DEPS_DIR = os.path.join(DIR_SOURCE_ROOT, 'isolate_deps_dir')
CHROME_SHELL_HOST_DRIVEN_DIR = os.path.join(
DIR_SOURCE_ROOT, 'chrome', 'android')
PackageInfo = collections.namedtuple('PackageInfo',
['package', 'activity', 'cmdline_file', 'devtools_socket',
'test_package'])
PACKAGE_INFO = {
'chrome_document': PackageInfo(
'com.google.android.apps.chrome.document',
'com.google.android.apps.chrome.document.ChromeLauncherActivity',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome': PackageInfo(
'com.google.android.apps.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
'com.google.android.apps.chrome.tests'),
'chrome_beta': PackageInfo(
'com.chrome.beta',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_stable': PackageInfo(
'com.android.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_dev': PackageInfo(
'com.chrome.dev',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_canary': PackageInfo(
'com.chrome.canary',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chrome_work': PackageInfo(
'com.chrome.work',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'chromium': PackageInfo(
'org.chromium.chrome',
'com.google.android.apps.chrome.Main',
'/data/local/chrome-command-line',
'chrome_devtools_remote',
None),
'legacy_browser': PackageInfo(
'com.google.android.browser',
'com.android.browser.BrowserActivity',
None,
None,
None),
'chromecast_shell': PackageInfo(
'com.google.android.apps.mediashell',
'com.google.android.apps.mediashell.MediaShellActivity',
'/data/local/tmp/castshell-command-line',
None,
None),
'content_shell': PackageInfo(
'org.chromium.content_shell_apk',
'org.chromium.content_shell_apk.ContentShellActivity',
'/data/local/tmp/content-shell-command-line',
None,
'org.chromium.content_shell_apk.tests'),
'chrome_shell': PackageInfo(
'org.chromium.chrome.shell',
'org.chromium.chrome.shell.ChromeShellActivity',
'/data/local/tmp/chrome-shell-command-line',
'chrome_shell_devtools_remote',
'org.chromium.chrome.shell.tests'),
'android_webview_shell': PackageInfo(
'org.chromium.android_webview.shell',
'org.chromium.android_webview.shell.AwShellActivity',
'/data/local/tmp/android-webview-command-line',
None,
'org.chromium.android_webview.test'),
'gtest': PackageInfo(
'org.chromium.native_test',
'org.chromium.native_test.NativeUnitTestActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'components_browsertests': PackageInfo(
'org.chromium.components_browsertests_apk',
('org.chromium.components_browsertests_apk' +
'.ComponentsBrowserTestsActivity'),
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'content_browsertests': PackageInfo(
'org.chromium.content_browsertests_apk',
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity',
'/data/local/tmp/chrome-native-tests-command-line',
None,
None),
'chromedriver_webview_shell': PackageInfo(
'org.chromium.chromedriver_webview_shell',
'org.chromium.chromedriver_webview_shell.Main',
None,
None,
None),
}
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as its default port; if that port
# is unavailable, it will find a free port in the range 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
TEST_SEARCH_BY_IMAGE_SERVER_PORT = 9041
TEST_POLICY_SERVER_PORT = 9051
# The net test server is started from port 10201.
# TODO(pliard): http://crbug.com/239014. Remove this dirty workaround once
# http://crbug.com/239014 is fixed properly.
TEST_SERVER_PORT_FIRST = 10201
TEST_SERVER_PORT_LAST = 30000
# A file to record next valid port of test server.
TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
ADB_KEYS_FILE = '/data/misc/adb/adb_keys'
PERF_OUTPUT_DIR = os.path.join(DIR_SOURCE_ROOT, 'out', 'step_results')
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = (
'/data/data/' + PACKAGE_INFO['chrome'].package + '/files')
SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
class ANDROID_SDK_VERSION_CODES(object):
"""Android SDK version codes.
http://developer.android.com/reference/android/os/Build.VERSION_CODES.html
"""
ICE_CREAM_SANDWICH = 14
ICE_CREAM_SANDWICH_MR1 = 15
JELLY_BEAN = 16
JELLY_BEAN_MR1 = 17
JELLY_BEAN_MR2 = 18
KITKAT = 19
KITKAT_WATCH = 20
LOLLIPOP = 21
LOLLIPOP_MR1 = 22
MARSHMALLOW = 23
ANDROID_SDK_VERSION = ANDROID_SDK_VERSION_CODES.MARSHMALLOW
ANDROID_SDK_BUILD_TOOLS_VERSION = '23.0.0'
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party/android_tools/sdk')
ANDROID_SDK_TOOLS = os.path.join(ANDROID_SDK_ROOT,
'build-tools', ANDROID_SDK_BUILD_TOOLS_VERSION)
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party/android_tools/ndk')
EMULATOR_SDK_ROOT = os.environ.get('ANDROID_EMULATOR_SDK_ROOT',
os.path.join(DIR_SOURCE_ROOT,
'android_emulator_sdk'))
BAD_DEVICES_JSON = os.path.join(DIR_SOURCE_ROOT,
os.environ.get('CHROMIUM_OUT_DIR', 'out'),
'bad_devices.json')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
DEVICE_LOCAL_PROPERTIES_PATH = '/data/local.prop'
PYTHON_UNIT_TEST_SUITES = {
'pylib_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android'),
'test_modules': [
'pylib.cmd_helper_test',
'pylib.device.device_utils_test',
'pylib.results.json_results_test',
'pylib.utils.md5sum_test',
]
},
'gyp_py_unittests': {
'path': os.path.join(DIR_SOURCE_ROOT, 'build', 'android', 'gyp'),
'test_modules': [
'java_cpp_enum_tests',
]
},
}
LOCAL_MACHINE_TESTS = ['junit', 'python']
VALID_ENVIRONMENTS = ['local', 'remote_device']
VALID_TEST_TYPES = ['gtest', 'instrumentation', 'junit', 'linker', 'monkey',
'perf', 'python', 'uiautomator', 'uirobot']
VALID_DEVICE_TYPES = ['Android', 'iOS']
def GetBuildType():
try:
return os.environ['BUILDTYPE']
except KeyError:
raise EnvironmentError(
'The BUILDTYPE environment variable has not been set')
def SetBuildType(build_type):
os.environ['BUILDTYPE'] = build_type
def SetBuildDirectory(build_directory):
os.environ['CHROMIUM_OUT_DIR'] = build_directory
def SetOutputDirectory(output_directory):
os.environ['CHROMIUM_OUTPUT_DIR'] = output_directory
def GetOutDirectory(build_type=None):
"""Returns the out directory where the output binaries are built.
Args:
build_type: Build type, generally 'Debug' or 'Release'. Defaults to the
globally set build type environment variable BUILDTYPE.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
GetBuildType() if build_type is None else build_type))
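# Example (illustrative): with CHROMIUM_OUTPUT_DIR and CHROMIUM_OUT_DIR unset
# and BUILDTYPE='Debug', GetOutDirectory() returns
# os.path.join(DIR_SOURCE_ROOT, 'out', 'Debug').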
def _Memoize(func):
def Wrapper():
try:
return func._result
except AttributeError:
func._result = func()
return func._result
return Wrapper
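# Note (illustrative): _Memoize caches the first return value on the function
# object itself (func._result), so later calls skip recomputation. It only
# supports zero-argument functions such as _FindAdbPath below.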
def SetAdbPath(adb_path):
os.environ['ADB_PATH'] = adb_path
def GetAdbPath():
# Check if a custom adb path has been set. If not, try to find adb
# on the system.
if os.environ.get('ADB_PATH'):
return os.environ.get('ADB_PATH')
else:
return _FindAdbPath()
@_Memoize
def _FindAdbPath():
if os.environ.get('ANDROID_SDK_ROOT'):
return 'adb'
# If envsetup.sh hasn't been sourced and there's no adb in the path,
# set it here.
try:
with open(os.devnull, 'w') as devnull:
subprocess.call(['adb', 'version'], stdout=devnull, stderr=devnull)
return 'adb'
except OSError:
logging.debug('No adb found in $PATH, fallback to checked in binary.')
return os.path.join(ANDROID_SDK_ROOT, 'platform-tools', 'adb')
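# Summary of the lookup order above: a set ANDROID_SDK_ROOT environment
# variable implies adb is already on the PATH; otherwise `adb version` is
# probed, and on failure the binary checked in under the module-level
# ANDROID_SDK_ROOT (third_party/android_tools/sdk/platform-tools) is used.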
# Exit codes
ERROR_EXIT_CODE = 1
INFRA_EXIT_CODE = 87
WARNING_EXIT_CODE = 88
|
|
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import text_type
from security_monkey.views import AuthenticatedService
from security_monkey.views import IGNORELIST_FIELDS
from security_monkey.datastore import IgnoreListEntry
from security_monkey.datastore import Technology
from security_monkey import db, rbac
from flask_restful import marshal, reqparse
class IgnoreListGetPutDelete(AuthenticatedService):
decorators = [
rbac.allow(["Admin"], ["GET", "PUT", "DELETE"]),
rbac.allow(["View"], ["GET"])
]
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(IgnoreListGetPutDelete, self).__init__()
def get(self, item_id):
"""
.. http:get:: /api/1/ignorelistentries/<int:id>
Get the ignorelist entry with the given ID.
**Example Request**:
.. sourcecode:: http
GET /api/1/ignorelistentries/123 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"id": 123,
"prefix": "noisy_",
"notes": "Security Monkey shouldn't track noisy_* objects",
"technology": "securitygroup",
auth: {
authenticated: true,
user: "user@example.com"
}
}
:statuscode 200: no error
:statuscode 404: item with given ID not found
:statuscode 401: Authentication failure. Please login.
"""
result = IgnoreListEntry.query.filter(IgnoreListEntry.id == item_id).first()
if not result:
return {"status": "Ignorelist entry with the given ID not found."}, 404
ignorelistentry_marshaled = marshal(result.__dict__, IGNORELIST_FIELDS)
ignorelistentry_marshaled['technology'] = result.technology.name
ignorelistentry_marshaled['auth'] = self.auth_dict
return ignorelistentry_marshaled, 200
def put(self, item_id):
"""
.. http:put:: /api/1/ignorelistentries/<int:id>
Update the ignorelist entry with the given ID.
**Example Request**:
.. sourcecode:: http
PUT /api/1/ignorelistentries/123 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"id": 123,
"prefix": "noisy_",
"notes": "Security Monkey shouldn't track noisy_* objects",
"technology": "securitygroup"
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"id": 123,
"prefix": "noisy_",
"notes": "Security Monkey shouldn't track noisy_* objects",
"technology": "securitygroup",
auth: {
authenticated: true,
user: "user@example.com"
}
}
:statuscode 200: no error
:statuscode 404: item with given ID not found
:statuscode 401: Authentication failure. Please login.
"""
self.reqparse.add_argument('prefix', required=True, type=text_type, help='A prefix must be provided which matches the objects you wish to ignore.', location='json')
self.reqparse.add_argument('notes', required=False, type=text_type, help='Add context.', location='json')
self.reqparse.add_argument('technology', required=True, type=text_type, help='Technology name required.', location='json')
args = self.reqparse.parse_args()
prefix = args['prefix']
technology = args.get('technology', True)
notes = args.get('notes', None)
result = IgnoreListEntry.query.filter(IgnoreListEntry.id == item_id).first()
if not result:
return {"status": "Ignore list entry with the given ID not found."}, 404
result.prefix = prefix
result.notes = notes
technology = Technology.query.filter(Technology.name == technology).first()
if not technology:
return {"status": "Could not find a technology with the given name"}, 500
result.tech_id = technology.id
db.session.add(result)
db.session.commit()
db.session.refresh(result)
ignorelistentry_marshaled = marshal(result.__dict__, IGNORELIST_FIELDS)
ignorelistentry_marshaled['technology'] = result.technology.name
ignorelistentry_marshaled['auth'] = self.auth_dict
return ignorelistentry_marshaled, 200
def delete(self, item_id):
"""
.. http:delete:: /api/1/ignorelistentries/123
Delete an ignorelist entry.
**Example Request**:
.. sourcecode:: http
DELETE /api/1/ignorelistentries/123 HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 202 Accepted
Vary: Accept
Content-Type: application/json
{
'status': 'deleted'
}
:statuscode 202: accepted
:statuscode 401: Authentication Error. Please Login.
"""
IgnoreListEntry.query.filter(IgnoreListEntry.id == item_id).delete()
db.session.commit()
return {'status': 'deleted'}, 202
class IgnorelistListPost(AuthenticatedService):
decorators = [
rbac.allow(["Admin"], ["GET", "POST"]),
rbac.allow(["View"], ["GET"])
]
def get(self):
"""
.. http:get:: /api/1/ignorelistentries
Get a list of Ignorelist entries.
**Example Request**:
.. sourcecode:: http
GET /api/1/ignorelistentries HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
count: 1,
items: [
{
"id": 123,
"prefix": "noisy_",
"notes": "Security Monkey shouldn't track noisy_* objects",
"technology": "securitygroup"
},
],
total: 1,
page: 1,
auth: {
authenticated: true,
user: "user@example.com"
}
}
:statuscode 200: no error
:statuscode 401: Authentication failure. Please login.
"""
self.reqparse.add_argument('count', type=int, default=30, location='args')
self.reqparse.add_argument('page', type=int, default=1, location='args')
args = self.reqparse.parse_args()
page = args.pop('page', None)
count = args.pop('count', None)
result = IgnoreListEntry.query.order_by(IgnoreListEntry.id).paginate(page, count, error_out=False)
items = []
for entry in result.items:
ignorelistentry_marshaled = marshal(entry.__dict__, IGNORELIST_FIELDS)
ignorelistentry_marshaled["technology"] = entry.technology.name
items.append(ignorelistentry_marshaled)
marshaled_dict = {
'total': result.total,
'count': len(items),
'page': result.page,
'items': items,
'auth': self.auth_dict
}
return marshaled_dict, 200
def post(self):
"""
.. http:post:: /api/1/ignorelistentries
Create a new ignore list entry.
**Example Request**:
.. sourcecode:: http
POST /api/1/ignorelistentries HTTP/1.1
Host: example.com
Accept: application/json
{
"prefix": "noisy_",
"notes": "Security Monkey shouldn't track noisy_* objects",
"technology": "securitygroup"
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 201 Created
Vary: Accept
Content-Type: application/json
{
"id": 123,
"prefix": "noisy_",
"notes": "Security Monkey shouldn't track noisy_* objects",
"technology": "securitygroup"
}
:statuscode 201: created
:statuscode 401: Authentication Error. Please Login.
"""
self.reqparse.add_argument('prefix', required=True, type=text_type, help='A prefix must be provided which matches the objects you wish to ignore.', location='json')
self.reqparse.add_argument('notes', required=False, type=text_type, help='Add context.', location='json')
self.reqparse.add_argument('technology', required=True, type=text_type, help='Technology name required.', location='json')
args = self.reqparse.parse_args()
prefix = args['prefix']
technology = args.get('technology', True)
notes = args.get('notes', None)
entry = IgnoreListEntry()
entry.prefix = prefix
if notes:
entry.notes = notes
technology = Technology.query.filter(Technology.name == technology).first()
if not technology:
return {"status": "Could not find a technology with the given name"}, 500
entry.tech_id = technology.id
db.session.add(entry)
db.session.commit()
db.session.refresh(entry)
ignorelistentry_marshaled = marshal(entry.__dict__, IGNORELIST_FIELDS)
ignorelistentry_marshaled['technology'] = entry.technology.name
ignorelistentry_marshaled['auth'] = self.auth_dict
return ignorelistentry_marshaled, 201
|
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib import hub
LOG = logging.getLogger('ryu.lib.ofctl_v1_3')
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
if isinstance(src, str):
if src.startswith("0x") or src.startswith("0X"):
dst = int(src, 16)
else:
dst = int(src)
else:
dst = src
return dst
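# Illustrative usage (not part of the original module): str_to_int accepts
# hexadecimal strings, decimal strings, and plain ints.
#
#   str_to_int('0x10')  # -> 16
#   str_to_int('16')    # -> 16
#   str_to_int(16)      # -> 16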
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
if action_type == 'OUTPUT':
out_port = int(dic.get('port', ofp.OFPP_ANY))
max_len = int(dic.get('max_len', ofp.OFPCML_MAX))
result = parser.OFPActionOutput(out_port, max_len)
elif action_type == 'COPY_TTL_OUT':
result = parser.OFPActionCopyTtlOut()
elif action_type == 'COPY_TTL_IN':
result = parser.OFPActionCopyTtlIn()
elif action_type == 'SET_MPLS_TTL':
mpls_ttl = int(dic.get('mpls_ttl'))
result = parser.OFPActionSetMplsTtl(mpls_ttl)
elif action_type == 'DEC_MPLS_TTL':
result = parser.OFPActionDecMplsTtl()
elif action_type == 'PUSH_VLAN':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushVlan(ethertype)
elif action_type == 'POP_VLAN':
result = parser.OFPActionPopVlan()
elif action_type == 'PUSH_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushMpls(ethertype)
elif action_type == 'POP_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPopMpls(ethertype)
elif action_type == 'SET_QUEUE':
queue_id = int(dic.get('queue_id'))
result = parser.OFPActionSetQueue(queue_id)
elif action_type == 'GROUP':
group_id = int(dic.get('group_id'))
result = parser.OFPActionGroup(group_id)
elif action_type == 'SET_NW_TTL':
nw_ttl = int(dic.get('nw_ttl'))
result = parser.OFPActionSetNwTtl(nw_ttl)
elif action_type == 'DEC_NW_TTL':
result = parser.OFPActionDecNwTtl()
elif action_type == 'SET_FIELD':
field = dic.get('field')
value = dic.get('value')
result = parser.OFPActionSetField(**{field: value})
elif action_type == 'PUSH_PBB':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushPbb(ethertype)
elif action_type == 'POP_PBB':
result = parser.OFPActionPopPbb()
else:
result = None
return result
def to_actions(dp, acts):
inst = []
actions = []
ofp = dp.ofproto
parser = dp.ofproto_parser
for a in acts:
action = to_action(dp, a)
if action is not None:
actions.append(action)
else:
action_type = a.get('type')
if action_type == 'GOTO_TABLE':
table_id = int(a.get('table_id'))
inst.append(parser.OFPInstructionGotoTable(table_id))
elif action_type == 'WRITE_METADATA':
metadata = str_to_int(a.get('metadata'))
metadata_mask = (str_to_int(a['metadata_mask'])
if 'metadata_mask' in a
else parser.UINT64_MAX)
inst.append(
parser.OFPInstructionWriteMetadata(
metadata, metadata_mask))
elif action_type == 'METER':
meter_id = int(a.get('meter_id'))
inst.append(parser.OFPInstructionMeter(meter_id))
else:
LOG.error('Unknown action type: %s', action_type)
inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions))
return inst
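# Illustrative sketch (not part of the original module): the action list format
# accepted by to_actions(). Plain actions are collected into an APPLY_ACTIONS
# instruction, while GOTO_TABLE / WRITE_METADATA / METER entries become their
# own instructions. `dp` is assumed to be a connected ryu Datapath.
#
#   acts = [{'type': 'OUTPUT', 'port': 2},
#           {'type': 'GOTO_TABLE', 'table_id': 1}]
#   instructions = to_actions(dp, acts)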
def action_to_str(act):
action_type = act.cls_action_type
if action_type == ofproto_v1_3.OFPAT_OUTPUT:
buf = 'OUTPUT:' + str(act.port)
elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_OUT:
buf = 'COPY_TTL_OUT'
elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_IN:
buf = 'COPY_TTL_IN'
elif action_type == ofproto_v1_3.OFPAT_SET_MPLS_TTL:
buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
elif action_type == ofproto_v1_3.OFPAT_DEC_MPLS_TTL:
buf = 'DEC_MPLS_TTL'
elif action_type == ofproto_v1_3.OFPAT_PUSH_VLAN:
buf = 'PUSH_VLAN:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_VLAN:
buf = 'POP_VLAN'
elif action_type == ofproto_v1_3.OFPAT_PUSH_MPLS:
buf = 'PUSH_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_MPLS:
buf = 'POP_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_SET_QUEUE:
buf = 'SET_QUEUE:' + str(act.queue_id)
elif action_type == ofproto_v1_3.OFPAT_GROUP:
buf = 'GROUP:' + str(act.group_id)
elif action_type == ofproto_v1_3.OFPAT_SET_NW_TTL:
buf = 'SET_NW_TTL:' + str(act.nw_ttl)
elif action_type == ofproto_v1_3.OFPAT_DEC_NW_TTL:
buf = 'DEC_NW_TTL'
elif action_type == ofproto_v1_3.OFPAT_SET_FIELD:
buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
elif action_type == ofproto_v1_3.OFPAT_PUSH_PBB:
buf = 'PUSH_PBB:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_PBB:
buf = 'POP_PBB'
else:
buf = 'UNKNOWN'
return buf
def actions_to_str(instructions):
actions = []
for instruction in instructions:
if isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionActions):
for a in instruction.actions:
actions.append(action_to_str(a))
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionGotoTable):
buf = 'GOTO_TABLE:' + str(instruction.table_id)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionWriteMetadata):
buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
instruction.metadata_mask)
if instruction.metadata_mask
else 'WRITE_METADATA:0x%x' % instruction.metadata)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionMeter):
buf = 'METER:' + str(instruction.meter_id)
actions.append(buf)
else:
continue
return actions
def to_match(dp, attrs):
convert = {'in_port': int,
'in_phy_port': int,
'metadata': to_match_masked_int,
'dl_dst': to_match_eth,
'dl_src': to_match_eth,
'eth_dst': to_match_eth,
'eth_src': to_match_eth,
'dl_type': int,
'eth_type': int,
'dl_vlan': to_match_vid,
'vlan_vid': to_match_vid,
'vlan_pcp': int,
'ip_dscp': int,
'ip_ecn': int,
'nw_proto': int,
'ip_proto': int,
'nw_src': to_match_ip,
'nw_dst': to_match_ip,
'ipv4_src': to_match_ip,
'ipv4_dst': to_match_ip,
'tp_src': int,
'tp_dst': int,
'tcp_src': int,
'tcp_dst': int,
'udp_src': int,
'udp_dst': int,
'sctp_src': int,
'sctp_dst': int,
'icmpv4_type': int,
'icmpv4_code': int,
'arp_op': int,
'arp_spa': to_match_ip,
'arp_tpa': to_match_ip,
'arp_sha': to_match_eth,
'arp_tha': to_match_eth,
'ipv6_src': to_match_ip,
'ipv6_dst': to_match_ip,
'ipv6_flabel': int,
'icmpv6_type': int,
'icmpv6_code': int,
'ipv6_nd_target': to_match_ip,
'ipv6_nd_sll': to_match_eth,
'ipv6_nd_tll': to_match_eth,
'mpls_label': int,
'mpls_tc': int,
'mpls_bos': int,
'pbb_isid': int,
'tunnel_id': int,
'ipv6_exthdr': to_match_masked_int}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
'dl_type': 'eth_type',
'dl_vlan': 'vlan_vid',
'nw_src': 'ipv4_src',
'nw_dst': 'ipv4_dst',
'nw_proto': 'ip_proto'}
if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
attrs.get('eth_type') == ether.ETH_TYPE_ARP:
if 'nw_src' in attrs and 'arp_spa' not in attrs:
attrs['arp_spa'] = attrs['nw_src']
del attrs['nw_src']
if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
attrs['arp_tpa'] = attrs['nw_dst']
del attrs['nw_dst']
kwargs = {}
for key, value in attrs.items():
if key in keys:
# For old field name
key = keys[key]
if key in convert:
value = convert[key](value)
if key == 'tp_src' or key == 'tp_dst':
# TCP/UDP port
conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
'tp_dst': 'tcp_dst'},
inet.IPPROTO_UDP: {'tp_src': 'udp_src',
'tp_dst': 'udp_dst'}}
ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
key = conv[ip_proto][key]
kwargs[key] = value
else:
# others
kwargs[key] = value
else:
LOG.error('Unknown match field: %s', key)
return dp.ofproto_parser.OFPMatch(**kwargs)
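# Illustrative sketch (not part of the original module): to_match() translates
# legacy field names (dl_type, nw_proto, tp_dst, ...) to their OXM equivalents
# before building the OFPMatch. `dp` is assumed to be a connected ryu Datapath.
#
#   match = to_match(dp, {'dl_type': 0x0800, 'nw_proto': 6, 'tp_dst': 80})
#   # equivalent to OFPMatch(eth_type=0x0800, ip_proto=6, tcp_dst=80)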
def to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value
def to_match_ip(value):
if '/' in value:
(ip_addr, ip_mask) = value.split('/')
if ip_mask.isdigit():
ip = netaddr.ip.IPNetwork(value)
ip_addr = str(ip.ip)
ip_mask = str(ip.netmask)
return ip_addr, ip_mask
else:
return value
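# Illustrative usage (not part of the original module): CIDR prefixes are
# expanded to dotted netmasks, plain addresses pass through unchanged.
#
#   to_match_ip('10.0.0.0/8')          # -> ('10.0.0.0', '255.0.0.0')
#   to_match_ip('10.0.0.0/255.0.0.0')  # -> ('10.0.0.0', '255.0.0.0')
#   to_match_ip('10.0.0.1')            # -> '10.0.0.1'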
def to_match_vid(value):
# NOTE: If "vlan_id/dl_vlan" field is described as decimal int value
# (and decimal string value), it is treated as values of
# VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically
# applied. OTOH, If it is described as hexadecimal string,
# treated as values of oxm_value (including OFPVID_PRESENT
# bit), and OFPVID_PRESENT bit is NOT automatically applied.
if isinstance(value, int):
# described as decimal int value
return value | ofproto_v1_3.OFPVID_PRESENT
else:
if '/' in value:
val = value.split('/')
return int(val[0], 0), int(val[1], 0)
else:
if value.isdigit():
# described as decimal string value
return int(value, 10) | ofproto_v1_3.OFPVID_PRESENT
else:
return int(value, 0)
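# Illustrative usage (not part of the original module), following the NOTE
# above: decimal values get the OFPVID_PRESENT bit applied, hexadecimal values
# are taken as the raw oxm_value.
#
#   to_match_vid(100)              # -> 100 | 0x1000 == 4196
#   to_match_vid('100')            # -> 4196
#   to_match_vid('0x1064')         # -> 4196 (OFPVID_PRESENT already included)
#   to_match_vid('0x1064/0x1fff')  # -> (4196, 8191)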
def to_match_masked_int(value):
if isinstance(value, str) and '/' in value:
value = value.split('/')
return str_to_int(value[0]), str_to_int(value[1])
else:
return str_to_int(value)
def match_to_str(ofmatch):
keys = {'eth_src': 'dl_src',
'eth_dst': 'dl_dst',
'eth_type': 'dl_type',
'vlan_vid': 'dl_vlan',
'ipv4_src': 'nw_src',
'ipv4_dst': 'nw_dst',
'ip_proto': 'nw_proto',
'tcp_src': 'tp_src',
'tcp_dst': 'tp_dst',
'udp_src': 'tp_src',
'udp_dst': 'tp_dst'
}
match = {}
ofmatch = ofmatch.to_jsondict()['OFPMatch']
ofmatch = ofmatch['oxm_fields']
for match_field in ofmatch:
key = match_field['OXMTlv']['field']
if key in keys:
key = keys[key]
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'dl_vlan':
value = match_vid_to_str(value, mask)
elif key == 'metadata' or key == 'ipv6_exthdr':
value = match_masked_int_to_str(value, mask)
else:
if mask is not None:
value = value + '/' + mask
else:
value = value
match.setdefault(key, value)
return match
def match_masked_int_to_str(value, mask):
return '%d/%d' % (value, mask) if mask else '%d' % value
def match_vid_to_str(value, mask):
if mask is not None:
value = '0x%04x/0x%04x' % (value, mask)
else:
if value & ofproto_v1_3.OFPVID_PRESENT:
value = str(value & ~ofproto_v1_3.OFPVID_PRESENT)
else:
value = '0x%04x' % value
return value
def send_stats_request(dp, stats, waiters, msgs):
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = {}
for msg in msgs:
stats = msg.body
s = {'mfr_desc': stats.mfr_desc,
'hw_desc': stats.hw_desc,
'sw_desc': stats.sw_desc,
'serial_num': stats.serial_num,
'dp_desc': stats.dp_desc}
desc = {str(dp.id): s}
return desc
def get_queue_stats(dp, waiters):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = []
for msg in msgs:
stats = msg.body
for stat in stats:
s.append({'duration_nsec': stat.duration_nsec,
'duration_sec': stat.duration_sec,
'port_no': stat.port_no,
'queue_id': stat.queue_id,
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
desc = {str(dp.id): s}
return desc
def get_flow_stats(dp, waiters, flow=None):
    flow = flow if flow is not None else {}
    table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = int(flow.get('flags', 0))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
for stats in msg.body:
actions = actions_to_str(stats.instructions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': stats.table_id,
'length': stats.length,
'flags': stats.flags}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_aggregate_flow_stats(dp, waiters, flow=None):
    flow = flow if flow is not None else {}
    table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = int(flow.get('flags', 0))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPAggregateStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
stats = msg.body
s = {'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'flow_count': stats.flow_count}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_port_stats(dp, waiters):
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, 0, dp.ofproto.OFPP_ANY)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': stats.port_no,
'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
'rx_dropped': stats.rx_dropped,
'tx_dropped': stats.tx_dropped,
'rx_errors': stats.rx_errors,
'tx_errors': stats.tx_errors,
'rx_frame_err': stats.rx_frame_err,
'rx_over_err': stats.rx_over_err,
'rx_crc_err': stats.rx_crc_err,
'collisions': stats.collisions,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec}
ports.append(s)
ports = {str(dp.id): ports}
return ports
def get_meter_stats(dp, waiters):
stats = dp.ofproto_parser.OFPMeterStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
meters = []
for msg in msgs:
for stats in msg.body:
bands = []
for band in stats.band_stats:
b = {'packet_band_count': band.packet_band_count,
'byte_band_count': band.byte_band_count}
bands.append(b)
s = {'meter_id': stats.meter_id,
'len': stats.len,
'flow_count': stats.flow_count,
'packet_in_count': stats.packet_in_count,
'byte_in_count': stats.byte_in_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'band_stats': bands}
meters.append(s)
meters = {str(dp.id): meters}
return meters
def get_meter_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPMBT_DROP: 'DROP',
ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
ofp.OFPMF_PKTPS: 'PKTPS',
ofp.OFPMF_BURST: 'BURST',
ofp.OFPMF_STATS: 'STATS'}
stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
for feature in msg.body:
band_types = []
for k, v in type_convert.items():
if (1 << k) & feature.band_types:
band_types.append(v)
capabilities = []
for k, v in capa_convert.items():
if k & feature.capabilities:
capabilities.append(v)
f = {'max_meter': feature.max_meter,
'band_types': band_types,
'capabilities': capabilities,
'max_bands': feature.max_bands,
'max_color': feature.max_color}
features.append(f)
features = {str(dp.id): features}
return features
def get_meter_config(dp, waiters):
flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
dp.ofproto.OFPMF_PKTPS: 'PKTPS',
dp.ofproto.OFPMF_BURST: 'BURST',
dp.ofproto.OFPMF_STATS: 'STATS'}
band_type = {dp.ofproto.OFPMBT_DROP: 'DROP',
dp.ofproto.OFPMBT_DSCP_REMARK: 'DSCP_REMARK',
dp.ofproto.OFPMBT_EXPERIMENTER: 'EXPERIMENTER'}
stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
configs = []
for msg in msgs:
for config in msg.body:
bands = []
for band in config.bands:
b = {'type': band_type.get(band.type, ''),
'rate': band.rate,
'burst_size': band.burst_size}
if band.type == dp.ofproto.OFPMBT_DSCP_REMARK:
b['prec_level'] = band.prec_level
elif band.type == dp.ofproto.OFPMBT_EXPERIMENTER:
b['experimenter'] = band.experimenter
bands.append(b)
c_flags = []
for k, v in flags.items():
if k & config.flags:
c_flags.append(v)
c = {'flags': c_flags,
'meter_id': config.meter_id,
'bands': bands}
configs.append(c)
configs = {str(dp.id): configs}
return configs
def get_group_stats(dp, waiters):
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, 0, dp.ofproto.OFPG_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
groups = []
for msg in msgs:
for stats in msg.body:
bucket_stats = []
for bucket_stat in stats.bucket_stats:
c = {'packet_count': bucket_stat.packet_count,
'byte_count': bucket_stat.byte_count}
bucket_stats.append(c)
g = {'length': stats.length,
'group_id': stats.group_id,
'ref_count': stats.ref_count,
'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'bucket_stats': bucket_stats}
groups.append(g)
groups = {str(dp.id): groups}
return groups
def get_group_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
ofp.OFPGT_SELECT: 'SELECT',
ofp.OFPGT_INDIRECT: 'INDIRECT',
ofp.OFPGT_FF: 'FF'}
cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
ofp.OFPGFC_CHAINING: 'CHAINING',
ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD',
ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
ofp.OFPAT_POP_PBB: 'POP_PBB'}
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
feature = msg.body
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
types.append(v)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
capabilities.append(v)
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
acts.append(v2)
actions.append({v1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
features = {str(dp.id): features}
return features
def get_group_desc(dp, waiters):
type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
dp.ofproto.OFPGT_SELECT: 'SELECT',
dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
dp.ofproto.OFPGT_FF: 'FF'}
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
for stats in msg.body:
buckets = []
for bucket in stats.buckets:
actions = []
for action in bucket.actions:
actions.append(action_to_str(action))
b = {'weight': bucket.weight,
'watch_port': bucket.watch_port,
'watch_group': bucket.watch_group,
'actions': actions}
buckets.append(b)
d = {'type': type_convert.get(stats.type),
'group_id': stats.group_id,
'buckets': buckets}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
stats = msg.body
for stat in stats:
d = {'port_no': stat.port_no,
'hw_addr': stat.hw_addr,
'name': stat.name,
'config': stat.config,
'state': stat.state,
'curr': stat.curr,
'advertised': stat.advertised,
'supported': stat.supported,
'peer': stat.peer,
'curr_speed': stat.curr_speed,
'max_speed': stat.max_speed}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def mod_flow_entry(dp, flow, cmd):
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
table_id = int(flow.get('table_id', 0))
idle_timeout = int(flow.get('idle_timeout', 0))
hard_timeout = int(flow.get('hard_timeout', 0))
priority = int(flow.get('priority', 0))
buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
flags = int(flow.get('flags', 0))
match = to_match(dp, flow.get('match', {}))
inst = to_actions(dp, flow.get('actions', []))
flow_mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
dp.send_msg(flow_mod)
def mod_meter_entry(dp, flow, cmd):
flags_convert = {'KBPS': dp.ofproto.OFPMF_KBPS,
'PKTPS': dp.ofproto.OFPMF_PKTPS,
'BURST': dp.ofproto.OFPMF_BURST,
'STATS': dp.ofproto.OFPMF_STATS}
flow_flags = flow.get('flags')
if not isinstance(flow_flags, list):
flow_flags = [flow_flags]
flags = 0
for flag in flow_flags:
flags |= flags_convert.get(flag, 0)
if not flags:
LOG.error('Unknown flags: %s', flow.get('flags'))
meter_id = int(flow.get('meter_id', 0))
bands = []
for band in flow.get('bands', []):
band_type = band.get('type')
rate = int(band.get('rate', 0))
burst_size = int(band.get('burst_size', 0))
if band_type == 'DROP':
bands.append(
dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
elif band_type == 'DSCP_REMARK':
prec_level = int(band.get('prec_level', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandDscpRemark(
rate, burst_size, prec_level))
elif band_type == 'EXPERIMENTER':
experimenter = int(band.get('experimenter', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandExperimenter(
rate, burst_size, experimenter))
else:
LOG.error('Unknown band type: %s', band_type)
meter_mod = dp.ofproto_parser.OFPMeterMod(
dp, cmd, flags, meter_id, bands)
dp.send_msg(meter_mod)
def mod_group_entry(dp, group, cmd):
type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
'SELECT': dp.ofproto.OFPGT_SELECT,
'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
'FF': dp.ofproto.OFPGT_FF}
type_ = type_convert.get(group.get('type', 'ALL'))
if type_ is None:
LOG.error('Unknown type: %s', group.get('type'))
group_id = int(group.get('group_id', 0))
buckets = []
for bucket in group.get('buckets', []):
weight = int(bucket.get('weight', 0))
watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
actions = []
for dic in bucket.get('actions', []):
action = to_action(dp, dic)
if action is not None:
actions.append(action)
buckets.append(dp.ofproto_parser.OFPBucket(
weight, watch_port, watch_group, actions))
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
dp.send_msg(group_mod)
def mod_port_behavior(dp, port_config):
port_no = int(port_config.get('port_no', 0))
hw_addr = port_config.get('hw_addr')
config = int(port_config.get('config', 0))
mask = int(port_config.get('mask', 0))
advertise = int(port_config.get('advertise'))
port_mod = dp.ofproto_parser.OFPPortMod(
        dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
def send_experimenter(dp, exp):
experimenter = exp.get('experimenter', 0)
exp_type = exp.get('exp_type', 0)
data_type = exp.get('data_type', 'ascii')
if data_type != 'ascii' and data_type != 'base64':
LOG.error('Unknown data type: %s', data_type)
data = exp.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
expmsg = dp.ofproto_parser.OFPExperimenter(
dp, experimenter, exp_type, data)
dp.send_msg(expmsg)
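# Illustrative sketch (not part of the original module): installing a flow via
# mod_flow_entry(). `dp` is assumed to be a connected ryu Datapath, e.g. one
# obtained from an EventOFPSwitchFeatures handler.
#
#   flow = {'priority': 100,
#           'match': {'in_port': 1, 'eth_type': 0x0800},
#           'actions': [{'type': 'OUTPUT', 'port': 2}]}
#   mod_flow_entry(dp, flow, dp.ofproto.OFPFC_ADD)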
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"ReparameterizationType",
"FULLY_REPARAMETERIZED",
"NOT_REPARAMETERIZED",
"Distribution",
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape",
"batch_shape_tensor",
"cdf",
"covariance",
"cross_entropy",
"entropy",
"event_shape",
"event_shape_tensor",
"kl_divergence",
"log_cdf",
"log_prob",
"log_survival_function",
"mean",
"mode",
"prob",
"sample",
"stddev",
"survival_function",
"variance",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
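# Illustrative usage (not part of the original module): the copy shares the
# original's code and globals but is a distinct function object, so its
# __doc__ can be rewritten without touching the original.
#
#   def f(x):
#     """Original docstring."""
#     return x
#
#   g = _copy_fn(f)
#   g.__doc__ = "New docstring."
#   assert f.__doc__ == "Original docstring."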
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@tf_export("distributions.ReparameterizationType")
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradients` call and use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export("distributions.FULLY_REPARAMETERIZED").export_constant(
__name__, "FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
tf_export("distributions.NOT_REPARAMETERIZED").export_constant(
__name__, "NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
@tf_export("distributions.Distribution")
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
  dist = distributions.Beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
  dist = distributions.Beta(a, b, allow_nan_stats=True)  # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
  dist = distributions.Beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
if not name or name[-1] != "/": # `name` is not a name scope
non_unique_name = name or type(self).__name__
with ops.name_scope(non_unique_name) as name:
pass
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used:
# `parameters = dict(locals())`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._event_shape())
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError:
return math_ops.log(self._prob(value, **kwargs))
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError:
return math_ops.exp(self._log_prob(value, **kwargs))
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError:
return math_ops.log(self._cdf(value, **kwargs))
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError:
return math_ops.exp(self._log_cdf(value, **kwargs))
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError:
return math_ops.log1p(-self.cdf(value, **kwargs))
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError:
return 1. - self.cdf(value, **kwargs)
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented")
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError:
return math_ops.square(self._stddev())
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError:
return math_ops.sqrt(self._variance())
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tf.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tf.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
with self._name_scope(name):
return self._kl_divergence(other)
def __str__(self):
return ("tf.distributions.{type_name}("
"\"{self_name}\""
"{maybe_batch_shape}"
"{maybe_event_shape}"
", dtype={dtype})".format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
if self.batch_shape.ndims is not None
else ""),
maybe_event_shape=(", event_shape={}".format(self.event_shape)
if self.event_shape.ndims is not None
else ""),
dtype=self.dtype.name))
def __repr__(self):
return ("<tf.distributions.{type_name} "
"'{self_name}'"
" batch_shape={batch_shape}"
" event_shape={event_shape}"
" dtype={dtype}>".format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
event_shape=self.event_shape,
dtype=self.dtype.name))
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
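# A minimal usage sketch (assuming a concrete subclass such as a `Normal`
# distribution from the same package; parameter values are illustrative). It shows
# the public statistics defined above and the identity KL[p, q] = H[p, q] - H[p]
# documented in `kl_divergence`:
#
#   p = Normal(loc=0., scale=1.)
#   q = Normal(loc=1., scale=2.)
#   var = p.variance()            # equals p.stddev() ** 2 (see the fallback above)
#   kl = p.kl_divergence(q)       # equals p.cross_entropy(q) - p.entropy()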
|
|
from typing import Any, Collection, Dict, Optional, Tuple, cast
from ..language import ast, DirectiveLocation
from ..pyutils import inspect, is_description
from .assert_name import assert_name
from .definition import GraphQLArgument, GraphQLInputType, GraphQLNonNull, is_input_type
from .scalars import GraphQLBoolean, GraphQLString
try:
from typing import TypedDict
except ImportError: # Python < 3.8
from typing_extensions import TypedDict
__all__ = [
"is_directive",
"assert_directive",
"is_specified_directive",
"specified_directives",
"GraphQLDirective",
"GraphQLDirectiveKwargs",
"GraphQLIncludeDirective",
"GraphQLSkipDirective",
"GraphQLDeprecatedDirective",
"GraphQLSpecifiedByDirective",
"DirectiveLocation",
"DEFAULT_DEPRECATION_REASON",
]
class GraphQLDirectiveKwargs(TypedDict, total=False):
name: str
locations: Tuple[DirectiveLocation, ...]
args: Dict[str, GraphQLArgument]
is_repeatable: bool
description: Optional[str]
extensions: Dict[str, Any]
ast_node: Optional[ast.DirectiveDefinitionNode]
class GraphQLDirective:
"""GraphQL Directive
Directives are used by the GraphQL runtime as a way of modifying execution behavior.
Type system creators will usually not create these directly.
"""
name: str
locations: Tuple[DirectiveLocation, ...]
is_repeatable: bool
args: Dict[str, GraphQLArgument]
description: Optional[str]
extensions: Dict[str, Any]
ast_node: Optional[ast.DirectiveDefinitionNode]
def __init__(
self,
name: str,
locations: Collection[DirectiveLocation],
args: Optional[Dict[str, GraphQLArgument]] = None,
is_repeatable: bool = False,
description: Optional[str] = None,
extensions: Optional[Dict[str, Any]] = None,
ast_node: Optional[ast.DirectiveDefinitionNode] = None,
) -> None:
assert_name(name)
try:
locations = tuple(
value
if isinstance(value, DirectiveLocation)
else DirectiveLocation[cast(str, value)]
for value in locations
)
except (KeyError, TypeError):
raise TypeError(
f"{name} locations must be specified"
" as a collection of DirectiveLocation enum values."
)
if args is None:
args = {}
elif not isinstance(args, dict) or not all(
isinstance(key, str) for key in args
):
raise TypeError(f"{name} args must be a dict with argument names as keys.")
elif not all(
isinstance(value, GraphQLArgument) or is_input_type(value)
for value in args.values()
):
raise TypeError(
f"{name} args must be GraphQLArgument or input type objects."
)
else:
args = {
assert_name(name): value
if isinstance(value, GraphQLArgument)
else GraphQLArgument(cast(GraphQLInputType, value))
for name, value in args.items()
}
if not isinstance(is_repeatable, bool):
raise TypeError(f"{name} is_repeatable flag must be True or False.")
if ast_node and not isinstance(ast_node, ast.DirectiveDefinitionNode):
raise TypeError(f"{name} AST node must be a DirectiveDefinitionNode.")
if description is not None and not is_description(description):
raise TypeError(f"{name} description must be a string.")
if extensions is None:
extensions = {}
elif not isinstance(extensions, dict) or not all(
isinstance(key, str) for key in extensions
):
raise TypeError(f"{name} extensions must be a dictionary with string keys.")
self.name = name
self.locations = locations
self.args = args
self.is_repeatable = is_repeatable
self.description = description
self.extensions = extensions
self.ast_node = ast_node
def __str__(self) -> str:
return f"@{self.name}"
def __repr__(self) -> str:
return f"<{self.__class__.__name__}({self})>"
def __eq__(self, other: Any) -> bool:
return self is other or (
isinstance(other, GraphQLDirective)
and self.name == other.name
and self.locations == other.locations
and self.args == other.args
and self.is_repeatable == other.is_repeatable
and self.description == other.description
and self.extensions == other.extensions
)
def to_kwargs(self) -> GraphQLDirectiveKwargs:
return GraphQLDirectiveKwargs(
name=self.name,
locations=self.locations,
args=self.args,
is_repeatable=self.is_repeatable,
description=self.description,
extensions=self.extensions,
ast_node=self.ast_node,
)
def __copy__(self) -> "GraphQLDirective": # pragma: no cover
return self.__class__(**self.to_kwargs())
def is_directive(directive: Any) -> bool:
"""Test if the given value is a GraphQL directive."""
return isinstance(directive, GraphQLDirective)
def assert_directive(directive: Any) -> GraphQLDirective:
if not is_directive(directive):
raise TypeError(f"Expected {inspect(directive)} to be a GraphQL directive.")
return cast(GraphQLDirective, directive)
# Used to conditionally include fields or fragments.
GraphQLIncludeDirective = GraphQLDirective(
name="include",
locations=[
DirectiveLocation.FIELD,
DirectiveLocation.FRAGMENT_SPREAD,
DirectiveLocation.INLINE_FRAGMENT,
],
args={
"if": GraphQLArgument(
GraphQLNonNull(GraphQLBoolean), description="Included when true."
)
},
description="Directs the executor to include this field or fragment"
" only when the `if` argument is true.",
)
# Used to conditionally skip (exclude) fields or fragments:
GraphQLSkipDirective = GraphQLDirective(
name="skip",
locations=[
DirectiveLocation.FIELD,
DirectiveLocation.FRAGMENT_SPREAD,
DirectiveLocation.INLINE_FRAGMENT,
],
args={
"if": GraphQLArgument(
GraphQLNonNull(GraphQLBoolean), description="Skipped when true."
)
},
description="Directs the executor to skip this field or fragment"
" when the `if` argument is true.",
)
# Constant string used for default reason for a deprecation:
DEFAULT_DEPRECATION_REASON = "No longer supported"
# Used to declare element of a GraphQL schema as deprecated:
GraphQLDeprecatedDirective = GraphQLDirective(
name="deprecated",
locations=[
DirectiveLocation.FIELD_DEFINITION,
DirectiveLocation.ARGUMENT_DEFINITION,
DirectiveLocation.INPUT_FIELD_DEFINITION,
DirectiveLocation.ENUM_VALUE,
],
args={
"reason": GraphQLArgument(
GraphQLString,
description="Explains why this element was deprecated,"
" usually also including a suggestion for how to access"
" supported similar data."
" Formatted using the Markdown syntax, as specified by"
" [CommonMark](https://commonmark.org/).",
default_value=DEFAULT_DEPRECATION_REASON,
)
},
description="Marks an element of a GraphQL schema as no longer supported.",
)
# Used to provide a URL for specifying the behaviour of custom scalar definitions:
GraphQLSpecifiedByDirective = GraphQLDirective(
name="specifiedBy",
locations=[DirectiveLocation.SCALAR],
args={
"url": GraphQLArgument(
GraphQLNonNull(GraphQLString),
description="The URL that specifies the behaviour of this scalar.",
)
},
description="Exposes a URL that specifies the behaviour of this scalar.",
)
specified_directives: Tuple[GraphQLDirective, ...] = (
GraphQLIncludeDirective,
GraphQLSkipDirective,
GraphQLDeprecatedDirective,
GraphQLSpecifiedByDirective,
)
"""A tuple with all directives from the GraphQL specification"""
def is_specified_directive(directive: GraphQLDirective) -> bool:
"""Check whether the given directive is one of the specified directives."""
return any(
specified_directive.name == directive.name
for specified_directive in specified_directives
)
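# A minimal sketch (not part of this module's public API) of how a custom directive
# is constructed with the class above; the directive name, locations and "requires"
# argument are purely illustrative and not part of the GraphQL specification.
def _example_custom_directive() -> GraphQLDirective:
    """Build an illustrative custom directive restricting access to a role."""
    return GraphQLDirective(
        name="auth",
        locations=[DirectiveLocation.FIELD_DEFINITION, DirectiveLocation.OBJECT],
        args={
            "requires": GraphQLArgument(
                GraphQLNonNull(GraphQLString),
                description="Name of the role required to access the element.",
            )
        },
        description="Illustrative directive restricting access to a role.",
    )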
|
|
"""
Runs taggd, a tool to demultiplex (link molecular barcodes back to) a file of genetic reads,
typically obtained by sequencing. For matched reads, the barcode and its related properties
are added to the read. Unmatched reads, ambiguously matched reads, and stats are by default
produced as output files as well.
The input ID file should be tab-delimited with the following format:
<barcode> <prop1> <prop2> ...
<barcode> <prop1> <prop2> ...
The input files can be in FASTA, FASTQ, SAM or BAM format. Matched files will be appended
with the barcode and properties like this:
B0:Z:<barcode> B1:Z:<prop1> B2:Z:<prop2> ...
Source: https://github.com/SpatialTranscriptomicsResearch/taggd
Python package: https://pypi.python.org/pypi/taggd
Contact: joel.sjostrand@gmail.com;jose.fernandez.navarro@scilifelab.se
"""
import os
import time
import multiprocessing as mp
import argparse
import taggd.io.barcode_utils as bu
import taggd.core.demultiplex_core_functions as core
import taggd.core.demultiplex_sub_functions as sub
import taggd.core.demultiplex_search_functions as srch
def main(argv=None):
"""
Main application.
Starts a timer, creates the argument parser, parses the parameters
and runs all the steps of the demultiplexing.
"""
start_time = time.time()
# Create a parser
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
# Needed parameters
parser.add_argument('barcodes_infile',
metavar='barcodes-infile',
help="The file with true barcode IDs and other properties.")
parser.add_argument('reads_infile',
metavar='reads-infile',
help="The FASTQ, FASTA, SAM or BAM file with reads.")
parser.add_argument('outfile_prefix',
metavar='outfile-prefix', help="The output files prefix.")
# Optional arguments.
parser.add_argument('--no-matched-output',
help='Do not output matched reads',
default=False, action='store_true')
parser.add_argument('--no-ambiguous-output',
help='Do not output ambiguous reads',
default=False, action='store_true')
parser.add_argument('--no-unmatched-output',
help='Do not output unmatched reads',
default=False, action='store_true')
parser.add_argument('--no-results-output',
help='Do not output a tab-separated results file with stats on the reads',
default=False, action='store_true')
parser.add_argument('--start-position',
type=int,
help='The start position for barcodes in reads (default: %(default)d)',
default=0, metavar="[int]")
parser.add_argument('--k',
type=int,
help='The kmer length (default: %(default)d)',
default=6, metavar="[int]")
parser.add_argument('--max-edit-distance',
type=int,
help='The max edit distance for allowing hits (default: %(default)d)',
default=2, metavar="[int]")
parser.add_argument('--metric',
help= "Distance metric: Subglobal, Levenshtein or Hamming (default: %(default)s)",
default="Subglobal", metavar="[string]")
parser.add_argument('--ambiguity-factor',
type=float,
help='Top matches within this factor from the best match are considered ambiguous,\n'
'for instance with factor=1.5, having one match with distance 2 and two matches\n'
'with distance 4 yields all three matches as ambiguous hits. Perfect hits are always\n'
'considered non-ambiguous, irrespective of factor. (default: %(default)s)',
default=1.0, metavar="[float]")
parser.add_argument('--slider-increment',
type=int, help="Space between kmer searches, " \
"0 yields kmer length (default: %(default)d)",
default=0, metavar="[int]")
parser.add_argument('--overhang',
type=int,
help="Additional flanking bases around read barcode\n" \
"to allow for insertions when matching (default: %(default)d)",
default=2, metavar="[int]")
parser.add_argument('--seed',
help="Random number generator seed for shuffling ambiguous hits (default: %(default)s)",
default=None, metavar="[string]")
parser.add_argument('--homopolymer-filter',
type=int,
help="If set, excludes reads where the barcode part contains\n" \
"a homopolymer of the given length,\n" \
"0 means no filter (default: %(default)d)",
default=8, metavar="[int]")
parser.add_argument('--subprocesses',
type=int,
help="Number of subprocesses started (default: 0, yielding number of machine cores - 1)",
default=0, metavar="[int]")
parser.add_argument('--estimate-min-edit-distance',
type=int,
help="If set, estimates the min edit distance among true\n" \
"barcodes by comparing the specified number of pairs,\n" \
"0 means no estimation (default: %(default)d)",
default=0, metavar="[int]")
parser.add_argument('--no-offset-speedup',
help="Turns off an offset speedup routine.\n" \
"Increases running time but may yield more hits.",
default=False, action='store_true')
parser.add_argument('--multiple-hits-keep-one',
help="When multiple kmer hits are found for a record\n" \
"keep one as unambiguous and the rest as ambiguous",
default=False, action='store_true')
parser.add_argument('--trim-sequences', nargs='+', type=int, default=None,
help="Trims from the barcodes in the input file\n" \
"The bases given in the list of tuples as START END START END .. where\n" \
"START is the integer position of the first base (0 based) and END is the integer\n" \
"position of the last base.\nTrimmng sequences can be given several times.")
parser.add_argument('--barcode-tag',
type=str,
help='Use the sequence in specified tag instead of the read sequence for the barcode demultiplexing.\n' \
'The tag must be a two-letter string and be present for all records in the input file.\n' \
'Can only be used with SAM or BAM formatted input files.',
default=None, metavar="[str]")
parser.add_argument('--version', action='version', version='%(prog)s ' + "0.3.2")
# Parse
if argv == None:
options = parser.parse_args()
else:
options = parser.parse_args(argv)
# Validate all options.
if not os.path.isfile(options.barcodes_infile) :
raise ValueError("Invalid true barcodes input file path.")
if not os.path.isfile(options.reads_infile) :
raise ValueError("Invalid reads input file path.")
if not (options.reads_infile.upper().endswith(".FASTQ") or \
options.reads_infile.upper().endswith(".FQ") or \
options.reads_infile.upper().endswith(".SAM") or \
options.reads_infile.upper().endswith(".FASTA") or \
options.reads_infile.upper().endswith(".FA") or \
options.reads_infile.upper().endswith(".BAM")):
raise ValueError("Invalid reads input file format: must be FASTQ, " \
"FASTA, SAM or BAM format and file end with .fq, fastq, .fa, .fasta, .sam or .bam")
if options.outfile_prefix is None or options.outfile_prefix == "":
raise ValueError("Invalid output file prefix.")
if options.k <= 0:
raise ValueError("Invalid kmer length. Must be > 0.")
if options.max_edit_distance < 0:
raise ValueError("Invalid max edit distance. Must be >= 0.")
if options.metric not in ("Subglobal", "Levenshtein", "Hamming"):
raise ValueError("Invalid metric. Must be Subglobal, Levenshtein or Hamming.")
if options.slider_increment < 0:
raise ValueError("Invalid slider increment. Must be >= 0.")
if options.slider_increment == 0:
options.slider_increment = int(options.k)
if options.start_position < 0:
raise ValueError("Invalid start position. Must be >= 0.")
if options.overhang < 0:
raise ValueError("Invalid overhang. Must be >= 0.")
if options.metric == "Hamming" and options.overhang > 0:
raise ValueError("Invalid overhang. Must be 0 for Hamming metric.")
if options.subprocesses < 0:
raise ValueError("Invalid no. of subprocesses. Must be >= 0.")
if options.ambiguity_factor < 1.0:
raise ValueError("Invalid ambiguity factor. Must be >= 1.")
# Check that the trimming sequences given are valid
if options.trim_sequences is not None \
and (len(options.trim_sequences) % 2 != 0 or min(options.trim_sequences) < 0):
raise ValueError("Invalid trimming sequences given. " \
"The number of positions given must be even, non-negative and fit into the barcode length.")
if options.barcode_tag:
if len(options.barcode_tag) != 2:
raise ValueError("Invalid the \"--barcode-tag\" option must specify a two-letter string, current length is "+str(len(options.barcode_tag))+" letters (\""+options.barcode_tag+"\").\n")
if not (options.reads_infile.upper().endswith(".SAM") or options.reads_infile.upper().endswith(".BAM")):
raise ValueError("Invalid the \"--barcode-tag\" option can only be used with SAM or BAM formatted input files.\n")
# Read barcodes file
true_barcodes = bu.read_barcode_file(options.barcodes_infile)
# Paths
frmt = options.reads_infile.split(".")[-1]
fn_bc = os.path.abspath(options.barcodes_infile)
fn_reads = os.path.abspath(options.reads_infile)
fn_prefix = os.path.abspath(options.outfile_prefix)
fn_matched = None if options.no_matched_output else fn_prefix + "_matched." + frmt
fn_ambig = None if options.no_ambiguous_output else fn_prefix + "_ambiguous." + frmt
fn_unmatched = None if options.no_unmatched_output else fn_prefix + "_unmatched." + frmt
fn_results = None if options.no_results_output else fn_prefix + "_results.tsv"
# Subprocesses
if options.subprocesses == 0:
options.subprocesses = mp.cpu_count() - 1
print("# Options: " + str(options).split("Namespace")[-1])
print("# Barcodes input file: " + str(fn_bc))
print("# Reads input file: " + str(fn_reads))
print("# Matched output file: " + str(fn_matched))
print("# Ambiguous output file: " + str(fn_ambig))
print("# Unmatched output file: " + str(fn_unmatched))
print("# Results output file: " + str(fn_results))
print("# Number of barcodes in input: " + str(len(true_barcodes)))
lngth = len(list(true_barcodes.keys())[0])
print("# Barcode length: " + str(lngth))
print("# Barcode length when overhang added: " + \
str(lngth + min(options.start_position, options.overhang) + options.overhang))
# Check barcodes file.
if options.estimate_min_edit_distance > 0:
min_dist = estimate_min_edit_distance(true_barcodes, options.estimate_min_edit_distance)
if min_dist <= options.max_edit_distance:
raise ValueError("Invalid max edit distance: exceeds or equal " \
"to estimated minimum edit distance among true barcodes.")
print("# Estimate of minimum edit distance between true barcodes (may be less): " + str(min_dist))
else:
print("# Estimate of minimum edit distance between true barcodes (may be less): Not estimated")
# Make the input trim coordinates a list of tuples
trim_sequences = None
if options.trim_sequences is not None:
trim_sequences = list()
for i in range(len(options.trim_sequences) - 1):
if i % 2 == 0:
trim_sequences.append((options.trim_sequences[i],
options.trim_sequences[i+1]))
# Initialize main components
sub.init(true_barcodes,
options.start_position,
min(options.start_position, options.overhang),
options.overhang,
options.max_edit_distance,
options.homopolymer_filter,
options.seed,
options.multiple_hits_keep_one,
trim_sequences,
options.barcode_tag)
srch.init(true_barcodes,
options.k,
options.max_edit_distance,
options.metric,
options.slider_increment,
min(options.start_position, options.overhang),
options.overhang,
options.ambiguity_factor,
options.no_offset_speedup)
# Demultiplex
print("# Starting demultiplexing...")
stats = core.demultiplex(fn_reads,
fn_matched,
fn_ambig,
fn_unmatched,
fn_results,
options.subprocesses)
print("# ...finished demultiplexing")
print("# Wall time in secs: " + str(time.time() - start_time))
print(str(stats))
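# A minimal usage sketch (the script name and file names are illustrative):
#
#   python demultiplex.py barcodes.tsv reads.fastq out_prefix \
#       --k 6 --max-edit-distance 2 --metric Subglobal --overhang 2
#
# With the defaults this writes out_prefix_matched.fastq, out_prefix_ambiguous.fastq,
# out_prefix_unmatched.fastq and out_prefix_results.tsv; each output can be disabled
# with the corresponding --no-*-output flag.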
|
|
#!/usr/bin/env python
'''
Ansible module for user media
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix user media ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because the zabbix modules look very similar to one another.
# They need duplicated code because their behavior is very similar
# but differs for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_mtype(zapi, mtype):
'''Get mediatype
If passed an int, return it as the mediatypeid
if it's a string, then try to fetch it through its description
'''
if isinstance(mtype, int):
return mtype
try:
return int(mtype)
except ValueError:
pass
content = zapi.get_content('mediatype', 'get', {'filter': {'description': mtype}})
if content.has_key('result') and content['result']:
return content['result'][0]['mediatypeid']
return None
def get_user(zapi, user):
''' Get userids from user aliases
'''
content = zapi.get_content('user', 'get', {'filter': {'alias': user}})
if content['result']:
return content['result'][0]
return None
def get_severity(severity):
''' determine severity
'''
if isinstance(severity, int) or \
isinstance(severity, str):
return severity
val = 0
sev_map = {
'not': 2**0,
'inf': 2**1,
'war': 2**2,
'ave': 2**3,
'avg': 2**3,
'hig': 2**4,
'dis': 2**5,
}
for level in severity:
val |= sev_map[level[:3].lower()]
return val
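# A short illustrative example of the mapping above:
# get_severity(['Information', 'Warning', 'High']) returns 2 | 4 | 16 == 22,
# the bitmask Zabbix expects for those severities; an int or plain string
# argument is passed through unchanged.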
def get_zbx_user_query_data(zapi, user_name):
''' If name exists, retrieve it, and build query params.
'''
query = {}
if user_name:
zbx_user = get_user(zapi, user_name)
query = {'userid': zbx_user['userid']}
return query
def find_media(medias, user_media):
''' Find the user media in the list of medias
'''
for media in medias:
if all([media[key] == str(user_media[key]) for key in user_media.keys()]):
return media
return None
def get_active(is_active):
'''Determine active value
0 - enabled
1 - disabled
'''
active = 1
if isinstance(is_active, str):
is_active = is_active == 'enabled'
if is_active:
active = 0
return active
def get_mediatype(zapi, mediatype, mediatype_desc):
''' Determine mediatypeid
'''
mtypeid = None
if mediatype:
mtypeid = get_mtype(zapi, mediatype)
elif mediatype_desc:
mtypeid = get_mtype(zapi, mediatype_desc)
return mtypeid
def preprocess_medias(zapi, medias):
''' Insert the correct information when processing medias '''
for media in medias:
# Fetch the mediatypeid from the media desc (name)
if media.has_key('mediatype'):
media['mediatypeid'] = get_mediatype(zapi, mediatype=None, mediatype_desc=media.pop('mediatype'))
media['active'] = get_active(media.get('active'))
media['severity'] = int(get_severity(media['severity']))
return medias
# Disabling branching as the logic requires branches.
# I've also added a few safeguards which required more branches.
# pylint: disable=too-many-branches
def main():
'''
Ansible zabbix module for user media
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
login=dict(default=None, type='str'),
active=dict(default=False, type='bool'),
medias=dict(default=None, type='list'),
mediaid=dict(default=None, type='int'),
mediatype=dict(default=None, type='str'),
mediatype_desc=dict(default=None, type='str'),
#d-d,hh:mm-hh:mm;d-d,hh:mm-hh:mm...
period=dict(default=None, type='str'),
sendto=dict(default=None, type='str'),
severity=dict(default=None, type='str'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'user'
idname = "mediaid"
state = module.params['state']
# User media is fetched through the usermedia.get
zbx_user_query = get_zbx_user_query_data(zapi, module.params['login'])
content = zapi.get_content('usermedia', 'get',
{'userids': [uid for user, uid in zbx_user_query.items()]})
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
########
# Delete
########
if state == 'absent':
if not exists(content) or len(content['result']) == 0:
module.exit_json(changed=False, state="absent")
if not module.params['login']:
module.exit_json(failed=True, changed=False, results='Must specify a user login.', state="absent")
content = zapi.get_content(zbx_class_name, 'deletemedia', [res[idname] for res in content['result']])
if content.has_key('error'):
module.exit_json(changed=False, results=content['error'], state="absent")
module.exit_json(changed=True, results=content['result'], state="absent")
# Create and Update
if state == 'present':
active = get_active(module.params['active'])
mtypeid = get_mediatype(zapi, module.params['mediatype'], module.params['mediatype_desc'])
medias = module.params['medias']
if medias == None:
medias = [{'mediatypeid': mtypeid,
'sendto': module.params['sendto'],
'active': active,
'severity': int(get_severity(module.params['severity'])),
'period': module.params['period'],
}]
else:
medias = preprocess_medias(zapi, medias)
params = {'users': [zbx_user_query],
'medias': medias,
'output': 'extend',
}
########
# Create
########
if not exists(content):
if not params['medias']:
module.exit_json(changed=False, results=content['result'], state='present')
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'addmedia', params)
if content.has_key('error'):
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state='present')
# mediaid signifies an update
# If user params exists, check to see if they already exist in zabbix
# if they exist, then return as no update
# elif they do not exist, then take user params only
########
# Update
########
diff = {'medias': [], 'users': {}}
_ = [diff['medias'].append(media) for media in params['medias'] if not find_media(content['result'], media)]
if not diff['medias']:
module.exit_json(changed=False, results=content['result'], state="present")
for user in params['users']:
diff['users']['userid'] = user['userid']
# Medias have no real unique key, so we simply overwrite them with the incoming user's request
diff['medias'] = medias
# We have differences and need to update
content = zapi.get_content(zbx_class_name, 'updatemedia', diff)
if content.has_key('error'):
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
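# A minimal illustrative playbook task for this module (the module name and all
# values are examples only):
#
#   - zbx_user_media:
#       zbx_server: https://zabbix.example.com/zabbix/api_jsonrpc.php
#       zbx_user: Admin
#       zbx_password: secret
#       login: jdoe
#       medias:
#       - mediatype: Email
#         sendto: jdoe@example.com
#         active: enabled
#         severity: [Information, Warning, High]
#         period: 1-7,00:00-24:00
#       state: present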
|
|
"""utils originally from torchvision https://github.com/pytorch/vision/blob/master/references/classification/utils.py
With some changes: the iterable wrapper in MetricLogger has been removed.
"""
from collections import defaultdict, deque
import datetime
from time import time
import torch
import torch.distributed as dist
import errno
import os
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not (dist.is_available() and dist.is_initialized()):
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
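# A minimal usage sketch of SmoothedValue inside a training loop (the loss values
# are illustrative):
#
#   loss_meter = SmoothedValue(window_size=20)
#   for loss in (0.93, 0.87, 0.81):
#       loss_meter.update(loss)
#   print(str(loss_meter))   # "0.8700 (0.8700)" -- windowed median and global average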
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def print(self, header=None):
if not header:
header = ''
print_str = header
for name, meter in self.meters.items():
print_str += F" {name}: {meter}"
print(print_str)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
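# A small usage sketch (not part of the original utils); the logits and targets are
# made up, and both samples are classified correctly, so top-1 and top-2 accuracy
# are both 100%.
def _accuracy_example():
    logits = torch.tensor([[2.0, 0.5, 0.1],
                           [0.2, 0.1, 3.0]])
    targets = torch.tensor([0, 2])
    top1, top2 = accuracy(logits, targets, topk=(1, 2))
    return top1.item(), top2.item()  # (100.0, 100.0)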
class LearningRateScheduler:
"""Polynomial learning rate decay for multiple optimizers and multiple param groups
Args:
optimizers (list): optimizers for which to apply the learning rate changes
base_lrs (list): a nested list of base_lrs to use for each param_group of each optimizer
warmup_steps (int): number of linear warmup steps to perform at the beginning of training
warmup_factor (int): warmup factor according to the MLPerf warmup rule
see: https://github.com/mlperf/training_policies/blob/master/training_rules.adoc#91-hyperparameters-and-optimizer
for more information
decay_steps (int): number of steps over which to apply poly LR decay from base_lr to 0
decay_start_step (int): the optimization step at which to start decaying the learning rate
if None, the decay starts immediately after warmup
decay_power (float): polynomial learning rate decay power
end_lr_factor (float): for each optimizer and param group:
lr = max(current_lr_factor, end_lr_factor) * base_lr
Example:
lr_scheduler = LearningRateScheduler(optimizers=[optimizer], base_lrs=[[lr]],
warmup_steps=100, warmup_factor=0,
decay_start_step=1000, decay_steps=2000,
decay_power=2, end_lr_factor=1e-6)
for batch in data_loader:
lr_scheduler.step()
# forward, backward, weight update
"""
def __init__(self, optimizers, base_lrs, warmup_steps, warmup_factor,
decay_steps, decay_start_step, decay_power=2, end_lr_factor=0):
self.current_step = 0
self.optimizers = optimizers
self.base_lrs = base_lrs
self.warmup_steps = warmup_steps
self.warmup_factor = warmup_factor
self.decay_steps = decay_steps
self.decay_start_step = decay_start_step
self.decay_power = decay_power
self.end_lr_factor = end_lr_factor
self.decay_end_step = self.decay_start_step + self.decay_steps
if self.decay_start_step < self.warmup_steps:
raise ValueError('Learning rate warmup must finish before decay starts')
def _compute_lr_factor(self):
lr_factor = 1
if self.current_step <= self.warmup_steps:
warmup_step = 1 / (self.warmup_steps * (2 ** self.warmup_factor))
lr_factor = 1 - (self.warmup_steps - self.current_step) * warmup_step
elif self.decay_start_step < self.current_step <= self.decay_end_step:
lr_factor = ((self.decay_end_step - self.current_step) / self.decay_steps) ** self.decay_power
lr_factor = max(lr_factor, self.end_lr_factor)
elif self.current_step > self.decay_end_step:
lr_factor = self.end_lr_factor
return lr_factor
def step(self):
self.current_step += 1
lr_factor = self._compute_lr_factor()
for optim, base_lrs in zip(self.optimizers, self.base_lrs):
if isinstance(base_lrs, float):
base_lrs = [base_lrs]
for group_id, base_lr in enumerate(base_lrs):
optim.param_groups[group_id]['lr'] = base_lr * lr_factor
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def timer_start():
"""Synchronize, start timer and profiler"""
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
return start_time
def timer_stop():
"""Synchronize, stop timer and profiler"""
torch.cuda.synchronize()
stop_time = time()
torch.cuda.profiler.stop()
return stop_time
|
|
###############################################################################
# encoding: utf-8
#
# Copyright (c) 2011 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
import doctest
import unittest
import difflib
import pprint
from slimit.lexer import Lexer
def decorator(cls):
def make_test_function(input, expected):
def test_func(self):
lexer = self._get_lexer()
lexer.input(input)
result = ['%s %s' % (token.type, token.value) for token in lexer]
self.assertListEqual(result, expected)
return test_func
for index, (input, expected) in enumerate(cls.TEST_CASES):
func = make_test_function(input, expected)
setattr(cls, 'test_case_%d' % index, func)
return cls
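# Note: with this decorator, each (input, expected) pair in TEST_CASES below becomes
# its own test method, e.g. TEST_CASES[0] is exposed as LexerTestCase.test_case_0,
# so unittest can run and report every lexer case individually.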
# The structure and some test cases are taken
# from https://bitbucket.org/ned/jslex
@decorator
class LexerTestCase(unittest.TestCase):
def _get_lexer(self):
lexer = Lexer()
return lexer
def assertListEqual(self, first, second):
"""Assert that two lists are equal.
Prints differences on error.
This method is similar to that of Python 2.7 'assertListEqual'
"""
if first != second:
message = '\n'.join(
difflib.ndiff(pprint.pformat(first).splitlines(),
pprint.pformat(second).splitlines())
)
self.fail('Lists differ:\n' + message)
def test_illegal_unicode_char_in_identifier(self):
lexer = self._get_lexer()
lexer.input(u'\u0036_tail')
token = lexer.token()
# \u0036_tail is the same as 6_tail and that's not a correct ID
# Check that the token is NUMBER and not an ID
self.assertEqual(token.type, 'NUMBER')
self.assertEqual(token.value, '6')
TEST_CASES = [
# Identifiers
('i my_variable_name c17 _dummy $str $ _ CamelCase class2type',
['ID i', 'ID my_variable_name', 'ID c17', 'ID _dummy',
'ID $str', 'ID $', 'ID _', 'ID CamelCase', 'ID class2type']
),
(u'\u03c0 \u03c0_tail var\ua67c',
[u'ID \u03c0', u'ID \u03c0_tail', u'ID var\ua67c']),
# https://github.com/rspivak/slimit/issues/2
('nullify truelie falsepositive',
['ID nullify', 'ID truelie', 'ID falsepositive']),
# Keywords
# ('break case ...', ['BREAK break', 'CASE case', ...])
(' '.join(kw.lower() for kw in Lexer.keywords),
['%s %s' % (kw, kw.lower()) for kw in Lexer.keywords]
),
('break Break BREAK', ['BREAK break', 'ID Break', 'ID BREAK']),
# Literals
('null true false Null True False',
['NULL null', 'TRUE true', 'FALSE false',
'ID Null', 'ID True', 'ID False']
),
# Punctuators
('a /= b', ['ID a', 'DIVEQUAL /=', 'ID b']),
(('= == != === !== < > <= >= || && ++ -- << >> '
'>>> += -= *= <<= >>= >>>= &= %= ^= |='),
['EQ =', 'EQEQ ==', 'NE !=', 'STREQ ===', 'STRNEQ !==', 'LT <',
'GT >', 'LE <=', 'GE >=', 'OR ||', 'AND &&', 'PLUSPLUS ++',
'MINUSMINUS --', 'LSHIFT <<', 'RSHIFT >>', 'URSHIFT >>>',
'PLUSEQUAL +=', 'MINUSEQUAL -=', 'MULTEQUAL *=', 'LSHIFTEQUAL <<=',
'RSHIFTEQUAL >>=', 'URSHIFTEQUAL >>>=', 'ANDEQUAL &=', 'MODEQUAL %=',
'XOREQUAL ^=', 'OREQUAL |=',
]
),
('. , ; : + - * % & | ^ ~ ? ! ( ) { } [ ]',
['PERIOD .', 'COMMA ,', 'SEMI ;', 'COLON :', 'PLUS +', 'MINUS -',
'MULT *', 'MOD %', 'BAND &', 'BOR |', 'BXOR ^', 'BNOT ~',
'CONDOP ?', 'NOT !', 'LPAREN (', 'RPAREN )', 'LBRACE {', 'RBRACE }',
'LBRACKET [', 'RBRACKET ]']
),
('a / b', ['ID a', 'DIV /', 'ID b']),
# Numbers
(('3 3.3 0 0. 0.0 0.001 010 3.e2 3.e-2 3.e+2 3E2 3E+2 3E-2 '
'0.5e2 0.5e+2 0.5e-2 33 128.15 0x001 0X12ABCDEF 0xabcdef'),
['NUMBER 3', 'NUMBER 3.3', 'NUMBER 0', 'NUMBER 0.', 'NUMBER 0.0',
'NUMBER 0.001', 'NUMBER 010', 'NUMBER 3.e2', 'NUMBER 3.e-2',
'NUMBER 3.e+2', 'NUMBER 3E2', 'NUMBER 3E+2', 'NUMBER 3E-2',
'NUMBER 0.5e2', 'NUMBER 0.5e+2', 'NUMBER 0.5e-2', 'NUMBER 33',
'NUMBER 128.15', 'NUMBER 0x001', 'NUMBER 0X12ABCDEF',
'NUMBER 0xabcdef']
),
# Strings
(""" '"' """, ["""STRING '"'"""]),
(r'''"foo" 'foo' "x\";" 'x\';' "foo\tbar"''',
['STRING "foo"', """STRING 'foo'""", r'STRING "x\";"',
r"STRING 'x\';'", r'STRING "foo\tbar"']
),
(r"""'\x55' "\x12ABCDEF" '!@#$%^&*()_+{}[]\";?'""",
[r"STRING '\x55'", r'STRING "\x12ABCDEF"',
r"STRING '!@#$%^&*()_+{}[]\";?'"]
),
(r"""'\u0001' "\uFCEF" 'a\\\b\n'""",
[r"STRING '\u0001'", r'STRING "\uFCEF"', r"STRING 'a\\\b\n'"]
),
(u'"\u0442\u0435\u0441\u0442 \u0441\u0442\u0440\u043e\u043a\u0438\\""', [u'STRING "\u0442\u0435\u0441\u0442 \u0441\u0442\u0440\u043e\u043a\u0438\\""']),
# Bug - https://github.com/rspivak/slimit/issues/5
(r"var tagRegExp = new RegExp('<(\/*)(FooBar)', 'gi');",
['VAR var', 'ID tagRegExp', 'EQ =',
'NEW new', 'ID RegExp', 'LPAREN (',
r"STRING '<(\/*)(FooBar)'", 'COMMA ,', "STRING 'gi'",
'RPAREN )', 'SEMI ;']
),
# same as above but inside double quotes
(r'"<(\/*)(FooBar)"', [r'STRING "<(\/*)(FooBar)"']),
# multiline string (string written across multiple lines
# of code) https://github.com/rspivak/slimit/issues/24
(r"""var a = 'hello \
world'""",
['VAR var', 'ID a', 'EQ =', "STRING 'hello world'"]),
(r'''var a = "hello \
world"''',
['VAR var', 'ID a', 'EQ =', 'STRING "hello world"']),
# # Comments
# ("""
# //comment
# a = 5;
# """, ['LINE_COMMENT //comment', 'ID a', 'EQ =', 'NUMBER 5', 'SEMI ;']
# ),
# ('a//comment', ['ID a', 'LINE_COMMENT //comment']),
# ('/***/b/=3//line',
# ['BLOCK_COMMENT /***/', 'ID b', 'DIVEQUAL /=',
# 'NUMBER 3', 'LINE_COMMENT //line']
# ),
# ('/*\n * Copyright LGPL 2011 \n*/\na = 1;',
# ['BLOCK_COMMENT /*\n * Copyright LGPL 2011 \n*/',
# 'ID a', 'EQ =', 'NUMBER 1', 'SEMI ;']
# ),
# regex
(r'a=/a*/,1', ['ID a', 'EQ =', 'REGEX /a*/', 'COMMA ,', 'NUMBER 1']),
(r'a=/a*[^/]+/,1',
['ID a', 'EQ =', 'REGEX /a*[^/]+/', 'COMMA ,', 'NUMBER 1']
),
(r'a=/a*\[^/,1',
['ID a', 'EQ =', r'REGEX /a*\[^/', 'COMMA ,', 'NUMBER 1']
),
(r'a=/\//,1', ['ID a', 'EQ =', r'REGEX /\//', 'COMMA ,', 'NUMBER 1']),
# not a regex, just a division
# https://github.com/rspivak/slimit/issues/6
(r'x = this / y;',
['ID x', 'EQ =', 'THIS this', r'DIV /', r'ID y', r'SEMI ;']),
# next two are from
# http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
["FOR for", "LPAREN (", "VAR var", "ID x", "EQ =", "ID a", "IN in",
"ID foo", "AND &&", 'STRING "</x>"', "OR ||", "ID mot", "CONDOP ?",
"ID z", "COLON :", "REGEX /x:3;x<5;y</g", "DIV /", "ID i", "RPAREN )",
"LBRACE {", "ID xyz", "LPAREN (", "ID x", "PLUSPLUS ++", "RPAREN )",
"SEMI ;", "RBRACE }"]
),
("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
["FOR for", "LPAREN (", "VAR var", "ID x", "EQ =", "ID a", "IN in",
"ID foo", "AND &&", 'STRING "</x>"', "OR ||", "ID mot", "CONDOP ?",
"ID z", "DIV /", "ID x", "COLON :", "NUMBER 3", "SEMI ;", "ID x",
"LT <", "NUMBER 5", "SEMI ;", "ID y", "LT <", "REGEX /g/i",
"RPAREN )", "LBRACE {", "ID xyz", "LPAREN (", "ID x", "PLUSPLUS ++",
"RPAREN )", "SEMI ;", "RBRACE }"]
),
# Various "illegal" regexes that are valid according to the std.
(r"""/????/, /++++/, /[----]/ """,
['REGEX /????/', 'COMMA ,',
'REGEX /++++/', 'COMMA ,', 'REGEX /[----]/']
),
# Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409
(r"""/\[/""", [r"""REGEX /\[/"""]),
(r"""/[i]/""", [r"""REGEX /[i]/"""]),
(r"""/[\]]/""", [r"""REGEX /[\]]/"""]),
(r"""/a[\]]/""", [r"""REGEX /a[\]]/"""]),
(r"""/a[\]]b/""", [r"""REGEX /a[\]]b/"""]),
(r"""/[\]/]/gi""", [r"""REGEX /[\]/]/gi"""]),
(r"""/\[[^\]]+\]/gi""", [r"""REGEX /\[[^\]]+\]/gi"""]),
("""
rexl.re = {
NAME: /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
""",
["ID rexl", "PERIOD .", "ID re", "EQ =", "LBRACE {",
"ID NAME", "COLON :",
r"""REGEX /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/""", "COMMA ,",
"ID UNQUOTED_LITERAL", "COLON :",
r"""REGEX /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"COMMA ,", "ID QUOTED_LITERAL", "COLON :",
r"""REGEX /^'(?:[^']|'')*'/""", "COMMA ,", "ID NUMERIC_LITERAL",
"COLON :",
r"""REGEX /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "COMMA ,",
"ID SYMBOL", "COLON :",
r"""REGEX /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
"RBRACE }", "SEMI ;"]
),
("""
rexl.re = {
NAME: /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
str = '"';
""",
["ID rexl", "PERIOD .", "ID re", "EQ =", "LBRACE {",
"ID NAME", "COLON :", r"""REGEX /^(?!\d)(?:\w)+|^"(?:[^"]|"")+"/""",
"COMMA ,", "ID UNQUOTED_LITERAL", "COLON :",
r"""REGEX /^@(?:(?!\d)(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"COMMA ,", "ID QUOTED_LITERAL", "COLON :",
r"""REGEX /^'(?:[^']|'')*'/""", "COMMA ,",
"ID NUMERIC_LITERAL", "COLON :",
r"""REGEX /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "COMMA ,",
"ID SYMBOL", "COLON :",
r"""REGEX /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
"RBRACE }", "SEMI ;",
"ID str", "EQ =", """STRING '"'""", "SEMI ;",
]),
(r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
["THIS this", "PERIOD .", "ID _js", "EQ =",
r'''STRING "e.str(\""''', "PLUS +", "THIS this", "PERIOD .",
"ID value", "PERIOD .", "ID replace", "LPAREN (", r"REGEX /\\/g",
"COMMA ,", r'STRING "\\\\"', "RPAREN )", "PERIOD .", "ID replace",
"LPAREN (", r'REGEX /"/g', "COMMA ,", r'STRING "\\\""', "RPAREN )",
"PLUS +", r'STRING "\")"', "SEMI ;"]),
]
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(LexerTestCase),
doctest.DocFileSuite(
'../lexer.py',
optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS
),
))
|
|
from ctypes import *
import unittest
import os
import ctypes
import _ctypes_test
class BITS(Structure):
_fields_ = [("A", c_int, 1),
("B", c_int, 2),
("C", c_int, 3),
("D", c_int, 4),
("E", c_int, 5),
("F", c_int, 6),
("G", c_int, 7),
("H", c_int, 8),
("I", c_int, 9),
("M", c_short, 1),
("N", c_short, 2),
("O", c_short, 3),
("P", c_short, 4),
("Q", c_short, 5),
("R", c_short, 6),
("S", c_short, 7)]
func = CDLL(_ctypes_test.__file__).unpack_bitfields
func.argtypes = POINTER(BITS), c_char
##for n in "ABCDEFGHIMNOPQRS":
## print n, hex(getattr(BITS, n).size), getattr(BITS, n).offset
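# A short illustrative example of the truncation behaviour exercised by the tests
# below: field C is a 3-bit signed slot, so assigning 5 (0b101) keeps only the low
# three bits and sign-extends them when read back.
#
#   b = BITS()
#   b.C = 5
#   b.C          # -3 on typical two's-complement platforms; func(byref(b), 'C')
#                # unpacks the same value, which is what test_ints asserts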
class C_Test(unittest.TestCase):
def test_ints(self):
for i in range(512):
for name in "ABCDEFGHI":
b = BITS()
setattr(b, name, i)
self.assertEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
def test_shorts(self):
for i in range(256):
for name in "MNOPQRS":
b = BITS()
setattr(b, name, i)
self.assertEqual((name, i, getattr(b, name)), (name, i, func(byref(b), name)))
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
class BitFieldTest(unittest.TestCase):
def test_longlong(self):
class X(Structure):
_fields_ = [("a", c_longlong, 1),
("b", c_longlong, 62),
("c", c_longlong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
x.a, x.b, x.c = -1, 7, -1
self.assertEqual((x.a, x.b, x.c), (-1, 7, -1))
def test_ulonglong(self):
class X(Structure):
_fields_ = [("a", c_ulonglong, 1),
("b", c_ulonglong, 62),
("c", c_ulonglong, 1)]
self.assertEqual(sizeof(X), sizeof(c_longlong))
x = X()
self.assertEqual((x.a, x.b, x.c), (0, 0, 0))
x.a, x.b, x.c = 7, 7, 7
self.assertEqual((x.a, x.b, x.c), (1, 7, 1))
def test_signed(self):
for c_typ in signed_int_types:
class X(Structure):
_fields_ = [("dummy", c_typ),
("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ)*2)
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, -1, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, -1, 0))
def test_unsigned(self):
for c_typ in unsigned_int_types:
class X(Structure):
_fields_ = [("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
x = X()
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
x.a = -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 7, 0, 0))
x.a, x.b = 0, -1
self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 7, 0))
def fail_fields(self, *fields):
return self.get_except(type(Structure), "X", (),
{"_fields_": fields})
def test_nonint_types(self):
# bit fields are not allowed on non-integer types.
result = self.fail_fields(("a", c_char_p, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
result = self.fail_fields(("a", c_void_p, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
if c_int != c_long:
result = self.fail_fields(("a", POINTER(c_int), 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
result = self.fail_fields(("a", c_char, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
try:
c_wchar
except NameError:
pass
else:
result = self.fail_fields(("a", c_wchar, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
self.assertEqual(result[0], TypeError)
self.assertIn('bit fields not allowed for type', result[1])
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
result = self.fail_fields(("a", c_typ, 0))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
class X(Structure):
_fields_ = [("a", c_typ, 1)]
self.assertEqual(sizeof(X), sizeof(c_typ))
class X(Structure):
_fields_ = [("a", c_typ, sizeof(c_typ)*8)]
self.assertEqual(sizeof(X), sizeof(c_typ))
result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))
def test_multi_bitfields_size(self):
class X(Structure):
_fields_ = [("a", c_short, 1),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short))
class X(Structure):
_fields_ = [("a", c_short, 1),
("a1", c_short),
("b", c_short, 14),
("c", c_short, 1)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, 0)
self.assertEqual(X.a1.offset, sizeof(c_short))
self.assertEqual(X.b.offset, sizeof(c_short)*2)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
class X(Structure):
_fields_ = [("a", c_short, 3),
("b", c_short, 14),
("c", c_short, 14)]
self.assertEqual(sizeof(X), sizeof(c_short)*3)
self.assertEqual(X.a.offset, sizeof(c_short)*0)
self.assertEqual(X.b.offset, sizeof(c_short)*1)
self.assertEqual(X.c.offset, sizeof(c_short)*2)
def get_except(self, func, *args, **kw):
try:
func(*args, **kw)
except Exception, detail:
return detail.__class__, str(detail)
def test_mixed_1(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 4)]
if os.name in ("nt", "ce"):
self.assertEqual(sizeof(X), sizeof(c_int)*2)
else:
self.assertEqual(sizeof(X), sizeof(c_int))
def test_mixed_2(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
self.assertEqual(sizeof(X), sizeof(c_int)*2)
def test_mixed_3(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
self.assertEqual(sizeof(X), sizeof(c_byte))
def test_mixed_4(self):
class X(Structure):
_fields_ = [("a", c_short, 4),
("b", c_short, 4),
("c", c_int, 24),
("d", c_short, 4),
("e", c_short, 4),
("f", c_int, 24)]
# MSVC does NOT combine c_short and c_int into one field, GCC
# does (unless GCC is run with '-mms-bitfields' which
# produces code compatible with MSVC).
if os.name in ("nt", "ce"):
self.assertEqual(sizeof(X), sizeof(c_int) * 4)
else:
self.assertEqual(sizeof(X), sizeof(c_int) * 2)
def test_anon_bitfields(self):
# anonymous bit-fields gave a strange error message
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
class Y(Structure):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
if __name__ == "__main__":
unittest.main()
|
|
"""Tests for eval_lib.image_batches."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import unittest
from six import assertCountEqual
from eval_lib import image_batches
from eval_lib import submissions
from eval_lib.tests import fake_cloud_client
ROUND_NAME = "round-name"
class ImageBatchesBaseTest(unittest.TestCase):
def setUp(self):
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
self.image_batches = image_batches.ImageBatchesBase(
datastore_client=self.datastore_client,
entity_kind_batches="Batch",
entity_kind_images="Image",
)
def test_add_batch(self):
self.assertEqual(0, len(self.image_batches.data))
self.image_batches.add_batch(
"batch1", batch_properties={"k1": "v1", "k2": "v2"}
)
self.assertEqual(1, len(self.image_batches.data))
self.assertDictEqual(
{"k1": "v1", "k2": "v2", "images": {}}, self.image_batches["batch1"]
)
self.image_batches.add_batch("batch2", batch_properties={"k3": "v3"})
self.assertEqual(2, len(self.image_batches.data))
self.assertDictEqual({"k3": "v3", "images": {}}, self.image_batches["batch2"])
def test_add_image(self):
self.assertEqual(0, len(self.image_batches.data))
self.image_batches.add_batch(
"batch1", batch_properties={"k1": "v1", "k2": "v2"}
)
self.image_batches.add_image("batch1", "img1", image_properties={"k4": "v4"})
self.assertEqual(1, len(self.image_batches.data))
self.assertDictEqual(
{"k1": "v1", "k2": "v2", "images": {"img1": {"k4": "v4"}}},
self.image_batches["batch1"],
)
self.image_batches.add_image("batch1", "img2", image_properties={"k5": "v5"})
self.assertEqual(1, len(self.image_batches.data))
self.assertDictEqual(
{
"k1": "v1",
"k2": "v2",
"images": {"img1": {"k4": "v4"}, "img2": {"k5": "v5"}},
},
self.image_batches["batch1"],
)
def test_write_to_datastore(self):
# add 2 batches and 3 images, write everything to datastore
self.image_batches.add_batch(
"batch1", batch_properties={"k1": "v1", "k2": "v2"}
)
self.image_batches.add_batch("batch2", batch_properties={"k3": "v3"})
self.image_batches.add_image("batch1", "img1", image_properties={"k4": "v4"})
self.image_batches.add_image("batch1", "img2", image_properties={"k5": "v5"})
self.image_batches.add_image("batch2", "img3", image_properties={"k6": "v6"})
self.image_batches.write_to_datastore()
# verify batches
batch_entity1 = self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey("Batch", "batch1")
)
batch_entity1.update({"k1": "v1", "k2": "v2"})
batch_entity2 = self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey("Batch", "batch2")
)
batch_entity2.update({"k3": "v3"})
assertCountEqual(
self,
[batch_entity1, batch_entity2],
self.datastore_client.query_fetch(kind="Batch"),
)
# verify images
        img_entity1 = self.datastore_client.entity(
            fake_cloud_client.FakeDatastoreKey("Batch", "batch1", "Image", "img1")
        )
        img_entity1.update({"k4": "v4"})
        img_entity2 = self.datastore_client.entity(
            fake_cloud_client.FakeDatastoreKey("Batch", "batch1", "Image", "img2")
        )
        img_entity2.update({"k5": "v5"})
        img_entity3 = self.datastore_client.entity(
            fake_cloud_client.FakeDatastoreKey("Batch", "batch2", "Image", "img3")
        )
        img_entity3.update({"k6": "v6"})
        assertCountEqual(
            self,
            [img_entity1, img_entity2, img_entity3],
            self.datastore_client.query_fetch(kind="Image"),
        )
def test_write_single_batch_images_to_datastore(self):
# add 2 batches and 3 images, write only one batch to datastore
self.image_batches.add_batch(
"batch1", batch_properties={"k1": "v1", "k2": "v2"}
)
self.image_batches.add_batch("batch2", batch_properties={"k3": "v3"})
self.image_batches.add_image("batch1", "img1", image_properties={"k4": "v4"})
self.image_batches.add_image("batch1", "img2", image_properties={"k5": "v5"})
self.image_batches.add_image("batch2", "img3", image_properties={"k6": "v6"})
self.image_batches.write_single_batch_images_to_datastore("batch2")
# verify batches
# write_single_batch_images_to_datastore writes only images, so no batches
assertCountEqual(self, [], self.datastore_client.query_fetch(kind="Batch"))
# verify images
img_entity3 = self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey("Batch", "batch2", "Image", "img3")
)
img_entity3.update({"k6": "v6"})
assertCountEqual(
self, [img_entity3], self.datastore_client.query_fetch(kind="Image")
)
class DatasetBatchesTest(unittest.TestCase):
def setUp(self):
storage_blobs = [
"dataset/dev/img1.png",
"dataset/dev/img2.png",
"dataset/dev/img3.png",
"dataset/dev/img4.png",
"dataset/dev/img5.png",
"dataset/dev_dataset.csv",
]
self.storage_client = fake_cloud_client.FakeStorageClient(storage_blobs)
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
self.dataset_batches = image_batches.DatasetBatches(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
dataset_name="dev",
)
def verify_dataset_batches(self):
self.assertEqual(2, len(self.dataset_batches.data))
all_images = {}
for batch in self.dataset_batches.data.values():
self.assertIn(batch["epsilon"], [4, 8, 12, 16])
self.assertGreaterEqual(3, len(batch["images"]))
self.assertTrue(
set(all_images.keys()).isdisjoint(batch["images"].keys()),
msg=(
"all_images and batch['images'] contains common keys %s"
% set(all_images.keys()).intersection(batch["images"].keys())
),
)
all_images.update(batch["images"])
assertCountEqual(
self,
[
{"dataset_image_id": "img1", "image_path": "dataset/dev/img1.png"},
{"dataset_image_id": "img2", "image_path": "dataset/dev/img2.png"},
{"dataset_image_id": "img3", "image_path": "dataset/dev/img3.png"},
{"dataset_image_id": "img4", "image_path": "dataset/dev/img4.png"},
{"dataset_image_id": "img5", "image_path": "dataset/dev/img5.png"},
],
all_images.values(),
)
def verify_datastore_entities(self):
# Verify 'DatasetBatch' entities
expected_batch_entities = []
for batch_id, batch in self.dataset_batches.data.items():
entity = self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey("DatasetBatch", batch_id)
)
entity["epsilon"] = batch["epsilon"]
expected_batch_entities.append(entity)
assertCountEqual(
self,
expected_batch_entities,
self.datastore_client.query_fetch(kind="DatasetBatch"),
)
# Verify 'DatasetImage' entities
expected_image_entities = []
for batch_id, batch in self.dataset_batches.data.items():
for image_id, image in batch["images"].items():
entity = self.datastore_client.entity(
fake_cloud_client.FakeDatastoreKey(
"DatasetBatch", batch_id, "DatasetImage", image_id
)
)
entity.update(image)
expected_image_entities.append(entity)
assertCountEqual(
self,
expected_image_entities,
self.datastore_client.query_fetch(kind="DatasetImage"),
)
def test_init_from_storage(self):
self.dataset_batches.init_from_storage_write_to_datastore(batch_size=3)
self.verify_dataset_batches()
self.verify_datastore_entities()
def test_init_from_datastore(self):
self.dataset_batches.init_from_storage_write_to_datastore(batch_size=3)
self.dataset_batches = image_batches.DatasetBatches(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
dataset_name="dev",
)
self.dataset_batches.init_from_datastore()
self.verify_dataset_batches()
def test_count_num_images(self):
self.dataset_batches.init_from_storage_write_to_datastore(batch_size=3)
self.assertEqual(5, self.dataset_batches.count_num_images())
class AdversarialBatchesTest(unittest.TestCase):
def setUp(self):
# prepare dataset batches and submissions
storage_blobs = [
"dataset/dev/img1.png",
"dataset/dev/img2.png",
"dataset/dev/img3.png",
"dataset/dev/img4.png",
"dataset/dev/img5.png",
"dataset/dev_dataset.csv",
ROUND_NAME + "/submissions/nontargeted/1.zip",
ROUND_NAME + "/submissions/nontargeted/baseline_nt.zip",
ROUND_NAME + "/submissions/targeted/1.zip",
ROUND_NAME + "/submissions/targeted/2.zip",
ROUND_NAME + "/submissions/defense/3.zip",
ROUND_NAME + "/submissions/defense/baseline_adv_train.zip",
]
self.storage_client = fake_cloud_client.FakeStorageClient(storage_blobs)
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
self.dataset_batches = image_batches.DatasetBatches(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
dataset_name="dev",
)
self.dataset_batches.init_from_storage_write_to_datastore(batch_size=3)
self.submissions = submissions.CompetitionSubmissions(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
round_name=ROUND_NAME,
)
self.submissions.init_from_storage_write_to_datastore()
def verify_adversarial_batches_without_images(self, adv_batches):
attack_ids = list(self.submissions.attacks.keys()) + list(
self.submissions.targeted_attacks.keys()
)
dataset_batch_ids = self.dataset_batches.data.keys()
expected_batches = [
{"dataset_batch_id": b_id, "submission_id": a_id, "images": {}}
for (b_id, a_id) in itertools.product(dataset_batch_ids, attack_ids)
]
assertCountEqual(self, expected_batches, adv_batches.data.values())
def test_init_from_dataset_and_submissions(self):
adv_batches = image_batches.AversarialBatches(
datastore_client=self.datastore_client
)
adv_batches.init_from_dataset_and_submissions_write_to_datastore(
dataset_batches=self.dataset_batches,
attack_submission_ids=self.submissions.get_all_attack_ids(),
)
self.verify_adversarial_batches_without_images(adv_batches)
def test_init_from_datastore(self):
# populate datastore
adv_batches = image_batches.AversarialBatches(
datastore_client=self.datastore_client
)
adv_batches.init_from_dataset_and_submissions_write_to_datastore(
dataset_batches=self.dataset_batches,
attack_submission_ids=self.submissions.get_all_attack_ids(),
)
# init AversarialBatches from datastore
adv_batches = image_batches.AversarialBatches(
datastore_client=self.datastore_client
)
adv_batches.init_from_datastore()
self.verify_adversarial_batches_without_images(adv_batches)
def test_count_generated_adv_examples(self):
adv_batches = image_batches.AversarialBatches(
datastore_client=self.datastore_client
)
adv_batches.init_from_dataset_and_submissions_write_to_datastore(
dataset_batches=self.dataset_batches,
attack_submission_ids=self.submissions.get_all_attack_ids(),
)
self.assertDictEqual(
{"SUBA000": 0, "SUBA001": 0, "SUBT000": 0, "SUBT001": 0},
adv_batches.count_generated_adv_examples(),
)
if __name__ == "__main__":
unittest.main()
|
|
from twisted.words.protocols import irc
from twisted.internet import protocol
import traceback,sys
import collections
import shlex
class Bot(irc.IRCClient):
def __init__(self):
self._is_signed_on = False
self._joined_channels = set()
self._msg_queue = []
self.command_modules = {}
self.signed_on_cbs = []
self.joined_channel_cbs = []
self.privmsg_monitors = []
def _get_nickname(self):
return self.factory.nickname
nickname = property(_get_nickname)
def install_command_module(self,name,command_module):
assert name not in self.command_modules
self.command_modules[name] = command_module
command_module.config = self.main_context['config']
command_module.main_context = self.main_context
command_module.bot = self
def poll_msg_queue(self):
if not self.is_signed_on():
return
next_msg_queue = []
for msg in self._msg_queue:
if not self.is_signed_on():
next_msg_queue += [msg]
continue
user,message,length = msg
#print
#print
#print 'for \'{msg}\' in self._msg_queue'.format(msg=msg)
#print
#print
if len(user) == 0:
continue
if user[0] != '#':
self.msg(user,message,length)
continue
channel = user
if channel not in self._joined_channels:
next_msg_queue += [msg]
continue
self.msg(channel,message,length)
self._msg_queue = next_msg_queue
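    # Usage sketch (illustrative, not part of the original API): queue_msg() may be
    # called before the bot has signed on or joined the target channel, and
    # poll_msg_queue() flushes whatever can currently be delivered, so it is meant
    # to be called periodically, e.g. from a twisted LoopingCall:
    #   from twisted.internet import task
    #   task.LoopingCall(bot.poll_msg_queue).start(1.0)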
def queue_msg(self,user,message,length=None):
#print 'queue_msg({user},{message},{length})'.format(user=user,message=message,length=length)
self._msg_queue.append((user,message,length))
def receivedMOTD(self, motd):
print 'receivedMOTD()'
#print '\n'.join(motd)
print 'sending join command, self.factory.channel:',self.factory.channel
self.join(self.factory.channel)
def lineReceived(self, line):
print 'lineReceived({line})'.format(line=line)
irc.IRCClient.lineReceived(self,line)
def signedOn(self):
#print 'self.factory.channel:',self.factory.channel
self._is_signed_on = True
print "Signed on as %s." % (self.nickname,)
for cb in self.signed_on_cbs:
try:
cb(self)
except Exception as e:
print 'Exception while calling signed_on_cbs, e:',e
print 'cb:',cb
traceback.print_exc(file=sys.stdout)
continue
"""
{channel => {nick => host}}
"""
self.channel_nicks = {}
def is_signed_on(self):
return self._is_signed_on
def joined(self, channel):
print "Joined %s." % (channel,)
self._joined_channels.add(channel)
for cb in self.joined_channel_cbs:
try:
cb(self)
except Exception as e:
print 'Exception while calling joined_channel_cbs, e:',e
print 'cb:',cb
traceback.print_exc(file=sys.stdout)
continue
def left(self, channel):
print "Left %s." % (channel,)
self._joined_channels.discard(channel)
def kickedFrom(self, channel, kicker, message):
        self._joined_channels.discard(channel)
"""
def names(self, channel):
"List the users in 'channel', usage: client.who('#testroom')"
#self.sendLine('NAMES %s' % channel)
self.channel_nicks[channel] = {}
pass
def irc_RPL_NAMREPLY(self, prefix, params):
print 'irc_RPL_NAMREPLY({prefix},{params})'.format(prefix=prefix,params=params)
channel = params[2]
nicks = params[3].strip().split(' ')
this_channel_nicks = self.channel_nicks[channel]
for nick in nicks:
this_channel_nicks.add(nick)
print 'self.channel_nicks:',self.channel_nicks
def irc_RPL_ENDOFNAMES(self, prefix, params):
print 'irc_RPL_ENDOFNAMES({prefix},{params})'.format(prefix=prefix,params=params)
def joined(self, channel):
#print "Joined %s." % (channel,)
self.names(channel)
pass
def remember_user(self,user,channel):
channel_nicks = self.channel_nicks
if channel not in channel_nicks:
channel_nicks[channel] = set()
nick,_,_ = user.partition('!')
channel_nicks[channel] |= set([nick])
def forget_user(self,user, channel):
channel_nicks = self.channel_nicks
if channel not in channel_nicks:
print "WARNING, SOMETHING WRONG IN forget_user(): channel not in channel_nicks"
return
nick,_,_ = user.partition('!')
if nick not in channel_nicks[channel]:
print "WARNING, SOMETHING WRONG IN forget_user(): nick not in channel_nicks[channel]"
return
channel_nicks[channel].discard(nick)
"""
def userJoined(self,user, channel):
print 'userJoined({user},{channel})'.format(user=user,channel=channel)
#if channel in self.channel_nicks:
# print 'WARNING: userJoined(), channel in self.channel_nicks'
#self.channel_nicks[channel] = set()
#self.remember_user(user,channel)
"""
def userLeft(self,user, channel):
print 'userLeft({user},{channel})'.format(user=user,channel=channel)
self.forget_user(user,channel)
def userQuit(self,user, channel):
print 'userQuit({user},{channel})'.format(user=user,channel=channel)
self.forget_user(user,channel)
def userKicked(self,user, channel):
print 'userKicked({user},{channel})'.format(user=user,channel=channel)
self.forget_user(user,channel)
def userRenamed(self,oldname, newname):
channel_nicks = self.channel_nicks
print 'userKicked({oldname},{newname})'.format(newname=newname,oldname=oldname)
for channel,nicks in channel_nicks.iteritems():
if oldname in nicks:
nicks.discard(oldname)
nicks.add(newname)
"""
def print_usage(self,variables,wrong_usage=False):
usage = ''
cmds = []
usage_list = []
for cmd_name,cmd_module in self.command_modules.iteritems():
try:
cmds += [cmd_name]
usage_list += [cmd_module.usage()]
except Exception as e:
print 'Exception printing usage of ',cmd_name, 'module:',cmd_module, 'e:',e
traceback.print_exc(file=sys.stdout)
continue
usage += '\n\n' + 'Available commands: ' + ' '.join(cmds)
usage += '\n\n' + 'Usage: \n ' + '\n '.join(usage_list) + '\n\n'
self.msg(variables['usernick'],usage)
if variables['in_channel']:
if wrong_usage:
self.msg(variables['response_channel'], variables['response_prefix'] + 'umm ... wrong usage ... I pm\'d you proper usage!')
else:
self.msg(variables['response_channel'], variables['response_prefix'] + 'I pm\'d you the proper usage.')
def run_command(self,full_cmd,variables):
args = shlex.split(full_cmd)
cmd = args[0]
args = args[1:]
if cmd not in self.command_modules:
self.print_usage(variables,True)
return
try:
self.command_modules[cmd].run(cmd,args,variables)
except Exception as e:
print e
self.msg(variables['response_channel'], variables['response_prefix'] + 'Error running command, see bot logs')
raise
def irc_MODE(self, prefix, params):
#print 'irc_MODE({prefix}, {params})'.format(prefix=prefix,params=params)
irc.IRCClient.irc_MODE(self,prefix,params)
def irc_NOTICE(self, prefix, params):
#print 'irc_NOTICE({prefix}, {params})'.format(prefix=prefix,params=params)
irc.IRCClient.irc_NOTICE(self,prefix,params)
def irc_RPL_MYINFO(self, prefix, params):
#print 'irc_RPL_MYINFO({prefix}, {params})'.format(prefix=prefix,params=params)
irc.IRCClient.irc_RPL_MYINFO(self,prefix,params)
def irc_RPL_YOURHOST(self, prefix, params):
#print 'irc_RPL_YOURHOST({prefix}, {params})'.format(prefix=prefix,params=params)
irc.IRCClient.irc_RPL_YOURHOST(self,prefix,params)
def irc_unknown(self, prefix, command, params):
#print 'irc_unknown({prefix}, {command}, {params})'.format(prefix=prefix,command=command,params=params)
irc.IRCClient.irc_unknown(self,prefix, command, params)
def irc_ERR_CHANOPRIVSNEEDED(self,prefix,params):
#print 'irc_ERR_CHANOPRIVSNEEDED({prefix}, {params})'.format(prefix=prefix,params=params)
pass
def handleCommand(self, command, prefix, params):
#print 'handleCommand({command}, {prefix}, {params})'.format(command=command,prefix=prefix,params=params)
irc.IRCClient.handleCommand(self,command,prefix,params)
def privmsg(self, user, channel, msg):
try:
print channel+'>',user+':',msg
usernick,_,_ = user.partition('!')
variables = {'nick':self.nickname,'user':user,'usernick':usernick,'response_channel':channel,'channel':channel}
variables['response_prefix'] = usernick + ': '
"""
#if this is not a pm
if channel != self.nickname:
if msg.startswith(self.nickname + ":"):
self.msg(channel, usernick+': '+ '/msg me, or pm me, I cannot do business in an open channel.')
return
return
"""
full_cmd = ''
variables['in_channel'] = True
variables['addresses_me'] = False
#if this is a pm
if channel == self.nickname:
variables['response_channel'] = usernick
variables['response_prefix'] = ''
variables['in_channel'] = False
full_cmd = msg
if msg.startswith(self.nickname + ":"):
self.msg(variables['response_channel'], variables['response_prefix']+ 'This is a PM, Don\'t prefix your commands with "{botnick}:".'.format(botnick=self.nickname))
return
variables['addresses_me'] = True
elif msg.startswith(self.nickname + ":"):
full_cmd = msg[ len(self.nickname + ":"): ].strip()
variables['addresses_me'] = True
for monitor in self.privmsg_monitors:
try:
monitor(self,user,channel,msg,variables)
except Exception as e:
print 'Exception in monitor:',e
print 'monitor:',monitor
traceback.print_exc(file=sys.stdout)
if not variables['addresses_me']:
return
if len(full_cmd) == 0:
                self.msg(variables['response_channel'], 'shhh'.format(**variables))
return
if full_cmd[0] == '!':
self.run_command(full_cmd[1:],variables)
return
full_cmd = full_cmd.lower()
d = { }
if full_cmd not in d:
self.msg(variables['response_channel'], 'Odd, I thought I heard something.'.format(**variables))
return
self.msg(channel, d[full_cmd].format(**variables))
return
pass
except Exception as e:
print 'Exception in privmsg:',e
traceback.print_exc(file=sys.stdout)
class BotFactory(protocol.ClientFactory):
protocol = Bot
def buildProtocol(self, addr):
bot = protocol.ClientFactory.buildProtocol(self,addr)
self.bots.append(bot)
bot.main_context = self.main_context
bot.config = self.config
for cb in self.bot_created_cbs:
try:
cb(bot)
except Exception as e:
print 'BotFactory.buildProtocol():cb error:',e,'cb:',cb
traceback.print_exc(file=sys.stdout)
return bot
def __init__(self, channel, nickname,main_context):
print 'channel:',channel
self.channel = channel
self.nickname = nickname
self.bots = []
self.main_context = main_context
self.config = main_context['config']
self.bot_created_cbs = []
def clientConnectionLost(self, connector, reason):
print "Lost connection (%s), reconnecting." % (reason,)
connector.connect()
def clientConnectionFailed(self, connector, reason):
print "Could not connect: %s" % (reason,)
|
|
#! /usr/bin/env python
#
# example2.py -- Simple, configurable FITS viewer.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import ginga.toolkit as ginga_toolkit
from ginga import colors
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.canvas import render
from ginga.misc import log
from ginga.util.loader import load_data
class FitsViewer(object):
def __init__(self, logger, render='widget'):
self.logger = logger
self.drawcolors = colors.get_colors()
self.dc = get_canvas_types()
from ginga.gw import Widgets, Viewers, GwHelp
self.app = Widgets.Application(logger=logger)
self.app.add_callback('shutdown', self.quit)
if hasattr(Widgets, 'Page'):
self.page = Widgets.Page("Ginga example2")
self.app.add_window(self.page)
self.top = Widgets.TopLevel("Ginga example2")
self.page.add_dialog(self.top)
else:
self.top = Widgets.TopLevel("Ginga example2")
self.app.add_window(self.top)
self.top.add_callback('close', self.closed)
vbox = Widgets.VBox()
vbox.set_border_width(2)
vbox.set_spacing(1)
fi = Viewers.CanvasView(logger=logger, render=render)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.set_zoom_algorithm('rate')
fi.set_zoomrate(1.4)
fi.show_pan_mark(True)
fi.set_callback('drag-drop', self.drop_file_cb)
fi.set_callback('cursor-changed', self.cursor_cb)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_set_active(True)
self.fitsimage = fi
bd = fi.get_bindings()
bd.enable_all(True)
# add a color bar
#fi.private_canvas.add(self.dc.ColorBar(side='bottom', offset=10))
# add little mode indicator that shows modal states in
# lower left hand corner
fi.private_canvas.add(self.dc.ModeIndicator(corner='ur', fontsize=14))
# little hack necessary to get correct operation of the mode indicator
# in all circumstances
bm = fi.get_bindmap()
bm.add_callback('mode-set', lambda *args: fi.redraw(whence=3))
# canvas that we will draw on
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='lightblue', coord='data')
canvas.set_surface(fi)
self.canvas = canvas
# add canvas to view
fi.get_canvas().add(canvas)
canvas.ui_set_active(True)
canvas.register_for_cursor_drawing(fi)
canvas.add_callback('draw-event', self.draw_cb)
self.drawtypes = canvas.get_drawtypes()
self.drawtypes.sort()
fi.set_desired_size(512, 512)
iw = Viewers.GingaScrolledViewerWidget(viewer=fi)
vbox.add_widget(iw, stretch=1)
self.readout = Widgets.Label("")
vbox.add_widget(self.readout, stretch=0)
hbox = Widgets.HBox()
hbox.set_border_width(2)
hbox.set_spacing(4)
wdrawtype = Widgets.ComboBox()
for name in self.drawtypes:
wdrawtype.append_text(name)
index = self.drawtypes.index('rectangle')
wdrawtype.set_index(index)
wdrawtype.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawtype = wdrawtype
wdrawcolor = Widgets.ComboBox()
for name in self.drawcolors:
wdrawcolor.append_text(name)
index = self.drawcolors.index('lightblue')
wdrawcolor.set_index(index)
wdrawcolor.add_callback('activated', lambda w, idx: self.set_drawparams())
self.wdrawcolor = wdrawcolor
wfill = Widgets.CheckBox("Fill")
wfill.add_callback('activated', lambda w, tf: self.set_drawparams())
self.wfill = wfill
walpha = Widgets.SpinBox(dtype=float)
walpha.set_limits(0.0, 1.0, incr_value=0.1)
walpha.set_value(1.0)
walpha.set_decimals(2)
walpha.add_callback('value-changed', lambda w, val: self.set_drawparams())
self.walpha = walpha
wclear = Widgets.Button("Clear Canvas")
wclear.add_callback('activated', lambda w: self.clear_canvas())
wopen = Widgets.Button("Open File")
wopen.add_callback('activated', lambda w: self.open_file())
wquit = Widgets.Button("Quit")
wquit.add_callback('activated', lambda w: self.quit())
hbox.add_widget(Widgets.Label(''), stretch=1)
for w in (wopen, wdrawtype, wdrawcolor, wfill,
Widgets.Label('Alpha:'), walpha, wclear, wquit):
hbox.add_widget(w, stretch=0)
vbox.add_widget(hbox, stretch=0)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
hbox.set_spacing(4)
btn1 = Widgets.RadioButton("Draw")
btn1.set_state(mode == 'draw')
btn1.add_callback('activated', lambda w, val: self.set_mode_cb('draw', val))
btn1.set_tooltip("Choose this to draw on the canvas")
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Edit", group=btn1)
btn2.set_state(mode == 'edit')
btn2.add_callback('activated', lambda w, val: self.set_mode_cb('edit', val))
btn2.set_tooltip("Choose this to edit things on the canvas")
hbox.add_widget(btn2)
btn3 = Widgets.RadioButton("Pick", group=btn1)
btn3.set_state(mode == 'pick')
btn3.add_callback('activated', lambda w, val: self.set_mode_cb('pick', val))
btn3.set_tooltip("Choose this to pick things on the canvas")
hbox.add_widget(btn3)
hbox.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(hbox, stretch=0)
self.top.set_widget(vbox)
self.fs = None
if hasattr(GwHelp, 'FileSelection'):
self.fs = GwHelp.FileSelection(self.top.get_widget())
def set_drawparams(self):
index = self.wdrawtype.get_index()
kind = self.drawtypes[index]
index = self.wdrawcolor.get_index()
fill = self.wfill.get_state()
alpha = self.walpha.get_value()
params = {'color': self.drawcolors[index],
'alpha': alpha,
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.delete_all_objects()
def load_file(self, filepath):
image = load_data(filepath, logger=self.logger)
self.fitsimage.set_image(image)
self.top.set_title(filepath)
def open_file(self):
self.fs.popup("Open FITS file", self.load_file)
def drop_file_cb(self, viewer, paths):
filename = paths[0]
self.load_file(filename)
def cursor_cb(self, viewer, button, data_x, data_y):
"""This gets called when the data position relative to the cursor
changes.
"""
# Get the value under the data coordinates
try:
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x + viewer.data_off),
int(data_y + viewer.data_off))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
        # Calculate WCS RA and DEC
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.set_text(text)
def set_mode_cb(self, mode, tf):
self.logger.info("canvas mode changed (%s) %s" % (mode, tf))
if not (tf is False):
self.canvas.set_draw_mode(mode)
return True
def draw_cb(self, canvas, tag):
obj = canvas.get_object_by_tag(tag)
obj.add_callback('pick-down', self.pick_cb, 'down')
obj.add_callback('pick-up', self.pick_cb, 'up')
obj.add_callback('pick-move', self.pick_cb, 'move')
obj.add_callback('pick-hover', self.pick_cb, 'hover')
obj.add_callback('pick-enter', self.pick_cb, 'enter')
obj.add_callback('pick-leave', self.pick_cb, 'leave')
obj.add_callback('pick-key', self.pick_cb, 'key')
obj.pickable = True
obj.add_callback('edited', self.edit_cb)
def pick_cb(self, obj, canvas, event, pt, ptype):
self.logger.info("pick event '%s' with obj %s at (%.2f, %.2f)" % (
ptype, obj.kind, pt[0], pt[1]))
return True
def edit_cb(self, obj):
self.logger.info("object %s has been edited" % (obj.kind))
return True
def closed(self, w):
self.logger.info("Top window closed.")
self.top = None
sys.exit()
def quit(self, *args):
self.logger.info("Attempting to shut down the application...")
if self.top is not None:
self.top.close()
sys.exit()
def main(options, args):
logger = log.get_logger("example2", options=options)
    if options.toolkit is None:
        logger.error("Please choose a GUI toolkit with -t option")
        sys.exit(1)
# decide our toolkit, then import
ginga_toolkit.use(options.toolkit)
rw = 'opengl' if options.renderer == 'opengl' else 'widget'
viewer = FitsViewer(logger, render=rw)
if options.renderer is not None and options.renderer != 'opengl':
render_class = render.get_render_class(options.renderer)
viewer.fitsimage.set_renderer(render_class(viewer.fitsimage))
viewer.top.resize(700, 540)
if len(args) > 0:
viewer.load_file(args[0])
viewer.top.show()
viewer.top.raise_()
try:
app = viewer.top.get_app()
app.mainloop()
except KeyboardInterrupt:
print("Terminating viewer...")
if viewer.top is not None:
viewer.top.close()
if __name__ == "__main__":
# Parse command line options
from argparse import ArgumentParser
argprs = ArgumentParser()
argprs.add_argument("-t", "--toolkit", dest="toolkit", metavar="NAME",
default='qt',
help="Choose GUI toolkit (gtk|qt)")
argprs.add_argument("-r", "--renderer", dest="renderer", metavar="NAME",
default=None,
help="Choose renderer (pil|agg|opencv|cairo|qt)")
log.addlogopts(argprs)
(options, args) = argprs.parse_known_args(sys.argv[1:])
main(options, args)
|
|
import socket
from collections import deque
from threading import Thread
import logging
import Queue
import sys
from YamahaAV import YamahaAV
logger = logging.getLogger('yamaha-av')
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# static / constants
# NUMCHAR_CODES[zone][action]
NUMCHAR_CODES = {
1: { '1': '7F0151AE',
'2': '7F0152AD',
'3': '7F0153AC',
'4': '7F0154AB',
'5': '7F0155AA',
'6': '7F0156A9',
'7': '7F0157A8',
'8': '7F0158A7',
'9': '7F0159A6',
'0': '7F015AA5',
'+10': '7F015BA4',
'ENT': '7F015CA3' },
2: { '1': '7F01718F',
'2': '7F01728C',
'3': '7F01738D',
'4': '7F01748A',
'5': '7F01758B',
'6': '7F017688',
'7': '7F017789',
'8': '7F017886',
'9': '7F017986',
'0': '7F017A84',
'+10': '7F017B85',
'ENT': '7F017C82' }
}
NUMCHAR_CODES[0] = NUMCHAR_CODES[1]
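# Zone index 0 is kept as an alias for the main zone (zone 1); the other code
# tables below do the same.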
# OPERATION_CODES[zone][action]
OPERATION_CODES = {
1: { 'Play': '7F016897',
'Stop': '7F016996',
'Pause': '7F016798',
'Search-': '7F016A95',
'Search+': '7F016E94',
'Skip-': '7F016C93',
'Skip+': '7F016D92',
'FM': '7F015827',
'AM': '7F01552A' },
2: { 'Play': '7F018876',
'Stop': '7F018977',
'Pause': '7F018779',
'Search-': '7F018A74',
'Search+': '7F018B75',
'Skip-': '7F018C72',
'Skip+': '7F018D73',
'FM': '7F015927',
'AM': '7F015628' }
}
OPERATION_CODES[0] = OPERATION_CODES[1]
# CURSOR_CODES[zone][action]
CURSOR_CODES = {
1: { 'Up': '7A859D62',
'Down': '7A859C63',
'Left': '7A859F60',
'Right': '7A859E61',
'Enter': '7A85DE21',
'Return': '7A85AA55',
'Level': '7A858679',
'On Screen': '7A85847B',
'Option': '7A856B14',
'Top Menu': '7A85A0DF',
'Pop Up Menu': '7A85A4DB' },
2: { 'Up': '7A852B55',
'Down': '7A852C52',
'Left': '7A852D53',
'Right': '7A852E50',
'Enter': '7A852F51',
'Return': '7A853C42',
'Option': '7A856C12',
'Top Menu': '7A85A1DF',
'Pop Up Menu': '7A85A5DB' },
}
CURSOR_CODES[0] = CURSOR_CODES[1]
# Objects used in the GetInfo action
MENU_OBJECTS = [ 'Menu Layer', 'Menu Name' ]
LINE_OBJECTS = [ 'Line 1', 'Line 2', 'Line 3', 'Line 4', 'Line 5', 'Line 6', 'Line 7', 'Line 8', 'Current Line', 'Max Line' ]
GENERIC_PLAYBACK_OBJECTS = [ 'Playback Info', 'Repeat Mode', 'Shuffle', 'Artist', 'Album', 'Song' ] + MENU_OBJECTS + LINE_OBJECTS
ZONE_OBJECTS = [ 'Power', 'Sleep', 'Volume Level', 'Mute', 'Input Selection', 'Scene', 'Init Volume Mode', 'Init Volume Level', 'Max Volume Level' ]
MAIN_ZONE_OBJECTS = ZONE_OBJECTS + [ 'Straight', 'Enhancer', 'Sound Program', 'Treble', 'Bass' ]
NET_RADIO_OBJECTS = [ 'Playback Info', 'Station' ] + MENU_OBJECTS + LINE_OBJECTS
PANDORA_OBJECTS = [ 'Playback Info', 'Station', 'Album', 'Song' ] + MENU_OBJECTS + LINE_OBJECTS
SIRIUS_IR_OBJECTS = [ 'Playback Info', 'Artist', 'Channel', 'Title' ] + MENU_OBJECTS + LINE_OBJECTS
SIRIUS_OBJECTS = [ 'Antenna Strength', 'Category', 'Channel Number', 'Channel Name', 'Artist', 'Song', 'Composer' ]
SYSTEM_OBJECTS = [ 'Active Speakers', 'PreOut Levels' ]
# Supported zone definitions
#ALL_ZONES = [ 'Main Zone', 'Zone 2', 'Zone 3', 'Zone 4', 'Zone A', 'Zone B', 'Zone C', 'Zone D' ]
#ALL_ZONES_PLUS_ACTIVE = [ 'Active Zone' ] + ALL_ZONES
#TWO_ZONES = [ 'Main Zone', 'Zone 2' ]
#TWO_ZONES_PLUS_ACTIVE = [ 'Active Zone' ] + TWO_ZONES
def _get_lan_ip():
"""
Attempts to open a socket connection to Google's DNS
servers in order to determine the local IP address
of this computer. Eg, 192.168.1.100
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
ip = s.getsockname()[0]
s.close()
return ip
except:
return "192.168.1.100"
def _get_network_prefix():
"""
Returns the network prefix, which is the local IP address
without the last segment, Eg: 192.168.1.100 -> 192.168.1
"""
lan_ip = _get_lan_ip()
return lan_ip[:lan_ip.rfind('.')]
def _create_ip_range(range_start, range_end):
"""
Given a start ip, eg 192.168.1.1, and an end ip, eg 192.168.1.254,
generate a list of all of the ips within that range, including
the start and end ips.
"""
ip_range = []
start = int(range_start[range_start.rfind('.')+1:])
end = int(range_end[range_end.rfind('.')+1:])
for i in range(start, end+1):
ip = range_start[:range_start.rfind('.')+1] + str(i)
ip_range.append(ip)
return ip_range
def _open_to_close_tag(tag):
"""
Given an opening xml tag, return the matching close tag
    eg. '<YAMAHA_AV cmd="PUT">' becomes '</YAMAHA_AV>'
"""
index = tag.find(' ')
if index == -1:
index = len(tag) - 1
return '</' + tag[1:index] + '>'
def _close_xml_tags(xml):
"""
Automagically takes an input xml string and returns that string
with all of the xml tags properly closed. It can even handle when
the open tag is in the middle of the string and not the end.
"""
output = []
stack = []
xml_chars = deque(list(xml))
c = None
while len(xml_chars) > 0:
while len(xml_chars) > 0 and c != '<':
c = xml_chars.popleft()
if c != '<':
output.append(c)
if c == '<':
temp = [ '<' ]
c = xml_chars.popleft()
end_tag = c == '/'
while c != '>':
temp.append(c)
c = xml_chars.popleft()
temp.append('>')
tag = ''.join(temp)
if end_tag:
other_tag = stack.pop()
other_close_tag = _open_to_close_tag(other_tag)
while other_close_tag != tag:
output.append(other_close_tag)
other_tag = stack.pop()
other_close_tag = _open_to_close_tag(other_tag)
elif not tag.endswith('/>'):
# Only add to stack if not self-closing
stack.append(tag)
output.append(tag)
while len(stack) > 0:
tag = stack.pop()
output.append(_open_to_close_tag(tag))
return ''.join(output)
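# Worked example (illustrative): given the partial request
#   '<YAMAHA_AV cmd="PUT"><Main_Zone><Power>On'
# _close_xml_tags() appends the missing close tags in reverse order of opening:
#   '<YAMAHA_AV cmd="PUT"><Main_Zone><Power>On</Power></Main_Zone></YAMAHA_AV>'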
def discover(model_name):
avs = discover_all()
for av in avs:
if model_name.upper() in ["ANY", "", None] or av.model_name.upper() == model_name.upper():
return av
return None
def discover_all():
"""
Blasts the network with requests, attempting to find any and all yamaha receivers
on the local network. First it detects the user's local ip address, eg 192.168.1.100.
Then, it converts that to the network prefix, eg 192.168.1, and then sends a request
to every ip on that subnet, eg 192.168.1.1 -> 192.168.1.254. It does each request on
a separate thread in order to avoid waiting for the timeout for every 254 requests
one by one.
"""
threads = []
# Get network prefix (eg 192.168.1)
net_prefix = _get_network_prefix()
ip_range = _create_ip_range(net_prefix + '.1', net_prefix + '.254')
result_queue = Queue.Queue()
for ip in ip_range:
t = Thread(target=_try_connect, kwargs={'ip': ip, 'result_queue': result_queue})
t.daemon = True
threads.append(t)
t.start()
for t in threads:
t.join()
avs = []
if result_queue.empty():
logger.info("Yamaha Receiver Was Not Found!")
while not result_queue.empty():
av = result_queue.get()
avs.append(av)
return avs
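# Usage sketch (illustrative; the model name below is only an example):
#   av = discover('ANY')        # first receiver found, regardless of model
#   rx = discover('RX-V675')    # or match a specific Model_Name
#   if av is not None:
#       print av.model_name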
def _try_connect(ip, result_queue, port=80, timeout=1.0):
"""
Used with the auto-detect-ip functions, determines if a yamaha receiver is
waiting at the other end of the given ip address.
"""
try:
av = YamahaAV(ip, port)
model = av.get_config_string('Model_Name', timeout=timeout, ip=ip, print_error=False)
av.model_name = model
logger.info('{0}: {1}'.format(ip, model))
result_queue.put(av)
except:
pass
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for ramdisk deploy."""
import tempfile
from unittest import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base as drivers_base
from ironic.drivers.modules import agent_base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.drivers.modules import ramdisk
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
class RamdiskDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(RamdiskDeployTestCase, self).setUp()
self.temp_dir = tempfile.mkdtemp()
self.config(tftp_root=self.temp_dir, group='pxe')
self.temp_dir = tempfile.mkdtemp()
self.config(images_path=self.temp_dir, group='pxe')
self.config(enabled_deploy_interfaces=['ramdisk'])
self.config(enabled_boot_interfaces=['pxe'])
for iface in drivers_base.ALL_INTERFACES:
impl = 'fake'
if iface == 'network':
impl = 'noop'
if iface == 'deploy':
impl = 'ramdisk'
if iface == 'boot':
impl = 'pxe'
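            # Enable exactly one implementation per interface and make it the
            # default, e.g. for 'deploy' this expands to
            # {'enabled_deploy_interfaces': ['ramdisk'],
            #  'default_deploy_interface': 'ramdisk'}.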
config_kwarg = {'enabled_%s_interfaces' % iface: [impl],
'default_%s_interface' % iface: impl}
self.config(**config_kwarg)
self.config(enabled_hardware_types=['fake-hardware'])
instance_info = {'kernel': 'kernelUUID',
'ramdisk': 'ramdiskUUID'}
self.node = obj_utils.create_test_node(
self.context,
driver='fake-hardware',
instance_info=instance_info,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_ramdisk(
self, get_image_info_mock, cache_mock,
dhcp_factory_mock, switch_pxe_config_mock,
set_boot_device_mock):
provider_mock = mock.MagicMock()
dhcp_factory_mock.return_value = provider_mock
self.node.provision_state = states.DEPLOYING
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
get_image_info_mock.return_value = image_info
with task_manager.acquire(self.context, self.node.uuid) as task:
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=False)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=False, ip_version=6)
pxe_config_path = pxe_utils.get_pxe_config_file_path(
task.node.uuid)
task.node.properties['capabilities'] = 'boot_option:netboot'
task.node.driver_internal_info['is_whole_disk_image'] = False
task.driver.deploy.prepare(task)
task.driver.deploy.deploy(task)
get_image_info_mock.assert_called_once_with(task,
ipxe_enabled=False)
cache_mock.assert_called_once_with(
task, image_info, ipxe_enabled=False)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None,
CONF.deploy.default_boot_mode, False, ipxe_enabled=False,
iscsi_boot=False, ramdisk_boot=True, anaconda_boot=False)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
@mock.patch.object(ramdisk.LOG, 'warning', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_deploy(self, mock_image_info, mock_cache,
mock_dhcp_factory, mock_switch_config, mock_warning):
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
mock_image_info.return_value = image_info
i_info = self.node.instance_info
i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertIsNone(task.driver.deploy.deploy(task))
mock_image_info.assert_called_once_with(task, ipxe_enabled=False)
mock_cache.assert_called_once_with(
task, image_info, ipxe_enabled=False)
self.assertFalse(mock_warning.called)
i_info['configdrive'] = 'meow'
self.node.instance_info = i_info
self.node.save()
mock_warning.reset_mock()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertIsNone(task.driver.deploy.deploy(task))
self.assertTrue(mock_warning.called)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
def test_prepare(self, mock_prepare_instance):
node = self.node
node.provision_state = states.DEPLOYING
node.instance_info = {}
node.save()
with task_manager.acquire(self.context, node.uuid) as task:
task.driver.deploy.prepare(task)
self.assertFalse(mock_prepare_instance.called)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
def test_prepare_active(self, mock_prepare_instance):
node = self.node
node.provision_state = states.ACTIVE
node.save()
with task_manager.acquire(self.context, node.uuid) as task:
task.driver.deploy.prepare(task)
mock_prepare_instance.assert_called_once_with(mock.ANY, task)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
def test_prepare_unrescuing(self, mock_prepare_instance):
node = self.node
node.provision_state = states.UNRESCUING
node.save()
with task_manager.acquire(self.context, node.uuid) as task:
task.driver.deploy.prepare(task)
mock_prepare_instance.assert_called_once_with(mock.ANY, task)
@mock.patch.object(deploy_utils, 'validate_image_properties',
autospec=True)
def test_validate(self, mock_validate_img):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.validate(task)
self.assertTrue(mock_validate_img.called)
@mock.patch.object(deploy_utils, 'validate_image_properties',
autospec=True)
def test_validate_with_boot_iso(self, mock_validate_img):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.instance_info = {
'boot_iso': 'isoUUID'
}
task.driver.deploy.validate(task)
self.assertTrue(mock_validate_img.called)
@mock.patch.object(fake.FakeBoot, 'validate', autospec=True)
@mock.patch.object(deploy_utils, 'validate_image_properties',
autospec=True)
def test_validate_interface_mismatch(self, mock_validate_image,
mock_boot_validate):
node = self.node
node.boot_interface = 'fake'
node.save()
self.config(enabled_boot_interfaces=['fake'],
default_boot_interface='fake')
with task_manager.acquire(self.context, node.uuid) as task:
error = self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
error_message = ('Invalid configuration: The boot interface must '
'have the `ramdisk_boot` capability. You are '
'using an incompatible boot interface.')
self.assertEqual(error_message, str(error))
self.assertFalse(mock_boot_validate.called)
self.assertFalse(mock_validate_image.called)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_calls_boot_validate(self, mock_validate):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.validate(task)
mock_validate.assert_called_once_with(mock.ANY, task)
@mock.patch.object(manager_utils, 'restore_power_state_if_needed',
autospec=True)
@mock.patch.object(manager_utils, 'power_on_node_if_needed',
autospec=True)
@mock.patch.object(ramdisk.LOG, 'warning', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_deploy_with_smartnic_port(
self, mock_image_info, mock_cache,
mock_dhcp_factory, mock_switch_config, mock_warning,
power_on_node_if_needed_mock, restore_power_state_mock):
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk')}
mock_image_info.return_value = image_info
i_info = self.node.instance_info
i_info.update({'capabilities': {'boot_option': 'ramdisk'}})
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
power_on_node_if_needed_mock.return_value = states.POWER_OFF
self.assertIsNone(task.driver.deploy.deploy(task))
mock_image_info.assert_called_once_with(task, ipxe_enabled=False)
mock_cache.assert_called_once_with(
task, image_info, ipxe_enabled=False)
self.assertFalse(mock_warning.called)
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
i_info['configdrive'] = 'meow'
self.node.instance_info = i_info
self.node.save()
mock_warning.reset_mock()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertIsNone(task.driver.deploy.deploy(task))
self.assertTrue(mock_warning.called)
@mock.patch.object(agent_base, 'get_steps', autospec=True)
def test_get_clean_steps(self, mock_get_steps):
# Test getting clean steps
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = task.driver.deploy.get_clean_steps(task)
mock_get_steps.assert_called_once_with(
task, 'clean', interface='deploy',
override_priorities={'erase_devices': None,
'erase_devices_metadata': None})
self.assertEqual(mock_steps, steps)
def test_get_deploy_steps(self):
# Only the default deploy step exists in the ramdisk deploy
expected = [{'argsinfo': None, 'interface': 'deploy', 'priority': 100,
'step': 'deploy'}]
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = task.driver.deploy.get_deploy_steps(task)
self.assertEqual(expected, steps)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_execute_clean_step(self, mock_execute_step):
step = {
'priority': 10,
'interface': 'deploy',
'step': 'erase_devices',
'reboot_requested': False
}
with task_manager.acquire(self.context, self.node.uuid) as task:
result = task.driver.deploy.execute_clean_step(task, step)
self.assertIs(result, mock_execute_step.return_value)
mock_execute_step.assert_called_once_with(task, step, 'clean')
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning(self, prepare_inband_cleaning_mock):
prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
states.CLEANWAIT, task.driver.deploy.prepare_cleaning(task))
prepare_inband_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
autospec=True)
def test_tear_down_cleaning(self, tear_down_cleaning_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.tear_down_cleaning(task)
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
|
|
from __future__ import absolute_import
import pickle
from mock import call
from kombu import Connection, Exchange, Queue, binding
from kombu.exceptions import NotBoundError
from .mocks import Transport
from .utils import TestCase
from .utils import Mock
def get_conn():
return Connection(transport=Transport)
class test_binding(TestCase):
def test_constructor(self):
x = binding(
Exchange('foo'), 'rkey',
arguments={'barg': 'bval'},
unbind_arguments={'uarg': 'uval'},
)
self.assertEqual(x.exchange, Exchange('foo'))
self.assertEqual(x.routing_key, 'rkey')
self.assertDictEqual(x.arguments, {'barg': 'bval'})
self.assertDictEqual(x.unbind_arguments, {'uarg': 'uval'})
def test_declare(self):
chan = get_conn().channel()
x = binding(Exchange('foo'), 'rkey')
x.declare(chan)
self.assertIn('exchange_declare', chan)
def test_declare_no_exchange(self):
chan = get_conn().channel()
x = binding()
x.declare(chan)
self.assertNotIn('exchange_declare', chan)
def test_bind(self):
chan = get_conn().channel()
x = binding(Exchange('foo'))
x.bind(Exchange('bar')(chan))
self.assertIn('exchange_bind', chan)
def test_unbind(self):
chan = get_conn().channel()
x = binding(Exchange('foo'))
x.unbind(Exchange('bar')(chan))
self.assertIn('exchange_unbind', chan)
def test_repr(self):
b = binding(Exchange('foo'), 'rkey')
self.assertIn('foo', repr(b))
self.assertIn('rkey', repr(b))
class test_Exchange(TestCase):
def test_bound(self):
exchange = Exchange('foo', 'direct')
self.assertFalse(exchange.is_bound)
self.assertIn('<unbound', repr(exchange))
chan = get_conn().channel()
bound = exchange.bind(chan)
self.assertTrue(bound.is_bound)
self.assertIs(bound.channel, chan)
self.assertIn('bound to chan:%r' % (chan.channel_id, ),
repr(bound))
def test_hash(self):
self.assertEqual(hash(Exchange('a')), hash(Exchange('a')))
self.assertNotEqual(hash(Exchange('a')), hash(Exchange('b')))
def test_can_cache_declaration(self):
self.assertTrue(Exchange('a', durable=True).can_cache_declaration)
self.assertFalse(Exchange('a', durable=False).can_cache_declaration)
def test_pickle(self):
e1 = Exchange('foo', 'direct')
e2 = pickle.loads(pickle.dumps(e1))
self.assertEqual(e1, e2)
def test_eq(self):
e1 = Exchange('foo', 'direct')
e2 = Exchange('foo', 'direct')
self.assertEqual(e1, e2)
e3 = Exchange('foo', 'topic')
self.assertNotEqual(e1, e3)
self.assertEqual(e1.__eq__(True), NotImplemented)
def test_revive(self):
exchange = Exchange('foo', 'direct')
conn = get_conn()
chan = conn.channel()
# reviving unbound channel is a noop.
exchange.revive(chan)
self.assertFalse(exchange.is_bound)
self.assertIsNone(exchange._channel)
bound = exchange.bind(chan)
self.assertTrue(bound.is_bound)
self.assertIs(bound.channel, chan)
chan2 = conn.channel()
bound.revive(chan2)
self.assertTrue(bound.is_bound)
self.assertIs(bound._channel, chan2)
def test_assert_is_bound(self):
exchange = Exchange('foo', 'direct')
with self.assertRaises(NotBoundError):
exchange.declare()
conn = get_conn()
chan = conn.channel()
exchange.bind(chan).declare()
self.assertIn('exchange_declare', chan)
def test_set_transient_delivery_mode(self):
exc = Exchange('foo', 'direct', delivery_mode='transient')
self.assertEqual(exc.delivery_mode, Exchange.TRANSIENT_DELIVERY_MODE)
def test_set_passive_mode(self):
exc = Exchange('foo', 'direct', passive=True)
self.assertTrue(exc.passive)
def test_set_persistent_delivery_mode(self):
exc = Exchange('foo', 'direct', delivery_mode='persistent')
self.assertEqual(exc.delivery_mode, Exchange.PERSISTENT_DELIVERY_MODE)
def test_bind_at_instantiation(self):
self.assertTrue(Exchange('foo', channel=get_conn().channel()).is_bound)
def test_create_message(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).Message({'foo': 'bar'})
self.assertIn('prepare_message', chan)
def test_publish(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).publish('the quick brown fox')
self.assertIn('basic_publish', chan)
def test_delete(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).delete()
self.assertIn('exchange_delete', chan)
def test__repr__(self):
b = Exchange('foo', 'topic')
self.assertIn('foo(topic)', repr(b))
self.assertIn('Exchange', repr(b))
def test_bind_to(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
bar = Exchange('bar', 'topic')
foo(chan).bind_to(bar)
self.assertIn('exchange_bind', chan)
def test_bind_to_by_name(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
foo(chan).bind_to('bar')
self.assertIn('exchange_bind', chan)
def test_unbind_from(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
bar = Exchange('bar', 'topic')
foo(chan).unbind_from(bar)
self.assertIn('exchange_unbind', chan)
def test_unbind_from_by_name(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
foo(chan).unbind_from('bar')
self.assertIn('exchange_unbind', chan)
class test_Queue(TestCase):
def setUp(self):
self.exchange = Exchange('foo', 'direct')
def test_hash(self):
self.assertEqual(hash(Queue('a')), hash(Queue('a')))
self.assertNotEqual(hash(Queue('a')), hash(Queue('b')))
def test_repr_with_bindings(self):
ex = Exchange('foo')
x = Queue('foo', bindings=[ex.binding('A'), ex.binding('B')])
self.assertTrue(repr(x))
def test_anonymous(self):
chan = Mock()
x = Queue(bindings=[binding(Exchange('foo'), 'rkey')])
chan.queue_declare.return_value = 'generated', 0, 0
xx = x(chan)
xx.declare()
self.assertEqual(xx.name, 'generated')
def test_when_bound_but_no_exchange(self):
q = Queue('a')
q.exchange = None
self.assertIsNone(q.when_bound())
def test_declare_but_no_exchange(self):
q = Queue('a')
q.queue_declare = Mock()
q.queue_bind = Mock()
q.exchange = None
q.declare()
q.queue_declare.assert_called_with(False, passive=False)
def test_bind_to_when_name(self):
chan = Mock()
q = Queue('a')
q(chan).bind_to('ex')
self.assertTrue(chan.queue_bind.called)
def test_get_when_no_m2p(self):
chan = Mock()
q = Queue('a')(chan)
chan.message_to_python = None
self.assertTrue(q.get())
def test_multiple_bindings(self):
chan = Mock()
q = Queue('mul', [
binding(Exchange('mul1'), 'rkey1'),
binding(Exchange('mul2'), 'rkey2'),
binding(Exchange('mul3'), 'rkey3'),
])
q(chan).declare()
self.assertIn(
call(
nowait=False,
exchange='mul1',
auto_delete=False,
passive=False,
arguments=None,
type='direct',
durable=True,
),
chan.exchange_declare.call_args_list,
)
def test_can_cache_declaration(self):
self.assertTrue(Queue('a', durable=True).can_cache_declaration)
self.assertFalse(Queue('a', durable=False).can_cache_declaration)
def test_eq(self):
q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
self.assertEqual(q1, q2)
self.assertEqual(q1.__eq__(True), NotImplemented)
q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx')
self.assertNotEqual(q1, q3)
def test_exclusive_implies_auto_delete(self):
self.assertTrue(
Queue('foo', self.exchange, exclusive=True).auto_delete,
)
def test_binds_at_instantiation(self):
self.assertTrue(Queue('foo', self.exchange,
channel=get_conn().channel()).is_bound)
def test_also_binds_exchange(self):
chan = get_conn().channel()
b = Queue('foo', self.exchange)
self.assertFalse(b.is_bound)
self.assertFalse(b.exchange.is_bound)
b = b.bind(chan)
self.assertTrue(b.is_bound)
self.assertTrue(b.exchange.is_bound)
self.assertIs(b.channel, b.exchange.channel)
self.assertIsNot(b.exchange, self.exchange)
def test_declare(self):
chan = get_conn().channel()
b = Queue('foo', self.exchange, 'foo', channel=chan)
self.assertTrue(b.is_bound)
b.declare()
self.assertIn('exchange_declare', chan)
self.assertIn('queue_declare', chan)
self.assertIn('queue_bind', chan)
def test_get(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.get()
self.assertIn('basic_get', b.channel)
def test_purge(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.purge()
self.assertIn('queue_purge', b.channel)
def test_consume(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.consume('fifafo', None)
self.assertIn('basic_consume', b.channel)
def test_cancel(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.cancel('fifafo')
self.assertIn('basic_cancel', b.channel)
def test_delete(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.delete()
self.assertIn('queue_delete', b.channel)
def test_queue_unbind(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.queue_unbind()
self.assertIn('queue_unbind', b.channel)
def test_as_dict(self):
q = Queue('foo', self.exchange, 'rk')
d = q.as_dict(recurse=True)
self.assertEqual(d['exchange']['name'], self.exchange.name)
def test__repr__(self):
b = Queue('foo', self.exchange, 'foo')
self.assertIn('foo', repr(b))
self.assertIn('Queue', repr(b))
|
|
from __future__ import print_function, division, absolute_import
from collections import namedtuple
import sys
from llvmlite.ir import Value
from llvmlite.llvmpy.core import Constant, Type, Builder
from . import (_dynfunc, cgutils, config, funcdesc, generators, ir, types,
typing, utils)
from .errors import LoweringError
class Environment(_dynfunc.Environment):
__slots__ = ()
@classmethod
def from_fndesc(cls, fndesc):
mod = fndesc.lookup_module()
return cls(mod.__dict__)
def __reduce__(self):
return _rebuild_env, (self.globals['__name__'], self.consts)
def _rebuild_env(modname, consts):
from . import serialize
mod = serialize._rebuild_module(modname)
env = Environment(mod.__dict__)
env.consts[:] = consts
return env
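# Note: pickling an Environment stores only the defining module's name and the
# constants list; unpickling calls _rebuild_env(), which rebuilds the module via
# serialize._rebuild_module() and copies the constants into a fresh Environment.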
_VarArgItem = namedtuple("_VarArgItem", ("vararg", "index"))
class BaseLower(object):
"""
Lower IR to LLVM
"""
    # If true, the LLVM module can't be cached across process calls
has_dynamic_globals = False
def __init__(self, context, library, fndesc, interp):
self.context = context
self.library = library
self.fndesc = fndesc
self.blocks = utils.SortedMap(utils.iteritems(interp.blocks))
self.interp = interp
self.call_conv = context.call_conv
self.generator_info = self.interp.generator_info
# Initialize LLVM
self.module = self.library.create_ir_module(self.fndesc.unique_name)
# Python execution environment (will be available to the compiled
# function).
self.env = Environment.from_fndesc(self.fndesc)
# Internal states
self.blkmap = {}
self.varmap = {}
self.firstblk = min(self.blocks.keys())
self.loc = -1
# Subclass initialization
self.init()
def init(self):
pass
def init_pyapi(self):
"""
Init the Python API and Environment Manager for the function being
lowered.
"""
if self.pyapi is not None:
return
self.pyapi = self.context.get_python_api(self.builder)
# Store environment argument for later use
self.envarg = self.call_conv.get_env_argument(self.function)
# Sanity check
with cgutils.if_unlikely(self.builder,
cgutils.is_null(self.builder, self.envarg)):
self.pyapi.err_set_string(
"PyExc_SystemError",
"Numba internal error: object mode function called "
"without an environment")
self.call_conv.return_exc(self.builder)
self.env_body = self.context.get_env_body(self.builder, self.envarg)
self.pyapi.emit_environment_sentry(self.envarg)
self.env_manager = self.pyapi.get_env_manager(self.env, self.env_body,
self.envarg)
def pre_lower(self):
"""
Called before lowering all blocks.
"""
# A given Lower object can be used for several LL functions
# (for generators) and it's important to use a new API and
# EnvironmentManager.
self.pyapi = None
def post_lower(self):
"""
Called after all blocks are lowered
"""
def pre_block(self, block):
"""
Called before lowering a block.
"""
def return_exception(self, exc_class, exc_args=None):
self.call_conv.return_user_exc(self.builder, exc_class, exc_args)
def lower(self):
if self.generator_info is None:
self.genlower = None
self.lower_normal_function(self.fndesc)
else:
self.genlower = self.GeneratorLower(self)
self.gentype = self.genlower.gentype
self.genlower.lower_init_func(self)
self.genlower.lower_next_func(self)
if self.gentype.has_finalizer:
self.genlower.lower_finalize_func(self)
if config.DUMP_LLVM:
print(("LLVM DUMP %s" % self.fndesc).center(80, '-'))
print(self.module)
print('=' * 80)
# Run target specific post lowering transformation
self.context.post_lowering(self.module, self.library)
# Materialize LLVM Module
self.library.add_ir_module(self.module)
def extract_function_arguments(self):
rawfnargs = self.call_conv.get_arguments(self.function)
arginfo = self.context.get_arg_packer(self.fndesc.argtypes)
self.fnargs = arginfo.from_arguments(self.builder, rawfnargs)
return self.fnargs
def lower_normal_function(self, fndesc):
"""
Lower non-generator *fndesc*.
"""
self.setup_function(fndesc)
# Init argument values
self.extract_function_arguments()
entry_block_tail = self.lower_function_body()
# Close tail of entry block
self.builder.position_at_end(entry_block_tail)
self.builder.branch(self.blkmap[self.firstblk])
def lower_function_body(self):
"""
        Lower the current function's body, and return the tail of the entry block.
"""
# Init Python blocks
for offset in self.blocks:
bname = "B%s" % offset
self.blkmap[offset] = self.function.append_basic_block(bname)
self.pre_lower()
# pre_lower() may have changed the current basic block
entry_block_tail = self.builder.basic_block
self.debug_print("# function begin: {0}".format(
self.fndesc.unique_name))
# Lower all blocks
for offset, block in self.blocks.items():
bb = self.blkmap[offset]
self.builder.position_at_end(bb)
self.lower_block(block)
self.post_lower()
return entry_block_tail
def lower_block(self, block):
"""
Lower the given block.
"""
self.pre_block(block)
for inst in block.body:
self.loc = inst.loc
try:
self.lower_inst(inst)
except LoweringError:
raise
except Exception as e:
msg = "Internal error:\n%s: %s" % (type(e).__name__, e)
raise LoweringError(msg, inst.loc)
def create_cpython_wrapper(self, release_gil=False):
"""
Create CPython wrapper(s) around this function (or generator).
"""
if self.genlower:
self.context.create_cpython_wrapper(self.library,
self.genlower.gendesc,
self.env, self.call_helper,
release_gil=release_gil)
self.context.create_cpython_wrapper(self.library, self.fndesc,
self.env, self.call_helper,
release_gil=release_gil)
def setup_function(self, fndesc):
# Setup function
self.function = self.context.declare_function(self.module, fndesc)
self.entry_block = self.function.append_basic_block('entry')
self.builder = Builder.new(self.entry_block)
self.call_helper = self.call_conv.init_call_helper(self.builder)
def typeof(self, varname):
return self.fndesc.typemap[varname]
def debug_print(self, msg):
if config.DEBUG_JIT:
self.context.debug_print(self.builder, "DEBUGJIT: {0}".format(msg))
class Lower(BaseLower):
GeneratorLower = generators.GeneratorLower
def lower_inst(self, inst):
self.debug_print(str(inst))
if isinstance(inst, ir.Assign):
ty = self.typeof(inst.target.name)
val = self.lower_assign(ty, inst)
self.storevar(val, inst.target.name)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
condty = self.typeof(inst.cond.name)
pred = self.context.cast(self.builder, cond, condty, types.boolean)
assert pred.type == Type.int(1), ("cond is not i1: %s" % pred.type)
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Return):
if self.generator_info:
# StopIteration
self.genlower.return_from_generator(self)
return
val = self.loadvar(inst.value.name)
oty = self.typeof(inst.value.name)
ty = self.fndesc.restype
if isinstance(ty, types.Optional):
# If returning an optional type
self.call_conv.return_optional_value(self.builder, ty, oty, val)
return
if ty != oty:
val = self.context.cast(self.builder, val, oty, ty)
retval = self.context.get_return_value(self.builder, ty, val)
self.call_conv.return_value(self.builder, retval)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
index = self.loadvar(inst.index.name)
targetty = self.typeof(inst.target.name)
valuety = self.typeof(inst.value.name)
indexty = self.typeof(inst.index.name)
signature = self.fndesc.calltypes[inst]
assert signature is not None
impl = self.context.get_function('setitem', signature)
# Convert argument to match
if isinstance(targetty, types.Optional):
target = self.context.cast(self.builder, target, targetty,
targetty.type)
else:
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
value = self.context.cast(self.builder, value, valuety,
signature.args[2])
return impl(self.builder, (target, index, value))
elif isinstance(inst, ir.DelItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
targetty = self.typeof(inst.target.name)
indexty = self.typeof(inst.index.name)
signature = self.fndesc.calltypes[inst]
assert signature is not None
impl = self.context.get_function('delitem', signature)
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
return impl(self.builder, (target, index))
elif isinstance(inst, ir.Del):
try:
# XXX: incorrect Del injection?
val = self.loadvar(inst.value)
except KeyError:
pass
else:
self.decref(self.typeof(inst.value), val)
self._delete_variable(inst.value)
elif isinstance(inst, ir.SetAttr):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
signature = self.fndesc.calltypes[inst]
targetty = self.typeof(inst.target.name)
valuety = self.typeof(inst.value.name)
assert signature is not None
assert signature.args[0] == targetty
impl = self.context.get_setattr(inst.attr, signature)
# Convert argument to match
value = self.context.cast(self.builder, value, valuety,
signature.args[1])
return impl(self.builder, (target, value))
elif isinstance(inst, ir.Raise):
self.lower_raise(inst)
else:
raise NotImplementedError(type(inst))
def lower_raise(self, inst):
if inst.exception is None:
# Reraise
self.return_exception(None)
else:
exctype = self.typeof(inst.exception.name)
if isinstance(exctype, types.ExceptionInstance):
# raise <instance> => find the instantiation site
excdef = self.interp.get_definition(inst.exception)
if (not isinstance(excdef, ir.Expr) or excdef.op != 'call'
or excdef.kws):
raise NotImplementedError("unsupported kind of raising")
# Try to infer the args tuple
args = tuple(self.interp.get_definition(arg).infer_constant()
for arg in excdef.args)
elif isinstance(exctype, types.ExceptionClass):
args = None
else:
raise NotImplementedError("cannot raise value of type %s"
% (exctype,))
self.return_exception(exctype.exc_class, args)
def lower_assign(self, ty, inst):
value = inst.value
# In nopython mode, closure vars are frozen like globals
if isinstance(value, (ir.Const, ir.Global, ir.FreeVar)):
if isinstance(ty, types.ExternalFunctionPointer):
res = self.context.get_constant_generic(self.builder, ty,
value.value)
self.has_dynamic_globals = True
elif isinstance(ty, types.Dummy):
res = self.context.get_dummy_value()
elif isinstance(ty, types.Array):
res = self.context.make_constant_array(self.builder, ty,
value.value)
else:
res = self.context.get_constant_generic(self.builder, ty,
value.value)
self.incref(ty, res)
return res
elif isinstance(value, ir.Expr):
return self.lower_expr(ty, value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
oty = self.typeof(value.name)
res = self.context.cast(self.builder, val, oty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Arg):
res = self.fnargs[value.index]
self.incref(ty, res)
return res
elif isinstance(value, ir.Yield):
res = self.lower_yield(ty, value)
self.incref(ty, res)
return res
raise NotImplementedError(type(value), value)
def lower_yield(self, retty, inst):
yp = self.generator_info.yield_points[inst.index]
assert yp.inst is inst
y = generators.LowerYield(self, yp, yp.live_vars)
y.lower_yield_suspend()
# Yield to caller
val = self.loadvar(inst.value.name)
typ = self.typeof(inst.value.name)
val = self.context.cast(self.builder, val, typ, self.gentype.yield_type)
self.call_conv.return_value(self.builder, val)
# Resumption point
y.lower_yield_resume()
# None is returned by the yield expression
return self.context.get_constant_generic(self.builder, retty, None)
def lower_binop(self, resty, expr, op):
lhs = expr.lhs
rhs = expr.rhs
lty = self.typeof(lhs.name)
rty = self.typeof(rhs.name)
lhs = self.loadvar(lhs.name)
rhs = self.loadvar(rhs.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(op, signature)
# Convert argument to match
lhs = self.context.cast(self.builder, lhs, lty, signature.args[0])
rhs = self.context.cast(self.builder, rhs, rty, signature.args[1])
res = impl(self.builder, (lhs, rhs))
return self.context.cast(self.builder, res,
signature.return_type, resty)
def _cast_var(self, var, ty):
"""
Cast a Numba IR variable to the given Numba type, returning a
low-level value.
"""
if isinstance(var, _VarArgItem):
varty = self.typeof(var.vararg.name)[var.index]
val = self.builder.extract_value(self.loadvar(var.vararg.name),
var.index)
else:
varty = self.typeof(var.name)
val = self.loadvar(var.name)
return self.context.cast(self.builder, val, varty, ty)
def lower_call(self, resty, expr):
signature = self.fndesc.calltypes[expr]
if isinstance(signature.return_type, types.Phantom):
return self.context.get_dummy_value()
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
argvals = expr.func.args
else:
fnty = self.typeof(expr.func.name)
pos_args = expr.args
if expr.vararg:
# Inject *args from function call
# The lowering will be done in _cast_var() above.
tp_vararg = self.typeof(expr.vararg.name)
assert isinstance(tp_vararg, types.BaseTuple)
pos_args = pos_args + [_VarArgItem(expr.vararg, i)
for i in range(len(tp_vararg))]
# Fold keyword arguments and resolve default argument values
pysig = signature.pysig
if pysig is None:
if expr.kws:
raise NotImplementedError("unsupported keyword arguments "
"when calling %s" % (fnty,))
argvals = [self._cast_var(var, sigty)
for var, sigty in zip(pos_args, signature.args)]
else:
def normal_handler(index, param, var):
return self._cast_var(var, signature.args[index])
def default_handler(index, param, default):
return self.context.get_constant_generic(
self.builder, signature.args[index], default)
def stararg_handler(index, param, vars):
values = [self._cast_var(var, sigty)
for var, sigty in
zip(vars, signature.args[index])]
return cgutils.make_anonymous_struct(self.builder, values)
argvals = typing.fold_arguments(pysig,
pos_args, dict(expr.kws),
normal_handler,
default_handler,
stararg_handler)
if isinstance(fnty, types.ExternalFunction):
# Handle a named external function
self.debug_print("# external function")
fndesc = funcdesc.ExternalFunctionDescriptor(
fnty.symbol, fnty.sig.return_type, fnty.sig.args)
func = self.context.declare_external_function(self.builder.module,
fndesc)
res = self.context.call_external_function(
self.builder, func, fndesc.argtypes, argvals)
elif isinstance(fnty, types.NumbaFunction):
# Handle a compiled Numba function
self.debug_print("# calling numba function")
res = self.context.call_internal(self.builder, fnty.fndesc,
fnty.sig, argvals)
elif isinstance(fnty, types.ExternalFunctionPointer):
self.debug_print("# calling external function pointer")
# Handle a C function pointer
pointer = self.loadvar(expr.func.name)
# If the external function pointer uses libpython
if fnty.requires_gil:
self.init_pyapi()
# Acquire the GIL
gil_state = self.pyapi.gil_ensure()
# Make PyObjects
newargvals = []
pyvals = []
for exptyp, gottyp, aval in zip(fnty.sig.args, signature.args,
argvals):
# Adjust argument values to pyobjects
if exptyp == types.ffi_forced_object:
self.incref(gottyp, aval)
obj = self.pyapi.from_native_value(aval, gottyp,
self.env_manager)
newargvals.append(obj)
pyvals.append(obj)
else:
newargvals.append(aval)
# Call external function
res = self.context.call_function_pointer(self.builder, pointer,
newargvals, fnty.cconv)
# Release PyObjects
for obj in pyvals:
self.pyapi.decref(obj)
# Release the GIL
self.pyapi.gil_release(gil_state)
# If the external function pointer does NOT use libpython
else:
res = self.context.call_function_pointer(self.builder, pointer,
argvals, fnty.cconv)
else:
# Normal function resolution (for Numba-compiled functions)
self.debug_print("# calling normal function: {0}".format(fnty))
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
                # for bound functions
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
argvals = [the_self] + list(argvals)
res = impl(self.builder, argvals)
libs = getattr(impl, "libs", ())
for lib in libs:
self.library.add_linking_library(lib)
return self.context.cast(self.builder, res, signature.return_type,
resty)
def lower_expr(self, resty, expr):
if expr.op == 'binop':
return self.lower_binop(resty, expr, expr.fn)
elif expr.op == 'inplace_binop':
lty = self.typeof(expr.lhs.name)
if lty.mutable:
return self.lower_binop(resty, expr, expr.fn)
else:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.
return self.lower_binop(resty, expr, expr.immutable_fn)
elif expr.op == 'unary':
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
res = self.context.cast(self.builder, res,
signature.return_type, resty)
return res
elif expr.op == 'call':
res = self.lower_call(resty, expr)
return res
elif expr.op == 'pair_first':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_first(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op == 'pair_second':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_second(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op in ('getiter', 'iternext'):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
res = self.context.cast(self.builder, res, signature.return_type,
resty)
return res
elif expr.op == 'exhaust_iter':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# If we have a tuple, we needn't do anything
            # (and we can't iterate over the heterogeneous ones).
if isinstance(ty, types.BaseTuple):
assert ty == resty
self.incref(ty, val)
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function('getiter',
getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function('iternext',
iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder,
pair, pairty)
with cgutils.if_unlikely(self.builder,
self.builder.not_(is_valid)):
self.return_exception(ValueError)
item = self.context.pair_first(self.builder,
pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder,
pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError)
self.decref(ty.iterator_type, iterobj)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
res = self.context.get_bound_function(self.builder, val, ty)
self.incref(resty, res)
return res
else:
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
return self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
return res
elif expr.op == "static_getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.context.get_constant(types.intp, expr.index)
if cgutils.is_struct(baseval.type):
# Statically extract the given element from the structure
# (structures aren't dynamically indexable).
res = self.builder.extract_value(baseval, expr.index)
self.incref(resty, res)
return res
else:
# Fall back on the generic getitem() implementation
# for this type.
signature = typing.signature(resty,
self.typeof(expr.value.name),
types.intp)
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
res = impl(self.builder, argvals)
return self.context.cast(self.builder, res,
signature.return_type, resty)
elif expr.op == "getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.value.name),
self.typeof(expr.index.name))
castvals = [self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps,
signature.args)]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res,
signature.return_type,
resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)]
tup = self.context.make_tuple(self.builder, resty, castvals)
self.incref(resty, tup)
return tup
elif expr.op == "build_list":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty, resty.dtype)
for val, fromty in zip(itemvals, itemtys)]
return self.context.build_list(self.builder, resty, castvals)
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
self.incref(resty, val)
return castval
elif expr.op in self.context.special_ops:
res = self.context.special_ops[expr.op](self, expr)
return res
raise NotImplementedError(expr)
def getvar(self, name):
return self.varmap[name]
def loadvar(self, name):
ptr = self.getvar(name)
return self.builder.load(ptr)
def storevar(self, value, name):
fetype = self.typeof(name)
# Define if not already
if name not in self.varmap:
# If not already defined, allocate it
llty = self.context.get_value_type(fetype)
ptr = self.alloca_lltype(name, llty)
# Remember the pointer
self.varmap[name] = ptr
# Clean up existing value stored in the variable
old = self.loadvar(name)
self.decref(fetype, old)
# Store variable
ptr = self.getvar(name)
if value.type != ptr.type.pointee:
msg = ("Storing {value.type} to ptr of {ptr.type.pointee} ('{name}'). "
"FE type {fetype}").format(value=value, ptr=ptr,
fetype=fetype, name=name)
raise AssertionError(msg)
self.builder.store(value, ptr)
def alloca(self, name, type):
lltype = self.context.get_value_type(type)
return self.alloca_lltype(name, lltype)
def alloca_lltype(self, name, lltype):
return cgutils.alloca_once(self.builder, lltype, name=name, zfill=True)
def incref(self, typ, val):
if not self.context.enable_nrt:
return
self.context.nrt_incref(self.builder, typ, val)
def decref(self, typ, val):
if not self.context.enable_nrt:
return
self.context.nrt_decref(self.builder, typ, val)
def _delete_variable(self, varname):
"""
Zero-fill variable to avoid crashing due to extra ir.Del
"""
storage = self.getvar(varname)
self.builder.store(Constant.null(storage.type.pointee), storage)
|
|
'''Top-level entry points to yakonfig.
.. This software is released under an MIT/X11 open source license.
Copyright 2014-2015 Diffeo, Inc.
Most programs' `main()` functions will call yakonfig as::
parser = argparse.ArgumentParser()
yakonfig.parse_args(parser, [yakonfig, module, module...])
where the listed modules are top-level modules or other
:class:`yakonfig.Configurable` objects the program uses.
Test code and other things not driven by argparse can instead call::
yakonfig.set_default_config([yakonfig, module, module, ...])
or::
    with yakonfig.defaulted_config([yakonfig, ...]):
...
.. autofunction:: parse_args
.. autofunction:: set_default_config
.. autofunction:: defaulted_config
'''
from __future__ import absolute_import
import collections
import contextlib
import copy
import logging
import sys
from six import iteritems, StringIO
import yaml as yaml_mod
from .exceptions import ConfigurationError, ProgrammerError
from .merge import overlay_config, diff_config
from .yakonfig import get_global_config, set_global_config, _temporary_config
# These implement the Configurable interface for yakonfig proper!
config_name = 'yakonfig'
logger = logging.getLogger(__name__)
def add_arguments(parser):
'''Add command-line arguments for yakonfig proper.
This is part of the :class:`~yakonfig.Configurable` interface, and
is usually run by including :mod:`yakonfig` in the
:func:`parse_args()` module list.
:param argparse.ArgumentParser parser: command-line argument
parser
'''
parser.add_argument('--config', '-c', metavar='FILE',
help='read configuration from FILE')
parser.add_argument('--dump-config', metavar='WHAT', nargs='?',
help='dump out configuration then stop '
'(default, effective, full)')
runtime_keys = {'config': 'config'}
def parse_args(parser, modules, args=None):
"""Set up global configuration for command-line tools.
`modules` is an iterable of
:class:`yakonfig.Configurable` objects, or anything
equivalently typed. This function iterates through those objects
and calls
:meth:`~yakonfig.Configurable.add_arguments` on
each to build up a complete list of command-line arguments, then
calls :meth:`argparse.ArgumentParser.parse_args` to actually
process the command line. This produces a configuration that is a
combination of all default values declared by all modules;
configuration specified in ``--config`` arguments; and overriding
configuration values specified in command-line arguments.
This returns the :class:`argparse.Namespace` object, in case the
application has defined its own command-line parameters and
needs to process them. The new global configuration can be
obtained via :func:`yakonfig.get_global_config`.
:param argparse.ArgumentParser parser: application-provided
argument parser
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.Configurable`
:param args: command-line options, or `None` to use `sys.argv`
:return: the new global configuration
"""
collect_add_argparse(parser, modules)
namespace = parser.parse_args(args)
try:
do_dump_config = getattr(namespace, 'dump_config', None)
set_default_config(modules, params=vars(namespace),
validate=not do_dump_config)
if do_dump_config:
if namespace.dump_config == 'full':
to_dump = get_global_config()
elif namespace.dump_config == 'default':
to_dump = assemble_default_config(modules)
else: # 'effective'
to_dump = diff_config(assemble_default_config(modules),
get_global_config())
yaml_mod.dump(to_dump, sys.stdout)
parser.exit()
except ConfigurationError as e:
parser.error(e)
return namespace
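# Illustrative sketch (not part of the original module): a typical main() for
# an argparse-driven tool, following the pattern in the module docstring.
# ``_DemoModule`` and its names are hypothetical; real programs would normally
# also pass the ``yakonfig`` package itself to get --config/--dump-config.
def _example_parse_args_usage(argv=None):
    import argparse
    class _DemoModule(object):
        config_name = 'demo'
        default_config = {'greeting': 'hello'}
        runtime_keys = {'greeting': 'greeting'}  # argparse dest -> config key
        @staticmethod
        def add_arguments(parser):
            parser.add_argument('--greeting')
    parser = argparse.ArgumentParser()
    namespace = parse_args(parser, [_DemoModule], args=argv)
    # The merged configuration is now globally visible, e.g.
    # _example_parse_args_usage(['--greeting', 'hi'])[1] == 'hi'
    return namespace, get_global_config('demo', 'greeting')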
def set_default_config(modules, params=None, yaml=None, filename=None,
config=None, validate=True):
"""Set up global configuration for tests and noninteractive tools.
`modules` is an iterable of
:class:`yakonfig.Configurable` objects, or anything
equivalently typed. This function iterates through those objects
to produce a default configuration, reads `yaml` as though it were
the configuration file, and fills in any values from `params` as
though they were command-line arguments. The resulting
configuration is set as the global configuration.
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.Configurable`
:param dict params: dictionary of command-line argument key to values
:param str yaml: global configuration file
:param str filename: location of global configuration file
:param dict config: global configuration object
:param bool validate: check configuration after creating
:return: the new global configuration
:returntype: dict
"""
if params is None:
params = {}
# Get the configuration from the file, or from params['config']
file_config = {}
if yaml is None and filename is None and config is None:
if 'config' in params and params['config'] is not None:
filename = params['config']
if yaml is not None or filename is not None or config is not None:
if yaml is not None:
file_config = yaml_mod.load(StringIO(yaml))
elif filename is not None:
with open(filename, 'r') as f:
file_config = yaml_mod.load(f)
elif config is not None:
file_config = config
# First pass: set up to call replace_config()
# Assemble the configuration from defaults + file + arguments
base_config = copy.deepcopy(file_config)
create_config_tree(base_config, modules)
fill_in_arguments(base_config, modules, params)
default_config = assemble_default_config(modules)
base_config = overlay_config(default_config, base_config)
# Replace the modules list (accommodate external modules)
def replace_module(config, m):
name = getattr(m, 'config_name')
c = config.get(name, {})
if hasattr(m, 'replace_config'):
return getattr(m, 'replace_config')(c, name)
return m
modules = [replace_module(base_config, m) for m in modules]
# Reassemble the configuration again, this time reaching out to
# the environment
base_config = file_config
create_config_tree(base_config, modules)
fill_in_arguments(base_config, modules, params)
do_config_discovery(base_config, modules)
default_config = assemble_default_config(modules)
base_config = overlay_config(default_config, file_config)
fill_in_arguments(base_config, modules, params)
# Validate the configuration
if validate and len(modules) > 0:
mod = modules[-1]
checker = getattr(mod, 'check_config', None)
if checker is not None:
with _temporary_config():
set_global_config(base_config)
checker(base_config[mod.config_name], mod.config_name)
# All done, normalize and set the global configuration
normalize_config(base_config, modules)
set_global_config(base_config)
return base_config
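# Illustrative sketch (not part of the original module): setting up a test
# configuration directly from a YAML string; ``_DemoModule`` is hypothetical.
def _example_set_default_config():
    class _DemoModule(object):
        config_name = 'demo'
        default_config = {'param': 'default'}
    set_default_config([_DemoModule], yaml='demo: {param: value}')
    return get_global_config('demo', 'param')  # -> 'value'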
@contextlib.contextmanager
def defaulted_config(modules, params=None, yaml=None, filename=None,
config=None, validate=True):
"""Context manager version of :func:`set_default_config()`.
Use this with a Python 'with' statement, like
>>> config_yaml = '''
... toplevel:
... param: value
... '''
>>> with yakonfig.defaulted_config([toplevel], yaml=config_yaml) as config:
... assert 'param' in config['toplevel']
... assert yakonfig.get_global_config('toplevel', 'param') == 'value'
On exit the global configuration is restored to its previous state
(if any).
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.Configurable`
:param dict params: dictionary of command-line argument key to values
:param str yaml: global configuration file
:param str filename: location of global configuration file
:param dict config: global configuration object
:param bool validate: check configuration after creating
:return: the new global configuration
"""
with _temporary_config():
set_default_config(modules, params=params, yaml=yaml,
filename=filename, config=config, validate=validate)
yield get_global_config()
def check_toplevel_config(what, who):
"""Verify that some dependent configuration is present and correct.
This will generally be called from a
:meth:`~yakonfig.Configurable.check_config` implementation.
`what` is a :class:`~yakonfig.Configurable`-like object. If the
corresponding configuration isn't present in the global
configuration, raise a :exc:`yakonfig.ConfigurationError`
explaining that `who` required it. Otherwise call that module's
:meth:`~yakonfig.Configurable.check_config` (if any).
:param yakonfig.Configurable what: top-level module to require
:param str who: name of the requiring module
:raise yakonfig.ConfigurationError: if configuration for
`what` is missing or incorrect
"""
config_name = what.config_name
config = get_global_config()
if config_name not in config:
raise ConfigurationError(
'{0} requires top-level configuration for {1}'
.format(who, config_name))
checker = getattr(what, 'check_config', None)
if checker:
checker(config[config_name], config_name)
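# Illustrative sketch (not part of the original module): a check_config()
# implementation that requires another top-level module's configuration.
# ``_storage_module`` and the ``root_path`` key are hypothetical.
def _example_check_config(config, name):
    class _storage_module(object):
        config_name = 'storage'
    check_toplevel_config(_storage_module, name)
    if 'root_path' not in config:
        raise ConfigurationError('{0} requires root_path'.format(name))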
def _recurse_config(parent_config, modules, f, prefix=''):
'''Walk through the module tree.
This is a helper function for :func:`create_config_tree` and
:func:`_walk_config`. It calls `f` once for each module in the
configuration tree with parameters `parent_config`, `config_name`,
`prefix`, and `module`. `parent_config[config_name]` may or may
    not exist (but could be populated, as :func:`create_config_tree` does).
If even the parent configuration doesn't exist, `parent_config`
could be :const:`None`.
:param dict parent_config: configuration dictionary holding
configuration for `modules`, or maybe :const:`None`
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param f: callable to call on each module
:param str prefix: prefix name of `parent_config`
:return: `parent_config`
'''
for module in modules:
config_name = getattr(module, 'config_name', None)
if config_name is None:
raise ProgrammerError('{0!r} must provide a config_name'
.format(module))
new_name = prefix + config_name
f(parent_config, config_name, new_name, module)
try:
_recurse_config((parent_config or {}).get(config_name, None),
getattr(module, 'sub_modules', []),
f,
new_name + '.')
except:
# achieve a sort of stack trace on the way out
logger.error('exception in _recurse_config of %s', module)
raise
return parent_config
def create_config_tree(config, modules, prefix=''):
'''Cause every possible configuration sub-dictionary to exist.
This is intended to be called very early in the configuration
sequence. For each module, it checks that the corresponding
configuration item exists in `config` and creates it as an empty
dictionary if required, and then recurses into child
configs/modules.
:param dict config: configuration to populate
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param str prefix: prefix name of the config
:return: `config`
:raises yakonfig.ConfigurationError: if an expected name is present
in the provided config, but that name is not a dictionary
'''
def work_in(parent_config, config_name, prefix, module):
if config_name not in parent_config:
# this is the usual, expected case
parent_config[config_name] = {}
elif not isinstance(parent_config[config_name], collections.Mapping):
raise ConfigurationError(
'{0} must be an object configuration'.format(prefix))
else:
# config_name is a pre-existing dictionary in parent_config
pass
    return _recurse_config(config, modules, work_in)
def _walk_config(config, modules, f, prefix=''):
"""Recursively walk through a module list.
For every module, calls ``f(config, module, name)`` where
`config` is the configuration scoped to that module, `module`
is the Configurable-like object, and `name` is the complete
path (ending in the module name).
:param dict config: configuration to walk and possibly update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param f: callback function for each module
:param str prefix: prefix name of the config
:return: config
"""
def work_in(parent_config, config_name, prefix, module):
# create_config_tree() needs to have been called by now
        # and you should never hit either of these errors
if config_name not in parent_config:
raise ProgrammerError('{0} not present in configuration'
.format(prefix))
if not isinstance(parent_config[config_name], collections.Mapping):
raise ConfigurationError(
'{0} must be an object configuration'.format(prefix))
# do the work!
f(parent_config[config_name], module, prefix)
return _recurse_config(config, modules, work_in)
def collect_add_argparse(parser, modules):
"""Add all command-line options.
`modules` is an iterable of
:class:`yakonfig.configurable.Configurable` objects, or anything
equivalently typed. This calls
:meth:`~yakonfig.configurable.Configurable.add_arguments` (if
present) on all of them to set the global command-line arguments.
:param argparse.ArgumentParser parser: argparse parser
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
"""
def work_in(parent_config, config_name, prefix, module):
f = getattr(module, 'add_arguments', None)
if f is not None:
f(parser)
_recurse_config(dict(), modules, work_in)
return parser
def assemble_default_config(modules):
"""Build the default configuration from a set of modules.
`modules` is an iterable of
:class:`yakonfig.configurable.Configurable` objects, or anything
equivalently typed. This produces the default configuration from
that list of modules.
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:return: configuration dictionary
"""
def work_in(parent_config, config_name, prefix, module):
my_config = dict(getattr(module, 'default_config', {}))
if config_name in parent_config:
extra_config = parent_config[config_name]
raise ProgrammerError(
'config for {0} already present when about to fetch {3}.default_config (had {1!r} would have set {2!r})'.format(
prefix, extra_config, my_config, module))
parent_config[config_name] = my_config
return _recurse_config(dict(), modules, work_in)
def fill_in_arguments(config, modules, args):
"""Fill in configuration fields from command-line arguments.
`config` is a dictionary holding the initial configuration,
probably the result of :func:`assemble_default_config`. It reads
through `modules`, and for each, fills in any configuration values
that are provided in `args`.
`config` is modified in place. `args` may be either a dictionary
or an object (as the result of :mod:`argparse`).
:param dict config: configuration tree to update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param args: command-line objects
:paramtype args: dict or object
:return: config
"""
def work_in(config, module, name):
rkeys = getattr(module, 'runtime_keys', {})
for (attr, cname) in iteritems(rkeys):
v = args.get(attr, None)
if v is not None:
config[cname] = v
if not isinstance(args, collections.Mapping):
args = vars(args)
return _walk_config(config, modules, work_in)
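# Illustrative sketch (not part of the original module): a Configurable-like
# module whose ``runtime_keys`` lets fill_in_arguments() copy the parsed
# ``--port`` option into its configuration block; all names are hypothetical.
class _ExamplePortModule(object):
    config_name = 'example'
    default_config = {'port': 8080}
    runtime_keys = {'port': 'port'}  # argparse dest name -> config key
    @staticmethod
    def add_arguments(parser):
        parser.add_argument('--port', type=int)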
def do_config_discovery(config, modules):
'''Let modules detect additional configuration values.
`config` is the initial dictionary with command-line and
file-derived values, but nothing else, filled in. This calls
:meth:`yakonfig.configurable.Configurable.discover_config` on
    every configuration module. It is expected that this method will
modify the passed-in configuration dictionaries in place.
:param dict config: configuration tree to update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:return: `config`
'''
def work_in(config, module, name):
f = getattr(module, 'discover_config', None)
if f:
f(config, name)
return _walk_config(config, modules, work_in)
def normalize_config(config, modules):
"""Normalize configuration values in the entire tree.
`config` is a dictionary holding the almost-final configuration.
Each module's
    :meth:`yakonfig.configurable.Configurable.normalize_config`
function is called to make changes such as pushing configuration
into sub-module configuration blocks and making file paths
absolute.
:param dict config: configuration tree to update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
"""
def work_in(config, module, name):
f = getattr(module, 'normalize_config', None)
if f:
f(config)
return _walk_config(config, modules, work_in)
|
|
# -*- coding: utf-8 -*-
from datetime import date
import json
from operator import itemgetter
import os
import warnings
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import signals, Model, ManyToManyField
from django.db.models.base import ModelBase
try:
# Django >= 1.8, < 1.9
from django.db.models.fields.related import (
ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor
)
except ImportError:
# Django >= 1.9
from django.db.models.fields.related import ForwardManyToOneDescriptor
from django.utils import six, timezone
from django.utils.six import text_type
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.six.moves import filter
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import DontUsePageAttributeWarning
from cms.models.placeholdermodel import Placeholder
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import admin_reverse
from treebeard.mp_tree import MP_Node
class BoundRenderMeta(object):
def __init__(self, meta):
self.index = 0
self.total = 1
self.text_enabled = getattr(meta, 'text_enabled', False)
class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side
of a one-to-one relation.
In the example::
class MyPlugin(CMSPlugin):
cmsplugin_ptr = ForeignKey(CMSPlugin, parent_link=True)
``myplugin.cmsplugin_ptr`` is a ``ForwardOneToOneDescriptor`` instance.
"""
# This class is necessary to backport the following Django fix
# https://github.com/django/django/commit/38575b007a722d6af510ea46d46393a4cda9ca29
# into the CMS.
def get_inherited_object(self, instance):
"""
Returns an instance of the subclassed model
in a multi-table inheritance scenario.
"""
# This is an exact copy of the code for get_object()
# provided in the commit above.
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.rel.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
return rel_model(**kwargs)
return
def __get__(self, instance, instance_type=None):
if instance is None:
return self
if not hasattr(instance, self.cache_name):
# No cached object is present on the instance.
val = self.field.get_local_related_value(instance)
if None not in val:
# Fetch the inherited object instance
# using values from the current instance.
# This avoids an extra db call because we already
# have the data.
# This can be None if a field from the base class (CMSPlugin)
# was deferred.
rel_obj = self.get_inherited_object(instance)
                if rel_obj is not None:
# Populate the internal relationship cache.
setattr(instance, self.cache_name, rel_obj)
return super(ForwardOneToOneDescriptor, self).__get__(instance, instance_type)
class PluginModelBase(ModelBase):
"""
Metaclass for all CMSPlugin subclasses. This class should not be used for
    any other type of model.
"""
def __new__(cls, name, bases, attrs):
super_new = super(PluginModelBase, cls).__new__
# remove RenderMeta from the plugin class
attr_meta = attrs.pop('RenderMeta', None)
# Only care about subclasses of CMSPlugin
# (excluding CMSPlugin itself).
parents = [b for b in bases if isinstance(b, PluginModelBase)]
if parents and 'cmsplugin_ptr' not in attrs:
# The current class subclasses from CMSPlugin
# and has not defined a cmsplugin_ptr field.
meta = attrs.get('Meta', None)
proxy = getattr(meta, 'proxy', False)
# True if any of the base classes defines a cmsplugin_ptr field.
field_is_inherited = any(hasattr(parent, 'cmsplugin_ptr') for parent in parents)
# Skip proxied classes which are not autonomous ORM objects
# We don't skip abstract classes because when a plugin
# inherits from an abstract class, we need to make sure the
# abstract class gets the correct related name, otherwise the
# plugin inherits the default related name and then the
# field_is_inherited check above will prevent us from adding
# the fixed related name.
if not proxy and not field_is_inherited:
# It's important to set the field as if it was set
# manually in the model class.
# This is because Django will do a lot of operations
# under the hood to set the forward and reverse relations.
attrs['cmsplugin_ptr'] = models.OneToOneField(
to='cms.CMSPlugin',
name='cmsplugin_ptr',
related_name='%(app_label)s_%(class)s',
auto_created=True,
parent_link=True,
)
# create a new class (using the super-metaclass)
new_class = super_new(cls, name, bases, attrs)
# Skip abstract and proxied classes which are not autonomous ORM objects
if parents and not new_class._meta.abstract and not new_class._meta.proxy:
# Use our patched descriptor regardless of how the one to one
# relationship was defined.
parent_link_field = new_class._meta.get_field('cmsplugin_ptr')
setattr(new_class, 'cmsplugin_ptr', ForwardOneToOneDescriptor(parent_link_field))
# if there is a RenderMeta in attrs, use this one
# else try to use the one from the superclass (if present)
meta = attr_meta or getattr(new_class, '_render_meta', None)
treebeard_view_fields = (f for f in new_class._meta.fields
if f.name in ('depth', 'numchild', 'path'))
for field in treebeard_view_fields:
field.editable = False
# set a new BoundRenderMeta to prevent leaking of state
new_class._render_meta = BoundRenderMeta(meta)
return new_class
@python_2_unicode_compatible
class CMSPlugin(six.with_metaclass(PluginModelBase, MP_Node)):
'''
The base class for a CMS plugin model. When defining a new custom plugin, you should
store plugin-instance specific information on a subclass of this class.
    An example of this would be to store the number of pictures to display in a gallery.
Two restrictions apply when subclassing this to use in your own models:
1. Subclasses of CMSPlugin *cannot be further subclassed*
2. Subclasses of CMSPlugin cannot define a "text" field.
'''
placeholder = models.ForeignKey(Placeholder, on_delete=models.CASCADE, editable=False, null=True)
parent = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True, editable=False)
    position = models.PositiveSmallIntegerField(_("position"), default=0, editable=False)
language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False)
plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False)
creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
changed_date = models.DateTimeField(auto_now=True)
child_plugin_instances = None
translatable_content_excluded_fields = []
class Meta:
app_label = 'cms'
class RenderMeta:
index = 0
total = 1
text_enabled = False
def __str__(self):
return force_text(self.pk)
def __repr__(self):
display = "<{module}.{class_name} id={id} plugin_type='{plugin_type}' object at {location}>".format(
module=self.__module__,
class_name=self.__class__.__name__,
id=self.pk,
plugin_type=(self.plugin_type),
location=hex(id(self)),
)
return display
def get_plugin_name(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type).name
def get_short_description(self):
instance = self.get_plugin_instance()[0]
if instance is not None:
return force_text(instance)
return _("<Empty>")
def get_plugin_class(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type)
def get_plugin_class_instance(self, admin=None):
plugin_class = self.get_plugin_class()
# needed so we have the same signature as the original ModelAdmin
return plugin_class(plugin_class.model, admin)
def get_plugin_instance(self, admin=None):
'''
Given a plugin instance (usually as a CMSPluginBase), this method
returns a tuple containing:
        instance - The instance AS THE APPROPRIATE SUBCLASS OF
        CMSPlugin and not necessarily just 'self', which is
        often just a CMSPlugin,
        plugin - the associated plugin class instance (subclass
        of CMSPluginBase)
'''
plugin = self.get_plugin_class_instance(admin)
try:
instance = self.get_bound_plugin()
except ObjectDoesNotExist:
instance = None
self._inst = None
return (instance, plugin)
def get_bound_plugin(self):
"""
Returns an instance of the plugin model
configured for this plugin type.
"""
if hasattr(self, "_inst"):
return self._inst
plugin = self.get_plugin_class()
if plugin.model != self.__class__:
self._inst = plugin.model.objects.get(cmsplugin_ptr=self)
self._inst._render_meta = self._render_meta
else:
self._inst = self
return self._inst
def get_plugin_info(self, children=None, parents=None):
plugin_name = self.get_plugin_name()
data = {
'type': 'plugin',
'placeholder_id': text_type(self.placeholder_id),
'plugin_name': force_text(plugin_name) or '',
'plugin_type': self.plugin_type,
'plugin_id': text_type(self.pk),
'plugin_language': self.language or '',
'plugin_parent': text_type(self.parent_id or ''),
'plugin_restriction': children or [],
'plugin_parent_restriction': parents or [],
'urls': self.get_action_urls(),
}
return data
def refresh_from_db(self, *args, **kwargs):
super(CMSPlugin, self).refresh_from_db(*args, **kwargs)
# Delete this internal cache to let the cms populate it
# on demand.
try:
del self._inst
except AttributeError:
pass
def get_media_path(self, filename):
pages = self.placeholder.page_set.all()
if pages.exists():
return pages[0].get_media_path(filename)
else: # django 1.0.2 compatibility
today = date.today()
return os.path.join(get_cms_setting('PAGE_MEDIA_PATH'),
str(today.year), str(today.month), str(today.day), filename)
@property
def page(self):
warnings.warn(
"Don't use the page attribute on CMSPlugins! CMSPlugins are not "
"guaranteed to have a page associated with them!",
DontUsePageAttributeWarning,
stacklevel=2,
)
return self.placeholder.page if self.placeholder_id else None
def get_instance_icon_src(self):
"""
Get src URL for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return plugin.icon_src(instance) if instance else u''
def get_instance_icon_alt(self):
"""
Get alt text for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return force_text(plugin.icon_alt(instance)) if instance else u''
def update(self, refresh=False, **fields):
CMSPlugin.objects.filter(pk=self.pk).update(**fields)
if refresh:
return self.reload()
return
def save(self, no_signals=False, *args, **kwargs):
if not self.depth:
if self.parent_id or self.parent:
self.parent.add_child(instance=self)
else:
if not self.position and not self.position == 0:
self.position = CMSPlugin.objects.filter(parent__isnull=True,
language=self.language,
placeholder_id=self.placeholder_id).count()
self.add_root(instance=self)
return
super(CMSPlugin, self).save(*args, **kwargs)
def reload(self):
return CMSPlugin.objects.get(pk=self.pk)
def move(self, target, pos=None):
super(CMSPlugin, self).move(target, pos)
self = self.reload()
try:
new_pos = max(CMSPlugin.objects.filter(parent_id=self.parent_id,
placeholder_id=self.placeholder_id,
language=self.language).exclude(pk=self.pk).order_by('depth', 'path').values_list('position', flat=True)) + 1
except ValueError:
# This is the first plugin in the set
new_pos = 0
return self.update(refresh=True, position=new_pos)
def set_base_attr(self, plugin):
for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'depth', 'path',
'numchild', 'pk', 'position']:
setattr(plugin, attr, getattr(self, attr))
def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False):
"""
Copy this plugin and return the new plugin.
The logic of this method is the following:
# get a new generic plugin instance
# assign the position in the plugin tree
# save it to let mptt/treebeard calculate the tree attributes
# then get a copy of the current plugin instance
# assign to it the id of the generic plugin instance above;
this will effectively change the generic plugin created above
into a concrete one
# copy the tree related attributes from the generic plugin to
the concrete one
# save the concrete plugin
# trigger the copy relations
# return the generic plugin instance
This copy logic is required because we don't know what the fields of
the real plugin are. By getting another instance of it at step 4 and
then overwriting its ID at step 5, the ORM will copy the custom
fields for us.
"""
try:
plugin_instance, cls = self.get_plugin_instance()
except KeyError: # plugin type not found anymore
return
# set up some basic attributes on the new_plugin
new_plugin = CMSPlugin()
new_plugin.placeholder = target_placeholder
# we assign a parent to our new plugin
parent_cache[self.pk] = new_plugin
if self.parent:
parent = parent_cache[self.parent_id]
parent = CMSPlugin.objects.get(pk=parent.pk)
new_plugin.parent_id = parent.pk
new_plugin.parent = parent
new_plugin.language = target_language
new_plugin.plugin_type = self.plugin_type
if no_signals:
from cms.signals import pre_save_plugins
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin)
new_plugin._no_reorder = True
new_plugin.save()
if plugin_instance:
# get a new instance so references do not get mixed up
plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk)
plugin_instance.pk = new_plugin.pk
plugin_instance.id = new_plugin.pk
plugin_instance.placeholder = target_placeholder
plugin_instance.cmsplugin_ptr = new_plugin
plugin_instance.language = target_language
plugin_instance.parent = new_plugin.parent
plugin_instance.depth = new_plugin.depth
plugin_instance.path = new_plugin.path
plugin_instance.numchild = new_plugin.numchild
plugin_instance._no_reorder = True
plugin_instance.save()
old_instance = plugin_instance.__class__.objects.get(pk=self.pk)
plugin_instance.copy_relations(old_instance)
if no_signals:
signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
return new_plugin
@classmethod
def fix_tree(cls, destructive=False):
"""
        Fixes the plugin tree by first calling treebeard fix_tree and then
        recalculating the correct position property for each plugin.
"""
from cms.utils.plugins import reorder_plugins
super(CMSPlugin, cls).fix_tree(destructive)
for placeholder in Placeholder.objects.all():
for language, __ in settings.LANGUAGES:
order = CMSPlugin.objects.filter(
placeholder_id=placeholder.pk, language=language,
parent_id__isnull=True
).order_by('position', 'path').values_list('pk', flat=True)
reorder_plugins(placeholder, None, language, order)
for plugin in CMSPlugin.objects.filter(
placeholder_id=placeholder.pk,
language=language).order_by('depth', 'path'):
order = CMSPlugin.objects.filter(
parent_id=plugin.pk
).order_by('position', 'path').values_list('pk', flat=True)
reorder_plugins(placeholder, plugin.pk, language, order)
def post_copy(self, old_instance, new_old_ziplist):
"""
        Handle more advanced cases (e.g. Text Plugins) after the original is
copied
"""
pass
def copy_relations(self, old_instance):
"""
Handle copying of any relations attached to this plugin. Custom plugins
have to do this themselves!
"""
pass
@classmethod
def _get_related_objects(cls):
fields = cls._meta._get_fields(
forward=False, reverse=True,
include_parents=True,
include_hidden=False,
)
return list(obj for obj in fields if not isinstance(obj.field, ManyToManyField))
def get_position_in_placeholder(self):
"""
        Return the 1-based position of this plugin within its placeholder.
"""
return self.position + 1
def get_breadcrumb(self):
from cms.models import Page
model = self.placeholder._get_attached_model() or Page
breadcrumb = []
for parent in self.get_ancestors():
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[parent.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[parent.pk]))
breadcrumb.append({'title': force_text(parent.get_plugin_name()), 'url': url})
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[self.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[self.pk]))
breadcrumb.append({'title': force_text(self.get_plugin_name()), 'url': url})
return breadcrumb
def get_breadcrumb_json(self):
result = json.dumps(self.get_breadcrumb())
result = mark_safe(result)
return result
def num_children(self):
return self.numchild
def notify_on_autoadd(self, request, conf):
"""
Method called when we auto add this plugin via default_plugins in
CMS_PLACEHOLDER_CONF.
Some specific plugins may have some special stuff to do when they are
auto added.
"""
pass
def notify_on_autoadd_children(self, request, conf, children):
"""
Method called when we auto add children to this plugin via
default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF.
Some specific plugins may have some special stuff to do when we add
        children to them, e.g. TextPlugin must update its content to add HTML
        tags so that its children are visible in the WYSIWYG editor.
"""
pass
def get_translatable_content(self):
"""
Returns {field_name: field_contents} for translatable fields, where
        field_contents is a non-empty string.
"""
fields = (f for f in self._meta.fields
if isinstance(f, (models.CharField, models.TextField)) and
f.editable and not f.choices and
f.name not in self.translatable_content_excluded_fields)
return dict(filter(itemgetter(1),
((f.name, getattr(self, f.name)) for f in fields)))
def set_translatable_content(self, fields):
for field, value in fields.items():
setattr(self, field, value)
self.save()
return all(getattr(self, field) == value
for field, value in fields.items())
def delete(self, no_mp=False, *args, **kwargs):
if no_mp:
Model.delete(self, *args, **kwargs)
else:
super(CMSPlugin, self).delete(*args, **kwargs)
def get_action_urls(self, js_compat=True):
if js_compat:
# TODO: Remove this condition
# once the javascript files have been refactored
# to use the new naming schema (ending in _url).
data = {
'edit_plugin': self.get_edit_url(),
'add_plugin': self.get_add_url(),
'delete_plugin': self.get_delete_url(),
'move_plugin': self.get_move_url(),
'copy_plugin': self.get_copy_url(),
}
else:
data = {
'edit_url': self.get_edit_url(),
'add_url': self.get_add_url(),
'delete_url': self.get_delete_url(),
'move_url': self.get_move_url(),
'copy_url': self.get_copy_url(),
}
return data
def get_add_url(self):
return self.placeholder.get_add_url()
def get_edit_url(self):
return self.placeholder.get_edit_url(self.pk)
def get_delete_url(self):
return self.placeholder.get_delete_url(self.pk)
def get_move_url(self):
return self.placeholder.get_move_url()
def get_copy_url(self):
return self.placeholder.get_copy_url()
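# Illustrative sketch (not part of the original module): a minimal custom
# plugin model as described in the CMSPlugin docstring. The class and field
# names are hypothetical; in a real project this subclass would live in an
# installed app's models.py, not in cms.models.pluginmodel.
class ExampleGalleryPluginModel(CMSPlugin):
    # Plugin-instance specific data: how many pictures the gallery shows.
    picture_count = models.PositiveIntegerField(default=4)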
def get_plugin_media_path(instance, filename):
"""
    Django requires that unbound functions used in field definitions be
    defined outside the parent class
    (see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values).
    This function is used within a field definition:
        file = models.FileField(_("file"), upload_to=get_plugin_media_path)
    and it invokes the bound method on the given instance at runtime.
"""
return instance.get_media_path(filename)
|