repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Tong-Chen/scikit-learn | refs/heads/master | sklearn/cluster/k_means_.py | 1 | """K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms
from ..utils.sparsefuncs import assign_rows_csr, mean_variance_axis0
from ..utils import check_arrays
from ..utils import check_random_state
from ..utils import atleast2d_or_csr
from ..utils import as_float_array
from ..utils import gen_batches
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Seed ``n_clusters`` centers using the k-means++ scheme.

    Parameters
    ----------
    X: array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).

    n_clusters: integer
        The number of seeds to choose.

    x_squared_norms: array, shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state: numpy.RandomState
        The generator used to initialize the centers.

    n_local_trials: integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    centers = np.empty((n_clusters, n_features))

    if n_local_trials is None:
        # Value tried (but not reported on) by Arthur & Vassilvitskii;
        # mentioned as helpful in their conclusion.
        n_local_trials = 2 + int(np.log(n_clusters))

    # The first center is drawn uniformly at random.
    first_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[first_id].toarray()
    else:
        centers[0] = X[first_id]

    # Squared distance of every point to its (so far unique) center; the
    # sum is the current "potential" being greedily minimized.
    closest_dist_sq = euclidean_distances(
        centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
    current_pot = closest_dist_sq.sum()

    for center_idx in range(1, n_clusters):
        # Sample candidate points with probability proportional to their
        # squared distance to the closest existing center.
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)

        dist_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # Keep the candidate whose inclusion lowers the potential the most
        # (ties go to the earliest trial, as only strict improvement wins).
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            trial_dist_sq = np.minimum(closest_dist_sq,
                                       dist_to_candidates[trial])
            trial_pot = trial_dist_sq.sum()
            if best_candidate is None or trial_pot < best_pot:
                best_candidate = candidate_ids[trial]
                best_pot = trial_pot
                best_dist_sq = trial_dist_sq

        if sp.issparse(X):
            centers[center_idx] = X[best_candidate].toarray()
        else:
            centers[center_idx] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq

    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis0(X)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances=True,
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1):
    """K-means clustering algorithm.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.

    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.

    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.

    precompute_distances : boolean, optional, default: True
        Precompute the full distance matrix during label assignment when X
        is dense (faster but takes more memory).

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.

    verbose : boolean, optional
        Verbosity mode.

    tol : float, optional
        The relative increment in the results before declaring convergence.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.

    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.

    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    """
    random_state = check_random_state(random_state)
    # as_float_array already returns a fresh array when copy_x is True (and
    # conversion from a non-float dtype copies regardless), so no second
    # explicit copy is needed below before centering in place.
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    # Subtract the mean of X for more accurate distance computations.  For
    # sparse X only an explicit array init is centered, not the data itself.
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
        if not sp.issparse(X):
            X -= X_mean

        if hasattr(init, '__array__'):
            # Work on a private copy so the caller's array is untouched.
            # NOTE(review): when X is sparse, best_centers is never shifted
            # back by X_mean at the end — verify intended for sparse input.
            init = np.asarray(init).copy()
            init -= X_mean
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in the k-means instead of %d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1

    # Precompute squared norms of data points (reused by every run).
    x_squared_norms = row_norms(X, squared=True)

    best_labels, best_inertia, best_centers = None, None, None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers = _kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
                                    init=init, verbose=verbose, tol=tol,
                                    precompute_distances=precompute_distances,
                                    x_squared_norms=x_squared_norms,
                                    # Change seed to ensure variety
                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]

    if not sp.issparse(X):
        if not copy_x:
            # Restore the caller's array, which was centered in place.
            X += X_mean
        best_centers += X_mean

    return best_centers, best_labels, best_inertia
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
                   init='k-means++', verbose=False, random_state=None,
                   tol=1e-4, precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.

    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.

    n_clusters: int
        The number of clusters to form as well as the number of
        centroids to generate.

    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norms of the rows of X, used to
        speed up distance computations.

    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.

    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.

        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.

    verbose: boolean, optional
        Verbosity mode.

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    tol: float, optional
        Absolute tolerance on the squared movement of the centers between
        two iterations before declaring convergence.

    precompute_distances: boolean, optional
        When True (and X is dense), labels are assigned from a precomputed
        full distance matrix inside _labels_inertia.

    Returns
    -------
    labels: integer ndarray with shape (n_samples,)
        labels[i] is the code or index of the centroid the
        i'th observation is closest to.

    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    centers: float ndarray with shape (k, n_features)
        Centroids of the best (lowest-inertia) iteration seen.
    """
    random_state = check_random_state(random_state)

    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print('Initialization complete')

    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)

    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)

        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)

        if verbose:
            print('Iteration %2d, inertia %.3f' % (i, inertia))

        # Track the best iterate: inertia is evaluated against the centers
        # from the start of the iteration, so the last pass may not be best.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia

        # Stop once the total squared movement of the centers drops below
        # the (already dataset-scaled, see _tolerance) threshold.
        if np.sum((centers_old - centers) ** 2) < tol:
            if verbose:
                print("Converged at iteration %d" % i)
            break

    return best_labels, best_inertia, best_centers
def _labels_inertia_precompute_dense(X, x_squared_norms, centers):
    """Assign labels and compute inertia from a full distance matrix.

    Parameters
    ----------
    X: float64 array, shape (n_samples, n_features)
        The samples to assign.

    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each sample.

    centers: float64 array, shape (k, n_features)
        The cluster centers.

    Returns
    -------
    labels: int32 array, shape (n_samples,)
        Index of the closest center for each sample.

    inertia: float
        Sum of squared distances of each sample to its closest center.
    """
    n_samples = X.shape[0]
    distances = euclidean_distances(centers, X, x_squared_norms,
                                    squared=True)
    # argmin returns the first (lowest-index) center on ties, matching the
    # previous per-center scan which only reassigned on strict improvement;
    # this replaces a Python loop over k with a single vectorized pass.
    labels = distances.argmin(axis=0).astype(np.int32)
    mindist = distances[labels, np.arange(n_samples)]
    inertia = mindist.sum()
    return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm

    Compute the labels and the inertia of the given samples and centers

    Parameters
    ----------
    X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.

    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.

    centers: float64 array, shape (k, n_features)
        The cluster centers.

    precompute_distances: boolean, optional, default: True
        When True and X is dense, assign labels from a precomputed full
        distance matrix (faster but takes more memory).

    distances: float64 array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels: int array of shape(n)
        The resulting assignment

    inertia: float
        The value of the inertia criterion with the assignment
    """
    # Take the precomputed dense path first: it neither reads nor fills
    # `labels`/`distances`, so allocating them beforehand was wasted work.
    if not sp.issparse(X) and precompute_distances:
        return _labels_inertia_precompute_dense(X, x_squared_norms, centers)

    n_samples = X.shape[0]
    # set the default value of centers to -1 to be able to detect any anomaly
    # easily
    labels = - np.ones(n_samples, np.int32)
    if distances is None:
        distances = np.zeros(shape=(0,), dtype=np.float64)

    if sp.issparse(X):
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
    else:
        inertia = _k_means._assign_labels_array(
            X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids

    Parameters
    ----------
    X: array, shape (n_samples, n_features)
        The data to pick seeds from.

    k: int
        number of centroids

    init: {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None

    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.

    Returns
    -------
    centers: array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]

    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # randint(0, n) draws from the same stream as the deprecated
        # random_integers(0, n - 1): same values, no deprecation warning.
        init_indices = random_state.randint(0, n_samples, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))

    # Check for an explicit array before comparing `init` against strings:
    # ndarray == str comparisons are deprecated/ambiguous in numpy.
    if hasattr(init, '__array__'):
        # Copy so that in-place center updates (e.g. the mini-batch
        # algorithm mutates centers directly) cannot modify the
        # user-provided array.
        centers = np.array(init)
    elif init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init == 'random':
        seeds = random_state.permutation(n_samples)[:k]
        centers = X[seeds]
    elif callable(init):
        centers = init(X, k, random_state=random_state)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))

    if sp.issparse(centers):
        centers = centers.toarray()

    if len(centers) != k:
        raise ValueError('The shape of the initial centers (%s) '
                         'does not match the number of clusters %i'
                         % (centers.shape, k))

    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering

    Parameters
    ----------

    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    precompute_distances : boolean
        Precompute distances (faster but takes more memory).

    tol : float, optional default: 1e-4
        Relative tolerance w.r.t. inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Attributes
    ----------
    `cluster_centers_` : array, [n_clusters, n_features]
        Coordinates of cluster centers

    `labels_` :
        Labels of each point

    `inertia_` : float
        The value of the inertia criterion associated with the chosen
        partition.

    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.

    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.

    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)

    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.

    See also
    --------

    MiniBatchKMeans:
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster to than the default batch implementation.
    """

    def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
                 tol=1e-4, precompute_distances=True,
                 verbose=0, random_state=None, copy_x=True, n_jobs=1):
        if hasattr(init, '__array__'):
            # An explicit array of centers fixes n_clusters.
            n_clusters = init.shape[0]
            init = np.asarray(init, dtype=np.float64)

        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = atleast2d_or_csr(X, dtype=np.float64)
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X

    def _check_test_data(self, X):
        """Validate data passed to predict/transform/score against the
        fitted centers (feature count and float dtype)."""
        X = atleast2d_or_csr(X)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if n_features != expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        # Use equality, not identity: ``X.dtype.kind is 'f'`` only worked
        # through CPython's interning of short string literals.
        if X.dtype.kind != 'f':
            warnings.warn("Got data type %s, converted to float "
                          "to avoid overflows" % X.dtype,
                          RuntimeWarning, stacklevel=2)
            # np.float64 replaces the deprecated np.float alias; astype(float)
            # yields float64 either way.
            X = X.astype(np.float64)

        return X

    def _check_fitted(self):
        """Raise if fit has not been called yet."""
        if not hasattr(self, "cluster_centers_"):
            raise AttributeError("Model has not been trained yet.")

    def fit(self, X, y=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        self.cluster_centers_, self.labels_, self.inertia_ = k_means(
            X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init,
            max_iter=self.max_iter, verbose=self.verbose,
            precompute_distances=self.precompute_distances,
            tol=self.tol, random_state=random_state, copy_x=self.copy_x,
            n_jobs=self.n_jobs)
        return self

    def fit_predict(self, X):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space

        In the new space, each dimension is the distance to the cluster
        centers.  Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        self._check_fitted()
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        self._check_fitted()
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]

    def score(self, X):
        """Opposite of the value of X on the K-means objective.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        self._check_fitted()
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm

    Parameters
    ----------

    X: array, shape (n_samples, n_features)
        The original data array.

    x_squared_norms: array, shape (n_samples,)
        Squared euclidean norm of each data point.

    centers: array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE

    counts: array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE

    old_center_buffer: array, shape (n_features,)
        Scratch buffer used to hold a center's previous position while it
        is updated; only read when compute_squared_diff is true.

    compute_squared_diff: bool
        If true, also accumulate and return the total squared movement of
        the centers during this step (used for tol-based early stopping).

    distances: array, dtype float64, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to it's closest center.
        May not be None when random_reassign is True.

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    random_reassign: boolean, optional
        If True, centers with very low counts are
        randomly-reassigned to observations in dense areas.

    reassignment_ratio: float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose: bool, optional
        Controls the verbosity

    Returns
    -------
    inertia: float
        Sum of squared distances of the batch samples to their assigned
        centers (computed before the centers are moved).

    squared_diff: float
        Total squared movement of the centers during this step; 0.0 when
        compute_squared_diff is false (the sparse path delegates both
        values to the cython helper).
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = np.logical_or(
            (counts <= 1), counts <= reassignment_ratio * counts.max())
        # NOTE(review): if to_reassign.sum() exceeds the batch size,
        # n_reassigns is clipped but `centers[to_reassign]` below still
        # selects to_reassign.sum() rows — verify this shape mismatch
        # cannot occur with the batch sizes used by the callers.
        n_reassigns = min(to_reassign.sum(), X.shape[0])
        if n_reassigns:
            # Pick new clusters amongst observations with probability
            # proportional to their closeness to their center.
            # Flip the ordering of the distances.
            distances -= distances.max()
            distances *= -1
            rand_vals = random_state.rand(n_reassigns)
            rand_vals *= distances.sum()
            new_centers = np.searchsorted(distances.cumsum(),
                                          rand_vals)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                assign_rows_csr(X, new_centers, np.where(to_reassign)[0],
                                centers)
            else:
                centers[to_reassign] = X[new_centers]

    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)

    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()

        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]

            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]

            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)

            # update the count statistics for this center
            counts[center_idx] += count

            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]

            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)

    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff < tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if (ewa_inertia_min is None or ewa_inertia < ewa_inertia_min):
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, optional
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, optional
Control early stopping based on the relative center changes as
measured by a smoothed, variance-normalized of the mean center
squared position changes. This early stopping heuristics is
closer to the one used for the batch variant of the algorithms
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
only algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
Attributes
----------
`cluster_centers_` : array, [n_clusters, n_features]
Coordinates of cluster centers
`labels_` :
Labels of each point (if compute_labels is set to True).
`inertia_` : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of square distances of samples to their nearest
neighbor.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
             batch_size=100, verbose=0, compute_labels=True,
             random_state=None, tol=0.0, max_no_improvement=10,
             init_size=None, n_init=3, reassignment_ratio=0.01):
    """Initialize the estimator; parameters are documented in the class
    docstring above."""
    # Parameters shared with the batch KMeans variant are delegated to
    # the parent class; only the mini-batch specific ones are kept here.
    super(MiniBatchKMeans, self).__init__(
        n_clusters=n_clusters, init=init, max_iter=max_iter,
        verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)

    self.max_no_improvement = max_no_improvement
    self.batch_size = batch_size
    self.compute_labels = compute_labels
    self.init_size = init_size
    self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
    """Compute the centroids on X by chunking it into mini-batches.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Coordinates of the data points to cluster

    Returns
    -------
    self
    """
    random_state = check_random_state(self.random_state)
    X = check_arrays(X, sparse_format="csr", copy=False,
                     check_ccontiguous=True, dtype=np.float64)[0]
    n_samples, n_features = X.shape
    if n_samples < self.n_clusters:
        raise ValueError("Number of samples smaller than number "
                         "of clusters.")

    # User-provided array of initial centers: make it C-contiguous
    # float64 so the Cython helpers can use it without copying.
    if hasattr(self.init, '__array__'):
        self.init = np.ascontiguousarray(self.init, dtype=np.float64)

    x_squared_norms = row_norms(X, squared=True)

    if self.tol > 0.0:
        tol = _tolerance(X, self.tol)

        # using tol-based early stopping needs the allocation of a
        # dedicated buffer which can be expensive for high dim data:
        # hence we allocate it outside of the main loop
        old_center_buffer = np.zeros(n_features, np.double)
    else:
        tol = 0.0
        # no need for the center buffer if tol-based early stopping is
        # disabled
        old_center_buffer = np.zeros(0, np.double)

    distances = np.zeros(self.batch_size, dtype=np.float64)
    n_batches = int(np.ceil(float(n_samples) / self.batch_size))
    n_iter = int(self.max_iter * n_batches)

    init_size = self.init_size
    if init_size is None:
        init_size = 3 * self.batch_size
    if init_size > n_samples:
        init_size = n_samples
    self.init_size_ = init_size

    # NOTE(review): random_integers samples with *inclusive* bounds and
    # with replacement; the validation set may contain duplicates.
    validation_indices = random_state.random_integers(
        0, n_samples - 1, init_size)
    X_valid = X[validation_indices]
    x_squared_norms_valid = x_squared_norms[validation_indices]

    # perform several inits with random sub-sets
    best_inertia = None
    for init_idx in range(self.n_init):
        if self.verbose:
            print("Init %d/%d with method: %s"
                  % (init_idx + 1, self.n_init, self.init))
        counts = np.zeros(self.n_clusters, dtype=np.int32)

        # TODO: once the `k_means` function works with sparse input we
        # should refactor the following init to use it instead.

        # Initialize the centers using only a fraction of the data as we
        # expect n_samples to be very large when using MiniBatchKMeans
        cluster_centers = _init_centroids(
            X, self.n_clusters, self.init,
            random_state=random_state,
            x_squared_norms=x_squared_norms,
            init_size=init_size)

        # Compute the label assignment on the init dataset
        batch_inertia, centers_squared_diff = _mini_batch_step(
            X_valid, x_squared_norms[validation_indices],
            cluster_centers, counts, old_center_buffer, False,
            distances=distances, verbose=self.verbose)

        # Keep only the best cluster centers across independent inits on
        # the common validation set
        _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                     cluster_centers)
        if self.verbose:
            print("Inertia for init %d/%d: %f"
                  % (init_idx + 1, self.n_init, inertia))
        if best_inertia is None or inertia < best_inertia:
            self.cluster_centers_ = cluster_centers
            self.counts_ = counts
            best_inertia = inertia

    # Empty context to be used inplace by the convergence check routine
    convergence_context = {}

    # Perform the iterative optimization until the final convergence
    # criterion
    for iteration_idx in range(n_iter):
        # Sample a minibatch from the full dataset
        minibatch_indices = random_state.random_integers(
            0, n_samples - 1, self.batch_size)

        # Perform the actual update step on the minibatch data
        batch_inertia, centers_squared_diff = _mini_batch_step(
            X[minibatch_indices], x_squared_norms[minibatch_indices],
            self.cluster_centers_, self.counts_,
            old_center_buffer, tol > 0.0, distances=distances,
            # Here we randomly choose whether to perform
            # random reassignment: the choice is done as a function
            # of the iteration index, and the minimum number of
            # counts, in order to force this reassignment to happen
            # every once in a while
            random_reassign=((iteration_idx + 1)
                             % (10 + self.counts_.min()) == 0),
            random_state=random_state,
            reassignment_ratio=self.reassignment_ratio,
            verbose=self.verbose)

        # Monitor convergence and do early stopping if necessary
        if _mini_batch_convergence(
                self, iteration_idx, n_iter, tol, n_samples,
                centers_squared_diff, batch_inertia, convergence_context,
                verbose=self.verbose):
            break

    # Optionally assign labels/inertia on the *full* dataset (done in
    # mini-batches to bound memory usage).
    if self.compute_labels:
        self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)

    return self
def _labels_inertia_minibatch(self, X):
    """Compute labels and inertia using mini batches.

    This is slightly slower than doing everything at once but prevents
    memory errors / segfaults.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    labels : array, shape (n_samples,)
        Cluster labels for each point.
    inertia : float
        Sum of squared distances of points to nearest cluster.
    """
    if self.verbose:
        print('Computing label assignment and total inertia')
    x_squared_norms = row_norms(X, squared=True)
    # Process the data batch_size rows at a time to bound peak memory.
    slices = gen_batches(X.shape[0], self.batch_size)
    results = [_labels_inertia(X[s], x_squared_norms[s],
                               self.cluster_centers_) for s in slices]
    labels, inertia = zip(*results)
    # Concatenate the per-batch labels and sum the per-batch inertias.
    return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
    """Update k means estimate on a single mini-batch X.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Coordinates of the data points to cluster.

    Returns
    -------
    self
    """
    X = check_arrays(X, sparse_format="csr", copy=False)[0]
    n_samples, n_features = X.shape
    if hasattr(self.init, '__array__'):
        self.init = np.ascontiguousarray(self.init, dtype=np.float64)

    # Nothing to do on an empty batch.
    if n_samples == 0:
        return self

    x_squared_norms = row_norms(X, squared=True)
    self.random_state_ = check_random_state(self.random_state)
    if (not hasattr(self, 'counts_')
            or not hasattr(self, 'cluster_centers_')):
        # this is the first call to partial_fit on this object:
        # initialize the cluster centers
        self.cluster_centers_ = _init_centroids(
            X, self.n_clusters, self.init,
            random_state=self.random_state_,
            x_squared_norms=x_squared_norms, init_size=self.init_size)

        self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
        random_reassign = False
        distances = None
    else:
        # The lower the minimum count is, the more we do random
        # reassignment, however, we don't want to do random
        # reassignment too often, to allow for building up counts
        random_reassign = self.random_state_.randint(
            10 * (1 + self.counts_.min())) == 0
        distances = np.zeros(self.n_clusters, dtype=np.float64)

    _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                     self.counts_, np.zeros(0, np.double), 0,
                     random_reassign=random_reassign, distances=distances,
                     random_state=self.random_state_,
                     reassignment_ratio=self.reassignment_ratio,
                     verbose=self.verbose)

    if self.compute_labels:
        self.labels_, self.inertia_ = _labels_inertia(
            X, x_squared_norms, self.cluster_centers_)

    return self
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    self._check_fitted()
    X = self._check_test_data(X)
    # Only the labels are needed; the inertia computed alongside them
    # is discarded.
    return self._labels_inertia_minibatch(X)[0]
|
CyanogenMod/android_external_chromium_org | refs/heads/cm-12.0 | third_party/tlslite/tlslite/utils/rc4.py | 149 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Abstract class for RC4."""
class RC4(object):
    """Abstract base class for RC4 stream cipher implementations.

    Concrete subclasses must override :meth:`encrypt` and
    :meth:`decrypt`.
    """

    def __init__(self, keyBytes, implementation):
        """Validate the key and record cipher metadata.

        :param keyBytes: key material; must be 16 to 256 bytes long
            (128 to 2048 bits).
        :param implementation: short string naming the backing
            implementation (e.g. a crypto library identifier).
        :raises ValueError: if the key length is out of range.
        """
        # Previously raised a bare ValueError(); include a message so
        # callers can tell why key setup failed.
        if len(keyBytes) < 16 or len(keyBytes) > 256:
            raise ValueError("RC4 key must be between 16 and 256 bytes")
        self.isBlockCipher = False  # RC4 is a stream cipher
        self.name = "rc4"
        self.implementation = implementation

    def encrypt(self, plaintext):
        """Encrypt *plaintext*; must be implemented by a subclass."""
        raise NotImplementedError()

    def decrypt(self, ciphertext):
        """Decrypt *ciphertext*; must be implemented by a subclass."""
        raise NotImplementedError()
akosyakov/intellij-community | refs/heads/master | python/testData/highlighting/escapedBackslash.py | 83 | str = 'a a \\' |
benjsto/ronquixote | refs/heads/master | djangoappengine/tests/test_not_return_sets.py | 28 | import datetime
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.test import TestCase
from .models import FieldsWithOptionsModel, OrderedModel, \
SelfReferenceModel
class NonReturnSetsTest(TestCase):
    """Tests for QuerySet operations that return a single value or
    object instead of a result set: get, count, in_bulk, latest,
    exists, delete, and foreign-key accessors (forward and backward).
    """

    # Parallel fixture data: floats[i] is paired with emails[i] in setUp.
    floats = [5.3, 2.6, 9.1, 1.58, 2.4]
    emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
              'rinnengan@sage.de', 'rasengan@naruto.com', 'itachi@uchia.com']

    def setUp(self):
        """Create one OrderedModel and one FieldsWithOptionsModel per
        (float, email) fixture pair, linked by a ForeignKey."""
        # NOTE(review): the loop variable `float` shadows the builtin of
        # the same name within this method body.
        for index, (float, email) in enumerate(zip(NonReturnSetsTest.floats,
                                                   NonReturnSetsTest.emails)):
            self.last_save_time = datetime.datetime.now().time()
            ordered_instance = OrderedModel(priority=index, pk=index + 1)
            ordered_instance.save()
            model = FieldsWithOptionsModel(floating_point=float,
                                           integer=int(float), email=email,
                                           time=self.last_save_time,
                                           foreign_key=ordered_instance)
            model.save()

    def test_get(self):
        """get() returns a single object, or raises on 0 or >1 matches."""
        self.assertEquals(
            FieldsWithOptionsModel.objects.get(
                email='itachi@uchia.com').email,
            'itachi@uchia.com')

        # Test exception when matching multiple entities.
        self.assertRaises(MultipleObjectsReturned,
                          FieldsWithOptionsModel.objects.get,
                          integer=2)

        # Test exception when entity does not exist.
        self.assertRaises(ObjectDoesNotExist,
                          FieldsWithOptionsModel.objects.get,
                          floating_point=5.2)

        # TODO: Test create when djangos model.save_base is refactored.
        # TODO: Test get_or_create when refactored.

    def test_count(self):
        """count() on a filtered queryset returns the number of matches."""
        self.assertEquals(
            FieldsWithOptionsModel.objects.filter(integer=2).count(), 2)

    def test_in_bulk(self):
        """in_bulk() returns a dict keyed by the requested primary keys."""
        self.assertEquals(
            [key in ['sharingan@uchias.com', 'itachi@uchia.com']
             for key in FieldsWithOptionsModel.objects.in_bulk(
                ['sharingan@uchias.com', 'itachi@uchia.com']).keys()],
            [True, ] * 2)

    def test_latest(self):
        """latest('time') returns the most recently saved instance."""
        self.assertEquals(
            FieldsWithOptionsModel.objects.latest('time').email,
            'itachi@uchia.com')

    def test_exists(self):
        """exists() is True when the table has rows."""
        self.assertEquals(FieldsWithOptionsModel.objects.exists(), True)

    def test_deletion(self):
        """delete() works on single instances and filtered querysets."""
        # TODO: ForeignKeys will not be deleted! This has to be done
        # via background tasks.
        self.assertEquals(FieldsWithOptionsModel.objects.count(), 5)

        FieldsWithOptionsModel.objects.get(email='itachi@uchia.com').delete()
        self.assertEquals(FieldsWithOptionsModel.objects.count(), 4)

        FieldsWithOptionsModel.objects.filter(email__in=[
            'sharingan@uchias.com', 'itachi@uchia.com',
            'rasengan@naruto.com', ]).delete()
        self.assertEquals(FieldsWithOptionsModel.objects.count(), 2)

    def test_selfref_deletion(self):
        """A model with a self-referencing FK can be saved and deleted."""
        entity = SelfReferenceModel()
        entity.save()
        entity.delete()

    def test_foreign_key_fetch(self):
        """Accessing a forward ForeignKey returns the related instance."""
        # Test fetching the ForeignKey.
        ordered_instance = OrderedModel.objects.get(priority=2)
        self.assertEquals(
            FieldsWithOptionsModel.objects.get(integer=9).foreign_key,
            ordered_instance)

    def test_foreign_key_backward(self):
        """Exercise the reverse FK manager: count/add/remove/clear/assign."""
        entity = OrderedModel.objects.all()[0]
        self.assertEquals(entity.keys.count(), 1)
        # TODO: Add should save the added instance transactional via for
        # example force_insert.
        new_foreign_key = FieldsWithOptionsModel(
            floating_point=5.6, integer=3,
            email='temp@temp.com', time=datetime.datetime.now())
        entity.keys.add(new_foreign_key)
        self.assertEquals(entity.keys.count(), 2)

        # TODO: Add test for create.

        entity.keys.remove(new_foreign_key)
        self.assertEquals(entity.keys.count(), 1)

        entity.keys.clear()
        self.assertTrue(not entity.keys.exists())

        # Assigning the same instance twice stores it only once.
        entity.keys = [new_foreign_key, new_foreign_key]
        self.assertEquals(entity.keys.count(), 1)
        self.assertEquals(entity.keys.all()[0].integer, 3)
|
plivo/sharq | refs/heads/master | sharq/utils.py | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Plivo Team. See LICENSE.txt for details.
import time
import msgpack
# Characters allowed in a SharQ identifier (compared case-insensitively).
VALID_IDENTIFIER_SET = set('abcdefghijklmnopqrstuvwxyz0123456789_-')


def is_valid_identifier(identifier):
    """Check whether *identifier* is a valid SharQ identifier.

    A valid identifier is a string of 1 to 100 characters drawn
    (case-insensitively) from:

    - A to Z
    - a to z
    - 0 to 9
    - _ (underscore)
    - - (hyphen)

    Returns True if valid, False otherwise.
    """
    if not isinstance(identifier, str):
        return False
    # Chained comparison replaces the previous pair of length tests.
    if not 1 <= len(identifier) <= 100:
        return False
    # set() accepts a string directly; the intermediate list() was
    # unnecessary.
    return set(identifier.lower()).issubset(VALID_IDENTIFIER_SET)
def is_valid_interval(interval):
    """Validate an interval value.

    An interval is valid when it is a positive, non-zero integer.
    Returns True if valid, False otherwise.
    """
    return isinstance(interval, int) and interval > 0
def is_valid_requeue_limit(requeue_limit):
    """Validate a requeue limit.

    A requeue limit is valid when it is an integer greater than or
    equal to -1 (-1 conventionally meaning "unlimited").
    Returns True if valid, False otherwise.
    """
    return isinstance(requeue_limit, int) and requeue_limit >= -1
def serialize_payload(payload):
    """Serialize *payload* to msgpack bytes.

    Raises TypeError when the payload contains values that msgpack
    cannot encode.
    """
    packed = msgpack.packb(payload, use_bin_type=True)
    return packed
def deserialize_payload(payload):
    """Deserialize a msgpack-encoded *payload* (bytes) back into
    Python objects.

    Payloads written by pre-Python-3 versions of SharQ were stored
    wrapped in literal double-quote bytes; those wrappers are stripped
    before unpacking so old queue entries remain readable.
    """
    # Handle older SharQ payloads as well (before py3 migration)
    if payload.startswith(b'"') and payload.endswith(b'"'):
        # Strip the surrounding quote bytes, then unpack the inner data.
        return msgpack.unpackb(payload[1:-1], raw=False)
    return msgpack.unpackb(payload, raw=False)
def generate_epoch():
    """Return the current Unix epoch time in milliseconds (int)."""
    milliseconds = time.time() * 1000
    return int(milliseconds)
def convert_to_str(queue_set):
    """Return the items of *queue_set* as a list, decoding ``bytes``
    items to ``str`` using UTF-8.

    Items that are not bytes, or whose bytes are not valid UTF-8, are
    kept unchanged. Previously this relied on a broad
    ``except Exception`` (with an unused binding and a dead ``pass``)
    and made a needless ``list()`` copy before iterating.
    """
    queue_list = []
    for queue in queue_set:
        if isinstance(queue, bytes):
            try:
                queue_list.append(queue.decode('utf-8'))
            except UnicodeDecodeError:
                # Undecodable bytes are preserved as-is, matching the
                # original fallback behaviour.
                queue_list.append(queue)
        else:
            queue_list.append(queue)
    return queue_list
|
jamesmishra/nlp-playground | refs/heads/master | nlp_playground/path.py | 1 | """
Find where this package is running on the system.
This module is a singleton to compute the BASE_DIR.
You can change this value if we're executing outside
the typical directory structure.
"""
import os
# Optional override for the project base directory; None means "derive
# it from this module's location".
CUSTOM_PATH = None


def base_path():
    """
    Return the base location of the project.

    This is supposed to be the parent of the data directory and some
    other stuff. Set `nlp_playground.path.CUSTOM_PATH` to override the
    default assumption.
    """
    if CUSTOM_PATH:
        return CUSTOM_PATH
    return os.path.dirname(os.path.dirname(__file__))
|
knneth/teacup | refs/heads/master | example_configs/config-scenario5.py | 1 | # Three tcp flows between 3 pairs of hosts with staggered start, tcp congestion
# algorithms are varied
#
# $Id: $
import sys
import datetime
from fabric.api import env

#
# Fabric config
#

# User and password
env.user = 'root'
env.password = 'rootpw'
# Set shell used to execute commands
env.shell = '/bin/sh -c'

#
# Testbed config
#

# Path to teacup scripts
TPCONF_script_path = '/home/teacup/teacup-0.8'
# DO NOT remove the following line
sys.path.append(TPCONF_script_path)

# Set debugging level (0 = no debugging info output)
TPCONF_debug_level = 0

# Host lists
TPCONF_router = ['newtcprt3', ]
TPCONF_hosts = [ 'newtcp20', 'newtcp21', 'newtcp22', 'newtcp27', 'newtcp28', 'newtcp29', ]

# Map external IPs to internal IPs
TPCONF_host_internal_ip = {
    'newtcprt3': ['172.16.10.1', '172.16.11.1'],
    'newtcp20': ['172.16.10.60'],
    'newtcp21': ['172.16.10.61'],
    'newtcp22': ['172.16.10.62'],
    'newtcp27': ['172.16.11.67'],
    'newtcp28': ['172.16.11.68'],
    'newtcp29': ['172.16.11.69'],
}

#
# Reboot configuration
#

#
# Experiment settings
#

# Maximum allowed time difference between machines in seconds
# otherwise experiment will abort cause synchronisation problems
TPCONF_max_time_diff = 1

# Experiment name prefix used if not set on the command line
# The command line setting will overrule this config setting
now = datetime.datetime.today()
TPCONF_test_id = now.strftime("%Y%m%d-%H%M%S") + '_scenario5'

# Directory to store log files on remote host
TPCONF_remote_dir = '/tmp/'

# Time offset measurement traffic
# Enable broadcast ping on external/control interfaces
TPCONF_bc_ping_enable = '0'
# Specify rate of pings in packets/second
TPCONF_bc_ping_rate = 1
# Specify multicast address to use (must be broadcast or multicast address)
# If this is not specified, by default the ping will be sent to the subnet
# broadcast address.
TPCONF_bc_ping_address = '224.0.1.199'

#
# List of router queues/pipes
#

# Each entry is a tuple. The first value is the queue number and the second value
# is a comma separated list of parameters (see routersetup.py:init_pipe()).
# Queue numbers must be unique.
# Note that variable parameters must be either constants or variable names
# defined by the experimenter. Variables are evaluated during runtime. Variable
# names must start with a 'V_'. Parameter names can only contain numbers, letters
# (upper and lower case), underscores (_), and hyphen/minus (-).
# All variables must be defined in TPCONF_variable_list (see below).
# Note parameters must be configured appropriately for the router OS, e.g. there
# is no CoDel on FreeBSD; otherwise the experiment will abort with an error.
TPCONF_router_queues = [
    # Set same delay for every host
    ('1', " source='172.16.10.0/24', dest='172.16.11.0/24', delay=V_delay, "
     " loss=V_loss, rate=V_up_rate, queue_disc=V_aqm, queue_size=V_bsize "),
    ('2', " source='172.16.11.0/24', dest='172.16.10.0/24', delay=V_delay, "
     " loss=V_loss, rate=V_down_rate, queue_disc=V_aqm, queue_size=V_bsize "),
]

#
# List of traffic generators
#

# Each entry is a 3-tuple. the first value of the tuple must be a float and is the
# time relative to the start of the experiment when tasks are executed. If two tasks
# have the same start time their start order is arbitrary. The second entry of the
# tuple is the task number and must be a unique integer (used as ID for the process).
# The last value of the tuple is a comma separated list of parameters (see the tasks
# defined in trafficgens.py); the first parameter of this list must be the
# task name.
# Client and server can be specified using the external/control IP addresses or host
# names. Then the actual interface used is the _first_ internal address (according to
# TPCONF_host_internal_ip). Alternatively, client and server can be specified as
# internal addresses, which allows to use any internal interfaces configured.
traffic_iperf = [
    # Specifying external addresses traffic will be created using the _first_
    # internal addresses (according to TPCONF_host_internal_ip)
    ('0.0', '1', " start_iperf, client='newtcp27', server='newtcp20', port=5000, "
     " duration=V_duration "),
    ('10.0', '2', " start_iperf, client='newtcp28', server='newtcp21', port=5001, "
     " duration=V_duration "),
    ('20.0', '3', " start_iperf, client='newtcp29', server='newtcp22', port=5002, "
     " duration=V_duration "),
]

# THIS is the traffic generator setup we will use
TPCONF_traffic_gens = traffic_iperf

#
# Traffic parameters
#

# Duration in seconds of traffic
TPCONF_duration = 30

# Number of runs for each setting
TPCONF_runs = 1

# TCP congestion control algorithm used
# Possible algos are: default, host<N>, newreno, cubic, cdg, hd, htcp, compound, vegas
# Note that the algo support is OS specific, so must ensure the right OS is booted
# Windows: newreno (default), compound
# FreeBSD: newreno (default), cubic, hd, htcp, cdg, vegas
# Linux: newreno, cubic (default), htcp, vegas
# Mac: newreno
# If you specify 'default' the default algorithm depending on the OS will be used
# If you specify 'host<N>' where <N> is an integer starting from 0 then the
# algorithm will be the N-th algorithm specified for the host in TPCONF_host_TCP_algos
# (in case <N> is larger than the number of algorithms specified, it is set to 0)
TPCONF_TCP_algos = ['newreno', 'cubic', ]

# Specify TCP congestion control algorithms used on each host
TPCONF_host_TCP_algos = {
}

# Specify TCP parameters for each host and each TCP congestion control algorithm
# Each parameter is of the form <sysctl name> = <value> where <value> can be a constant
# or a V_ variable
TPCONF_host_TCP_algo_params = {
}

# Specify arbitrary commands that are executed on a host at the end of the host
# initialisation (after general host setup, ecn and tcp setup). The commands are
# executed in the shell as written after any V_ variables have been replaced.
# LIMITATION: only one V_ variable per command
TPCONF_host_init_custom_cmds = {
}

# Emulated delays in ms
TPCONF_delays = [0, 25, 50]

# Emulated loss rates
TPCONF_loss_rates = [0]

# Emulated bandwidths (downstream, upstream)
TPCONF_bandwidths = [
    ('8mbit', '1mbit'),
    ('20mbit', '1.4mbit'),
]

# AQM
# Linux: fifo (mapped to pfifo), pfifo, bfifo, fq_codel, codel, pie, red, ...
# (see tc man page for full list)
# FreeBSD: fifo, red
TPCONF_aqms = ['pfifo', ]

# Buffer size
# If router is Linux this is mostly in packets/slots, but it depends on AQM
# (e.g. for bfifo it's bytes)
# If router is FreeBSD this would be in slots by default, but we can specify byte sizes
# (e.g. we can specify 4Kbytes)
TPCONF_buffer_sizes = [100]

#
# List of all parameters that can be varied and default values
#

# The key of each item is the identifier that can be used in TPCONF_vary_parameters
# (see below).
# The value of each item is a 4-tuple. First, a list of variable names.
# Second, a list of short names used for the file names.
# For each parameter varied a string '_<short_name>_<value>' is appended to the log
# file names (appended to chosen prefix). Note, short names should only be letters
# from a-z or A-Z. Do not use underscores or hyphens!
# Third, the list of parameters values. If there is more than one variable this must
# be a list of tuples, each tuple having the same number of items as the number of
# variables. Fourth, an optional dictionary with additional variables, where the keys
# are the variable names and the values are the variable values.
TPCONF_parameter_list = {
    # Vary name       V_ variable                   file name      values                  extra vars
    'delays':     (['V_delay'],                 ['del'],        TPCONF_delays,          {}),
    'loss':       (['V_loss'],                  ['loss'],       TPCONF_loss_rates,      {}),
    'tcpalgos':   (['V_tcp_cc_algo'],           ['tcp'],        TPCONF_TCP_algos,       {}),
    'aqms':       (['V_aqm'],                   ['aqm'],        TPCONF_aqms,            {}),
    'bsizes':     (['V_bsize'],                 ['bs'],         TPCONF_buffer_sizes,    {}),
    'runs':       (['V_runs'],                  ['run'],        range(TPCONF_runs),     {}),
    'bandwidths': (['V_down_rate', 'V_up_rate'], ['down', 'up'], TPCONF_bandwidths,     {}),
}

# Default setting for variables (used for variables if not varied)
# The key of each item is the parameter name. The value of each item is the default
# parameter value used if the variable is not varied.
TPCONF_variable_defaults = {
    # V_ variable         value
    'V_duration':     TPCONF_duration,
    'V_delay':        TPCONF_delays[0],
    'V_loss':         TPCONF_loss_rates[0],
    'V_tcp_cc_algo':  TPCONF_TCP_algos[0],
    'V_down_rate':    TPCONF_bandwidths[0][0],
    'V_up_rate':      TPCONF_bandwidths[0][1],
    'V_aqm':          TPCONF_aqms[0],
    'V_bsize':        TPCONF_buffer_sizes[0],
}

# Specify the parameters we vary through all values, all others will be fixed
# according to TPCONF_variable_defaults
TPCONF_vary_parameters = ['tcpalgos', 'delays', 'bandwidths', 'aqms', 'runs',]
|
sutartmelson/girder | refs/heads/master | plugins/thumbnails/plugin_tests/__init__.py | 12133432 | |
GinnyN/towerofdimensions-django | refs/heads/master | django-allauth/build/lib/allauth/socialaccount/providers/oauth2/__init__.py | 12133432 | |
vicky2135/lucious | refs/heads/master | tests/functional/basket/__init__.py | 12133432 | |
iamutkarshtiwari/sympy | refs/heads/master | sympy/physics/tests/test_qho_1d.py | 88 | from sympy import exp, integrate, oo, Rational, pi, S, simplify, sqrt, Symbol
from sympy.core.compatibility import range
from sympy.abc import omega, m, x
from sympy.physics.qho_1d import psi_n, E_n, coherent_state
from sympy.physics.quantum.constants import hbar
# Characteristic inverse length-squared scale of the oscillator,
# nu = m*omega/hbar, shared by the reference wavefunctions below.
nu = m * omega / hbar


def test_wavefunction():
    # Reference eigenfunctions of the 1D quantum harmonic oscillator
    # for n = 0..3 in standard closed form; psi_n must match each one
    # symbolically (difference simplifies to zero).
    Psi = {
        0: (nu/pi)**(S(1)/4) * exp(-nu * x**2 /2),
        1: (nu/pi)**(S(1)/4) * sqrt(2*nu) * x * exp(-nu * x**2 /2),
        2: (nu/pi)**(S(1)/4) * (2 * nu * x**2 - 1)/sqrt(2) * exp(-nu * x**2 /2),
        3: (nu/pi)**(S(1)/4) * sqrt(nu/3) * (2 * nu * x**3 - 3 * x) * exp(-nu * x**2 /2)
    }
    for n in Psi:
        assert simplify(psi_n(n, x, m, omega) - Psi[n]) == 0
def test_norm(n=1):
    # Maximum "n" which is tested:
    # Each eigenfunction must be normalised: integral of psi_i**2 over
    # the real line equals 1 (with m = omega = 1).
    for i in range(n + 1):
        assert integrate(psi_n(i, x, 1, 1)**2, (x, -oo, oo)) == 1
def test_orthogonality(n=1):
    # Maximum "n" which is tested:
    # Distinct eigenfunctions must be orthogonal: the overlap integral
    # of psi_i and psi_j vanishes for all i < j.
    for i in range(n + 1):
        for j in range(i + 1, n + 1):
            assert integrate(
                psi_n(i, x, 1, 1)*psi_n(j, x, 1, 1), (x, -oo, oo)) == 0
def test_energies(n=1):
    # Maximum "n" which is tested:
    # Energy eigenvalues must follow E_n = hbar*omega*(n + 1/2).
    for i in range(n + 1):
        assert E_n(i, omega) == hbar * omega * (i + Rational(1, 2))
def test_coherent_state(n=10):
    # Maximum "n" which is tested:
    # Test whether the coherent state is an eigenstate of the
    # annihilation operator via the coefficient recurrence
    # sqrt(i+1) * c_{i+1} == alpha * c_i.
    #
    # Bug fix: the loop variable was previously unused and the body
    # referenced the fixed outer `n`, so the same single case was
    # re-checked n+1 times instead of checking all i = 0..n.
    alpha = Symbol("alpha")
    for i in range(n + 1):
        assert simplify(sqrt(i + 1) * coherent_state(i + 1, alpha)) == simplify(alpha * coherent_state(i, alpha))
|
Azure/azure-storage-python | refs/heads/master | azure-storage-blob/azure/storage/blob/baseblobservice.py | 1 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
import uuid
from abc import ABCMeta
from azure.common import AzureHttpError
from azure.storage.common._auth import (
_StorageSASAuthentication,
_StorageSharedKeyAuthentication,
_StorageNoAuthentication,
)
from azure.storage.common._common_conversion import (
_int_to_str,
_to_str,
_datetime_to_utc_string,
)
from azure.storage.common._connection import _ServiceParameters
from azure.storage.common._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from azure.storage.common._deserialization import (
_convert_xml_to_service_properties,
_parse_metadata,
_parse_properties,
_convert_xml_to_service_stats,
_parse_length_from_content_range,
)
from azure.storage.common._error import (
_dont_fail_not_exist,
_dont_fail_on_exist,
_validate_not_none,
_validate_decryption_required,
_validate_access_policies,
_ERROR_PARALLEL_NOT_SEEKABLE,
_validate_user_delegation_key,
)
from azure.storage.common._http import HTTPRequest
from azure.storage.common._serialization import (
_get_request_body,
_convert_signed_identifiers_to_xml,
_convert_service_properties_to_xml,
_add_metadata_headers,
_update_request, _add_date_header)
from azure.storage.common.models import (
Services,
ListGenerator,
_OperationContext,
)
from .sharedaccesssignature import (
BlobSharedAccessSignature,
)
from azure.storage.common.storageclient import StorageClient
from ._deserialization import (
_convert_xml_to_containers,
_parse_blob,
_convert_xml_to_blob_list,
_convert_xml_to_blob_name_list,
_parse_container,
_parse_snapshot_blob,
_parse_lease,
_convert_xml_to_signed_identifiers_and_access,
_parse_base_properties,
_parse_account_information,
_convert_xml_to_user_delegation_key,
_ingest_batch_response)
from ._download_chunking import _download_blob_chunks
from ._error import (
_ERROR_INVALID_LEASE_DURATION,
_ERROR_INVALID_LEASE_BREAK_PERIOD,
)
from ._serialization import (
_get_path,
_validate_and_format_range_headers,
_convert_delegation_key_info_to_xml,
_get_batch_request_delimiter,
_serialize_batch_body,
_validate_and_add_cpk_headers,
)
from .models import (
BlobProperties,
_LeaseActions,
ContainerPermissions,
BlobPermissions,
)
from ._constants import (
X_MS_VERSION,
__version__ as package_version,
)
# Error-code strings returned by the Azure Storage service; presumably
# matched against AzureHttpError responses elsewhere in this module —
# usage is outside this chunk, verify against the calling methods.
_CONTAINER_ALREADY_EXISTS_ERROR_CODE = 'ContainerAlreadyExists'
_BLOB_NOT_FOUND_ERROR_CODE = 'BlobNotFound'
_CONTAINER_NOT_FOUND_ERROR_CODE = 'ContainerNotFound'

# Python 2/3 compatibility shim: expose a BytesIO name on both major
# versions (cStringIO.StringIO stands in for it on Python 2).
if sys.version_info >= (3,):
    from io import BytesIO
else:
    from cStringIO import StringIO as BytesIO
class BaseBlobService(StorageClient):
'''
This is the main class managing Blob resources.
The Blob service stores text and binary data as blobs in the cloud.
The Blob service offers the following three resources: the storage account,
containers, and blobs. Within your storage account, containers provide a
way to organize sets of blobs. For more information please see:
https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx
:ivar int MAX_SINGLE_GET_SIZE:
The size of the first range get performed by get_blob_to_* methods if
max_connections is greater than 1. Less data will be returned if the
blob is smaller than this.
:ivar int MAX_CHUNK_GET_SIZE:
The size of subsequent range gets performed by get_blob_to_* methods if
max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE.
Less data will be returned if the remainder of the blob is smaller than
this. If this is set to larger than 4MB, content_validation will throw an
error if enabled. However, if content_validation is not desired a size
greater than 4MB may be optimal. Setting this below 4MB is not recommended.
:ivar object key_encryption_key:
The key-encryption-key optionally provided by the user. If provided, will be used to
encrypt/decrypt in supported methods.
For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
If both are provided, the resolver will take precedence.
Must implement the following methods for APIs requiring encryption:
wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
Must implement the following methods for APIs requiring decryption:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:ivar function key_resolver_function(kid):
A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
For methods requiring decryption, either the key_encryption_key OR
the resolver must be provided. If both are provided, the resolver will take precedence.
It uses the kid string to return a key-encryption-key implementing the interface defined above.
:ivar bool require_encryption:
A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and
successfully read from the queue are/were encrypted while on the server. If this flag is set, all required
parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
'''
__metaclass__ = ABCMeta
MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024
MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024
def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False,
             protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None,
             connection_string=None, socket_timeout=None, token_credential=None):
    '''
    :param str account_name:
        The storage account name. This is used to authenticate requests
        signed with an account key and to construct the storage endpoint. It
        is required unless a connection string is given, or if a custom
        domain is used with anonymous authentication.
    :param str account_key:
        The storage account key. This is used for shared key authentication.
        If neither account key or sas token is specified, anonymous access
        will be used.
    :param str sas_token:
        A shared access signature token to use to authenticate requests
        instead of the account key. If account key and sas token are both
        specified, account key will be used to sign. If neither are
        specified, anonymous access will be used.
    :param bool is_emulated:
        Whether to use the emulator. Defaults to False. If specified, will
        override all other parameters besides connection string and request
        session.
    :param str protocol:
        The protocol to use for requests. Defaults to https.
    :param str endpoint_suffix:
        The host base component of the url, minus the account name. Defaults
        to Azure (core.windows.net). Override this to use the China cloud
        (core.chinacloudapi.cn).
    :param str custom_domain:
        The custom domain to use. This can be set in the Azure Portal. For
        example, 'www.mydomain.com'.
    :param requests.Session request_session:
        The session object to use for http requests.
    :param str connection_string:
        If specified, this will override all other parameters besides
        request session. See
        http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
        for the connection string format
    :param int socket_timeout:
        If specified, this will override the default socket timeout. The timeout specified is in seconds.
        See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
    :param token_credential:
        A token credential used to authenticate HTTPS requests. The token value
        should be updated before its expiration.
    :type `~azure.storage.common.TokenCredential`
    '''
    # Resolve the effective configuration from the many possible sources
    # (connection string / emulator flag override individual parameters).
    service_params = _ServiceParameters.get_service_parameters(
        'blob',
        account_name=account_name,
        account_key=account_key,
        sas_token=sas_token,
        token_credential=token_credential,
        is_emulated=is_emulated,
        protocol=protocol,
        endpoint_suffix=endpoint_suffix,
        custom_domain=custom_domain,
        request_session=request_session,
        connection_string=connection_string,
        socket_timeout=socket_timeout)
    # Base-class __init__ must run before reading self.account_key etc. below.
    super(BaseBlobService, self).__init__(service_params)

    # Authentication precedence: shared key > SAS token > token credential >
    # anonymous.
    if self.account_key:
        self.authentication = _StorageSharedKeyAuthentication(
            self.account_name,
            self.account_key,
            self.is_emulated
        )
    elif self.sas_token:
        self.authentication = _StorageSASAuthentication(self.sas_token)
    elif self.token_credential:
        self.authentication = self.token_credential
    else:
        self.authentication = _StorageNoAuthentication()

    # Client-side encryption is disabled by default; see the class docstring
    # for how these three attributes interact.
    self.require_encryption = False
    self.key_encryption_key = None
    self.key_resolver_function = None

    self._X_MS_VERSION = X_MS_VERSION
    self._update_user_agent_string(package_version)
def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None):
    '''
    Build the URL used to access a blob.

    :param str container_name:
        Name of container.
    :param str blob_name:
        Name of blob.
    :param str protocol:
        Protocol to use: 'http' or 'https'. If not specified, uses the
        protocol specified when BaseBlobService was initialized.
    :param str sas_token:
        Shared access signature token created with
        generate_shared_access_signature.
    :param str snapshot:
        An string value that uniquely identifies the snapshot. The value of
        this query parameter indicates the snapshot version.
    :return: blob access URL.
    :rtype: str
    '''
    base_url = '{}://{}/{}/{}'.format(
        protocol or self.protocol,
        self.primary_endpoint,
        container_name,
        blob_name,
    )

    # Assemble the optional query string; the snapshot parameter always
    # precedes the SAS token when both are present.
    query_parts = []
    if snapshot:
        query_parts.append('snapshot={}'.format(snapshot))
    if sas_token:
        query_parts.append(sas_token)

    if query_parts:
        return '{}?{}'.format(base_url, '&'.join(query_parts))
    return base_url
def make_container_url(self, container_name, protocol=None, sas_token=None):
    '''
    Build the URL used to access a container.

    :param str container_name:
        Name of container.
    :param str protocol:
        Protocol to use: 'http' or 'https'. If not specified, uses the
        protocol specified when BaseBlobService was initialized.
    :param str sas_token:
        Shared access signature token created with
        generate_shared_access_signature.
    :return: container access URL.
    :rtype: str
    '''
    # The restype=container query parameter is always present; a SAS token,
    # when supplied, is appended as additional query parameters.
    url = '{}://{}/{}?restype=container'.format(
        protocol or self.protocol,
        self.primary_endpoint,
        container_name,
    )
    return '{}&{}'.format(url, sas_token) if sas_token else url
def generate_account_shared_access_signature(self, resource_types, permission,
                                             expiry, start=None, ip=None, protocol=None):
    '''
    Generate a shared access signature for the blob service.
    Use the returned signature with the sas_token parameter of any BlobService.

    :param ResourceTypes resource_types:
        Specifies the resource types that are accessible with the account SAS.
    :param AccountPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has been
        specified in an associated stored access policy.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. Azure will always convert values to UTC;
        dates passed in without timezone info are assumed to be UTC.
    :type expiry: datetime or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, this call's time is assumed. Azure will always convert
        values to UTC; naive datetimes are assumed to be UTC.
    :type start: datetime or str
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to
        accept requests, e.g. sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70.
    :param str protocol:
        Specifies the protocol permitted for a request made. The default value
        is https,http. See :class:`~azure.storage.common.models.Protocol`.
    :return: A Shared Access Signature (sas) token.
    :rtype: str
    '''
    # Account SAS can only be signed with the account key.
    _validate_not_none('self.account_name', self.account_name)
    _validate_not_none('self.account_key', self.account_key)

    signer = BlobSharedAccessSignature(self.account_name, self.account_key)
    return signer.generate_account(
        Services.BLOB, resource_types, permission, expiry,
        start=start, ip=ip, protocol=protocol)
def generate_container_shared_access_signature(self, container_name,
                                               permission=None, expiry=None,
                                               start=None, id=None, ip=None, protocol=None,
                                               cache_control=None, content_disposition=None,
                                               content_encoding=None, content_language=None,
                                               content_type=None, user_delegation_key=None):
    '''
    Generate a shared access signature for the container.
    Use the returned signature with the sas_token parameter of any BlobService.

    :param str container_name:
        Name of container.
    :param ContainerPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Permissions must be ordered read, write, delete, list.
        Required unless an id is given referencing a stored access policy
        which contains this field; must be omitted if already specified there.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. Azure will always convert values to UTC;
        naive datetimes are assumed to be UTC.
    :type expiry: datetime or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, this call's time is assumed. Naive datetimes are UTC.
    :type start: datetime or str
    :param str id:
        A unique value up to 64 characters in length that correlates to a
        stored access policy.
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to
        accept requests, e.g. sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70.
    :param str protocol:
        Specifies the protocol permitted for a request made. The default value
        is https,http. See :class:`~azure.storage.common.models.Protocol`.
    :param str cache_control:
        Response header value for Cache-Control when resource is accessed
        using this shared access signature.
    :param str content_disposition:
        Response header value for Content-Disposition when resource is accessed
        using this shared access signature.
    :param str content_encoding:
        Response header value for Content-Encoding when resource is accessed
        using this shared access signature.
    :param str content_language:
        Response header value for Content-Language when resource is accessed
        using this shared access signature.
    :param str content_type:
        Response header value for Content-Type when resource is accessed
        using this shared access signature.
    :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
        Instead of an account key, the user could pass in a user delegation
        key obtained via get_user_delegation_key. When present, the SAS is
        signed with the user delegation key instead.
    :return: A Shared Access Signature (sas) token.
    :rtype: str
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('self.account_name', self.account_name)

    # Sign with either the user delegation key (AAD path) or the account key.
    if user_delegation_key is None:
        _validate_not_none('self.account_key', self.account_key)
        signer = BlobSharedAccessSignature(self.account_name, account_key=self.account_key)
    else:
        _validate_user_delegation_key(user_delegation_key)
        signer = BlobSharedAccessSignature(self.account_name, user_delegation_key=user_delegation_key)

    return signer.generate_container(
        container_name,
        permission,
        expiry,
        start=start,
        id=id,
        ip=ip,
        protocol=protocol,
        cache_control=cache_control,
        content_disposition=content_disposition,
        content_encoding=content_encoding,
        content_language=content_language,
        content_type=content_type,
    )
def generate_blob_shared_access_signature(
        self, container_name, blob_name, snapshot=None, permission=None,
        expiry=None, start=None, id=None, ip=None, protocol=None,
        cache_control=None, content_disposition=None,
        content_encoding=None, content_language=None,
        content_type=None, user_delegation_key=None):
    '''
    Generate a shared access signature for the blob or one of its snapshots.
    Use the returned signature with the sas_token parameter of any BlobService.

    :param str container_name:
        Name of container.
    :param str blob_name:
        Name of blob.
    :param str snapshot:
        The snapshot parameter is an opaque DateTime value that,
        when present, specifies the blob snapshot to grant permission.
    :param BlobPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Permissions must be ordered read, write, delete, list.
        Required unless an id is given referencing a stored access policy
        which contains this field; must be omitted if already specified there.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. Azure will always convert values to UTC;
        naive datetimes are assumed to be UTC.
    :type expiry: datetime or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, this call's time is assumed. Naive datetimes are UTC.
    :type start: datetime or str
    :param str id:
        A unique value up to 64 characters in length that correlates to a
        stored access policy. To create a stored access policy, use
        :func:`~set_container_acl`.
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to
        accept requests, e.g. sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70.
    :param str protocol:
        Specifies the protocol permitted for a request made. The default value
        is https,http. See :class:`~azure.storage.common.models.Protocol`.
    :param str cache_control:
        Response header value for Cache-Control when resource is accessed
        using this shared access signature.
    :param str content_disposition:
        Response header value for Content-Disposition when resource is accessed
        using this shared access signature.
    :param str content_encoding:
        Response header value for Content-Encoding when resource is accessed
        using this shared access signature.
    :param str content_language:
        Response header value for Content-Language when resource is accessed
        using this shared access signature.
    :param str content_type:
        Response header value for Content-Type when resource is accessed
        using this shared access signature.
    :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
        Instead of an account key, the user could pass in a user delegation
        key obtained via get_user_delegation_key. When present, the SAS is
        signed with the user delegation key instead.
    :return: A Shared Access Signature (sas) token.
    :rtype: str
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('self.account_name', self.account_name)

    # Sign with either the user delegation key (AAD path) or the account key.
    if user_delegation_key is None:
        _validate_not_none('self.account_key', self.account_key)
        signer = BlobSharedAccessSignature(self.account_name, account_key=self.account_key)
    else:
        _validate_user_delegation_key(user_delegation_key)
        signer = BlobSharedAccessSignature(self.account_name, user_delegation_key=user_delegation_key)

    return signer.generate_blob(
        container_name=container_name,
        blob_name=blob_name,
        snapshot=snapshot,
        permission=permission,
        expiry=expiry,
        start=start,
        id=id,
        ip=ip,
        protocol=protocol,
        cache_control=cache_control,
        content_disposition=content_disposition,
        content_encoding=content_encoding,
        content_language=content_language,
        content_type=content_type,
    )
def get_user_delegation_key(self, key_start_time, key_expiry_time, timeout=None):
    """
    Obtain a user delegation key for the purpose of signing SAS tokens.
    A token credential must be present on the service object for this request to succeed.

    :param datetime key_start_time:
        A DateTime value. Indicates when the key becomes valid.
    :param datetime key_expiry_time:
        A DateTime value. Indicates when the key stops being valid.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: The user delegation key.
    :rtype: ~azure.storage.blob.models.UserDelegationKey
    """
    _validate_not_none('key_start_time', key_start_time)
    # Fix: the validation label previously read 'key_end_time', which is not
    # the name of any parameter; error messages now reference the real
    # argument name.
    _validate_not_none('key_expiry_time', key_expiry_time)

    request = HTTPRequest()
    request.method = 'POST'
    # secondary=True: the secondary host location is permitted for this call.
    request.host_locations = self._get_host_locations(secondary=True)
    request.query = {
        'restype': 'service',
        'comp': 'userdelegationkey',
        'timeout': _int_to_str(timeout),
    }
    # The key validity window travels in the XML request body.
    request.body = _get_request_body(_convert_delegation_key_info_to_xml(key_start_time, key_expiry_time))

    return self._perform_request(request, _convert_xml_to_user_delegation_key)
def list_containers(self, prefix=None, num_results=None, include_metadata=False,
                    marker=None, timeout=None):
    '''
    Return a generator listing the containers under the specified account.
    The generator lazily follows the continuation tokens returned by the
    service and stops when all containers have been returned or num_results
    is reached. If num_results is specified and the account has more than
    that number of containers, the generator will have a populated
    next_marker field once it finishes; that marker can be used to create a
    new generator if more results are desired.

    :param str prefix:
        Filters the results to return only containers whose names
        begin with the specified prefix.
    :param int num_results:
        Specifies the maximum number of containers to return. A single list
        request may return up to 1000 containers and potentially a
        continuation token which should be followed to get additional results.
    :param bool include_metadata:
        Specifies that container metadata be returned in the response.
    :param str marker:
        An opaque continuation token. This value can be retrieved from the
        next_marker field of a previous generator object if num_results was
        specified and that generator has finished enumerating results. If
        specified, this generator will begin returning results from the point
        where the previous generator stopped.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    # location_lock pins every continuation request to the same host location.
    operation_context = _OperationContext(location_lock=True)

    kwargs = {
        'prefix': prefix,
        'marker': marker,
        'max_results': num_results,
        'include': 'metadata' if include_metadata else None,
        'timeout': timeout,
        '_context': operation_context,
    }
    resp = self._list_containers(**kwargs)

    # The ListGenerator re-invokes _list_containers with these kwargs to
    # follow continuation tokens.
    return ListGenerator(resp, self._list_containers, (), kwargs)
def _list_containers(self, prefix=None, marker=None, max_results=None,
                     include=None, timeout=None, _context=None):
    '''
    Return one page of the containers under the specified account.

    :param str prefix:
        Filters the results to return only containers whose names
        begin with the specified prefix.
    :param str marker:
        A string value that identifies the portion of the list to be
        returned with the next list operation. The operation returns a
        next_marker value within the response body if the list returned was
        not complete; that value may then be used in a subsequent call to
        request the next set of list items. The marker value is opaque to
        the client.
    :param int max_results:
        Specifies the maximum number of containers to return. A single list
        request may return up to 1000 containers and potentially a
        continuation token which should be followed to get additional results.
    :param str include:
        Set this parameter to the string 'metadata' to have the containers'
        metadata returned as part of the response body.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    req = HTTPRequest()
    req.method = 'GET'
    # Listing is a read operation, so the secondary location may be used.
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path()
    req.query = {
        'comp': 'list',
        'prefix': _to_str(prefix),
        'marker': _to_str(marker),
        'maxresults': _int_to_str(max_results),
        'include': _to_str(include),
        'timeout': _int_to_str(timeout)
    }

    return self._perform_request(req, _convert_xml_to_containers, operation_context=_context)
def create_container(self, container_name, metadata=None,
                     public_access=None, fail_on_exist=False, timeout=None):
    '''
    Create a new container under the specified account. If a container with
    the same name already exists, the operation fails when fail_on_exist is
    True and returns False otherwise.

    :param str container_name:
        Name of container to create.
        The container name may only contain lowercase letters, numbers, and
        hyphens, and must begin with a letter or a number. Each hyphen must
        be preceded and followed by a non-hyphen character. The name must
        also be between 3 and 63 characters long.
    :param metadata:
        A dict with name_value pairs to associate with the
        container as metadata. Example:{'Category':'test'}
    :type metadata: dict(str, str)
    :param ~azure.storage.blob.models.PublicAccess public_access:
        Possible values include: container, blob.
    :param bool fail_on_exist:
        Specify whether to throw an exception when the container exists.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: True if container is created, False if container already exists.
    :rtype: bool
    '''
    _validate_not_none('container_name', container_name)

    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {
        'x-ms-blob-public-access': _to_str(public_access)
    }
    _add_metadata_headers(metadata, req)

    # Strict mode: let any service error (including "already exists") raise.
    if fail_on_exist:
        self._perform_request(req)
        return True

    # Lenient mode: swallow only the "container already exists" error.
    try:
        self._perform_request(req, expected_errors=[_CONTAINER_ALREADY_EXISTS_ERROR_CODE])
        return True
    except AzureHttpError as ex:
        _dont_fail_on_exist(ex)
        return False
def get_container_properties(self, container_name, lease_id=None, timeout=None):
    '''
    Return all user-defined metadata and system properties for the specified
    container. The data returned does not include the container's list of
    blobs.

    :param str container_name:
        Name of existing container.
    :param str lease_id:
        If specified, get_container_properties only succeeds if the
        container's lease is active and matches this ID.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: properties for the specified container within a container object.
    :rtype: :class:`~azure.storage.blob.models.Container`
    '''
    _validate_not_none('container_name', container_name)

    req = HTTPRequest()
    req.method = 'GET'
    # Read operation: the secondary location may be used.
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {'x-ms-lease-id': _to_str(lease_id)}

    return self._perform_request(req, _parse_container, [container_name])
def get_container_metadata(self, container_name, lease_id=None, timeout=None):
    '''
    Return all user-defined metadata for the specified container.

    :param str container_name:
        Name of existing container.
    :param str lease_id:
        If specified, get_container_metadata only succeeds if the
        container's lease is active and matches this ID.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return:
        A dictionary representing the container metadata name, value pairs.
    :rtype: dict(str, str)
    '''
    _validate_not_none('container_name', container_name)

    req = HTTPRequest()
    req.method = 'GET'
    # Read operation: the secondary location may be used.
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'comp': 'metadata',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {'x-ms-lease-id': _to_str(lease_id)}

    return self._perform_request(req, _parse_metadata)
def set_container_metadata(self, container_name, metadata=None,
                           lease_id=None, if_modified_since=None, timeout=None):
    '''
    Set one or more user-defined name-value pairs for the specified
    container. Each call to this operation replaces all existing metadata
    attached to the container. To remove all metadata from the container,
    call this operation with no metadata dict.

    :param str container_name:
        Name of existing container.
    :param metadata:
        A dict containing name-value pairs to associate with the container as
        metadata. Example: {'category':'test'}
    :type metadata: dict(str, str)
    :param str lease_id:
        If specified, set_container_metadata only succeeds if the
        container's lease is active and matches this ID.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to
        UTC. If a date is passed in without timezone info, it is assumed to
        be UTC. Specify this header to perform the operation only if the
        resource has been modified since the specified time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: ETag and last modified properties for the updated Container
    :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
    '''
    _validate_not_none('container_name', container_name)

    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'comp': 'metadata',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'x-ms-lease-id': _to_str(lease_id),
    }
    _add_metadata_headers(metadata, req)

    return self._perform_request(req, _parse_base_properties)
def get_container_acl(self, container_name, lease_id=None, timeout=None):
    '''
    Get the permissions for the specified container.
    The permissions indicate whether container data may be accessed publicly.

    :param str container_name:
        Name of existing container.
    :param lease_id:
        If specified, get_container_acl only succeeds if the
        container's lease is active and matches this ID.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: A dictionary of access policies associated with the container.
        dict of str to :class:`azure.storage.common.models.AccessPolicy` and
        a public_access property if public access is turned on
    '''
    _validate_not_none('container_name', container_name)

    req = HTTPRequest()
    req.method = 'GET'
    # Read operation: the secondary location may be used.
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'comp': 'acl',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {'x-ms-lease-id': _to_str(lease_id)}

    return self._perform_request(req, _convert_xml_to_signed_identifiers_and_access)
def set_container_acl(self, container_name, signed_identifiers=None,
                      public_access=None, lease_id=None,
                      if_modified_since=None, if_unmodified_since=None, timeout=None):
    '''
    Set the permissions for the specified container, or stored access
    policies that may be used with Shared Access Signatures. The permissions
    indicate whether blobs in a container may be accessed publicly.

    :param str container_name:
        Name of existing container.
    :param signed_identifiers:
        A dictionary of access policies to associate with the container. The
        dictionary may contain up to 5 elements. An empty dictionary
        will clear the access policies set on the service.
    :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
    :param ~azure.storage.blob.models.PublicAccess public_access:
        Possible values include: container, blob.
    :param str lease_id:
        If specified, set_container_acl only succeeds if the
        container's lease is active and matches this ID.
    :param datetime if_modified_since:
        A datetime value. Azure expects the date value passed in to be UTC;
        naive datetimes are assumed to be UTC. Specify this header to perform
        the operation only if the resource has been modified since the
        specified date/time.
    :param datetime if_unmodified_since:
        A datetime value. Azure expects the date value passed in to be UTC;
        naive datetimes are assumed to be UTC. Specify this header to perform
        the operation only if the resource has not been modified since the
        specified date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: ETag and last modified properties for the updated Container
    :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
    '''
    _validate_not_none('container_name', container_name)
    # Enforces the documented limits on the access-policy dictionary.
    _validate_access_policies(signed_identifiers)

    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'comp': 'acl',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {
        'x-ms-blob-public-access': _to_str(public_access),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'x-ms-lease-id': _to_str(lease_id),
    }
    # The access policies travel as an XML document in the request body.
    req.body = _get_request_body(
        _convert_signed_identifiers_to_xml(signed_identifiers))

    return self._perform_request(req, _parse_base_properties)
def delete_container(self, container_name, fail_not_exist=False,
                     lease_id=None, if_modified_since=None,
                     if_unmodified_since=None, timeout=None):
    '''
    Mark the specified container for deletion. The container and any blobs
    contained within it are later deleted during garbage collection.

    :param str container_name:
        Name of container to delete.
    :param bool fail_not_exist:
        Specify whether to throw an exception when the container doesn't
        exist.
    :param str lease_id:
        If specified, delete_container only succeeds if the
        container's lease is active and matches this ID.
        Required if the container has an active lease.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC;
        naive datetimes are assumed to be UTC. Specify this header to perform
        the operation only if the resource has been modified since the
        specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC;
        naive datetimes are assumed to be UTC. Specify this header to perform
        the operation only if the resource has not been modified since the
        specified date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: True if container is deleted, False container doesn't exist.
    :rtype: bool
    '''
    _validate_not_none('container_name', container_name)

    req = HTTPRequest()
    req.method = 'DELETE'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {
        'x-ms-lease-id': _to_str(lease_id),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
    }

    # Strict mode: let any service error (including "not found") raise.
    if fail_not_exist:
        self._perform_request(req)
        return True

    # Lenient mode: swallow only the "container not found" error.
    try:
        self._perform_request(req, expected_errors=[_CONTAINER_NOT_FOUND_ERROR_CODE])
        return True
    except AzureHttpError as ex:
        _dont_fail_not_exist(ex)
        return False
def _lease_container_impl(
        self, container_name, lease_action, lease_id, lease_duration,
        lease_break_period, proposed_lease_id, if_modified_since,
        if_unmodified_since, timeout):
    '''
    Establishes and manages a lease on a container.

    The Lease Container operation supports five modes:
        Acquire - request a new lease.
        Renew - renew an existing lease.
        Change - change the ID of an existing lease.
        Release - free the lease so that another client may immediately
            acquire a lease against the container.
        Break - end the lease, but ensure that another client cannot
            acquire a new lease until the current lease period has expired.

    :param str container_name:
        Name of existing container.
    :param str lease_action:
        Possible _LeaseActions values: acquire|renew|release|break|change
    :param str lease_id:
        Required if the container has an active lease.
    :param int lease_duration:
        Duration of the lease in seconds, or negative one (-1) for a lease
        that never expires. A non-infinite lease can be between 15 and 60
        seconds. The duration cannot be changed using renew or change; for
        backwards compatibility the default is 60, and the value is only
        used on an acquire operation.
    :param int lease_break_period:
        For a break operation, the proposed number of seconds (between 0
        and 60) that the lease should continue before it is broken. Only
        used if shorter than the time remaining on the lease; otherwise
        the time remaining is used. If this header is absent on a break, a
        fixed-duration lease breaks after the remaining lease period
        elapses and an infinite lease breaks immediately.
    :param str proposed_lease_id:
        Optional for Acquire, required for Change. Proposed lease ID, in a
        GUID string format. The Blob service returns 400 (Invalid request)
        if the proposed lease ID is not in the correct format.
    :param datetime if_modified_since:
        A DateTime value, interpreted as UTC (non-UTC values are
        converted; naive values are assumed UTC). Perform the operation
        only if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value, interpreted as UTC (non-UTC values are
        converted; naive values are assumed UTC). Perform the operation
        only if the resource has not been modified since the specified
        date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return:
        Response headers returned from the service call.
    :rtype: dict(str, str)
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('lease_action', lease_action)

    # Build a PUT against the container resource with comp=lease; the
    # lease mode itself travels in the x-ms-lease-action header.
    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(container_name)
    req.query = {
        'restype': 'container',
        'comp': 'lease',
        'timeout': _int_to_str(timeout),
    }
    req.headers = {
        'x-ms-lease-id': _to_str(lease_id),
        'x-ms-lease-action': _to_str(lease_action),
        'x-ms-lease-duration': _to_str(lease_duration),
        'x-ms-lease-break-period': _to_str(lease_break_period),
        'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
    }

    return self._perform_request(req, _parse_lease)
def acquire_container_lease(
        self, container_name, lease_duration=-1, proposed_lease_id=None,
        if_modified_since=None, if_unmodified_since=None, timeout=None):
    '''
    Requests a new lease. If the container does not have an active lease,
    the Blob service creates a lease on the container and returns a new
    lease ID.

    :param str container_name:
        Name of existing container.
    :param int lease_duration:
        Specifies the duration of the lease, in seconds, or negative one
        (-1) for a lease that never expires. A non-infinite lease can be
        between 15 and 60 seconds. A lease duration cannot be changed
        using renew or change. Default is -1 (infinite lease).
    :param str proposed_lease_id:
        Proposed lease ID, in a GUID string format. The Blob service returns
        400 (Invalid request) if the proposed lease ID is not in the correct format.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only
        if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only if
        the resource has not been modified since the specified date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: the lease ID of the newly created lease.
    :rtype: str
    '''
    _validate_not_none('lease_duration', lease_duration)
    # A lease is either infinite (-1) or between 15 and 60 seconds; reject
    # anything else locally rather than round-tripping to the service.
    if lease_duration != -1 and \
            (lease_duration < 15 or lease_duration > 60):
        raise ValueError(_ERROR_INVALID_LEASE_DURATION)

    lease = self._lease_container_impl(container_name,
                                       _LeaseActions.Acquire,
                                       None,  # lease_id
                                       lease_duration,
                                       None,  # lease_break_period
                                       proposed_lease_id,
                                       if_modified_since,
                                       if_unmodified_since,
                                       timeout)
    return lease['id']
def renew_container_lease(
        self, container_name, lease_id, if_modified_since=None,
        if_unmodified_since=None, timeout=None):
    '''
    Renews the lease. The lease can be renewed if the lease ID specified
    matches that associated with the container. Note that
    the lease may be renewed even if it has expired as long as the container
    has not been leased again since the expiration of that lease. When you
    renew a lease, the lease duration clock resets.

    :param str container_name:
        Name of existing container.
    :param str lease_id:
        Lease ID for active lease.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only
        if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only if
        the resource has not been modified since the specified date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: the lease ID of the renewed lease.
    :rtype: str
    '''
    _validate_not_none('lease_id', lease_id)

    lease = self._lease_container_impl(container_name,
                                       _LeaseActions.Renew,
                                       lease_id,
                                       None,  # lease_duration
                                       None,  # lease_break_period
                                       None,  # proposed_lease_id
                                       if_modified_since,
                                       if_unmodified_since,
                                       timeout)
    return lease['id']
def release_container_lease(
        self, container_name, lease_id, if_modified_since=None,
        if_unmodified_since=None, timeout=None):
    '''
    Releases the lease, provided lease_id matches that associated with the
    container. Once released, another client may immediately acquire a
    lease on the container.

    :param str container_name:
        Name of existing container.
    :param str lease_id:
        Lease ID for active lease.
    :param datetime if_modified_since:
        A DateTime value, interpreted as UTC (non-UTC values are
        converted; naive values are assumed UTC). Perform the operation
        only if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value, interpreted as UTC (non-UTC values are
        converted; naive values are assumed UTC). Perform the operation
        only if the resource has not been modified since the specified
        date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('lease_id', lease_id)

    # Release takes no duration, break period, or proposed ID.
    self._lease_container_impl(container_name,
                               lease_action=_LeaseActions.Release,
                               lease_id=lease_id,
                               lease_duration=None,
                               lease_break_period=None,
                               proposed_lease_id=None,
                               if_modified_since=if_modified_since,
                               if_unmodified_since=if_unmodified_since,
                               timeout=timeout)
def break_container_lease(
        self, container_name, lease_break_period=None,
        if_modified_since=None, if_unmodified_since=None, timeout=None):
    '''
    Break the lease, if the container has an active lease. Once a lease is
    broken, it cannot be renewed. Any authorized request can break the lease;
    the request is not required to specify a matching lease ID. When a lease
    is broken, the lease break period is allowed to elapse, during which time
    no lease operation except break and release can be performed on the container.
    When a lease is successfully broken, the response indicates the interval
    in seconds until a new lease can be acquired.

    :param str container_name:
        Name of existing container.
    :param int lease_break_period:
        This is the proposed duration of seconds that the lease
        should continue before it is broken, between 0 and 60 seconds. This
        break period is only used if it is shorter than the time remaining
        on the lease. If longer, the time remaining on the lease is used.
        A new lease will not be available before the break period has
        expired, but the lease may be held for longer than the break
        period. If this header does not appear with a break
        operation, a fixed-duration lease breaks after the remaining lease
        period elapses, and an infinite lease breaks immediately.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only
        if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only if
        the resource has not been modified since the specified date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: Approximate time remaining in the lease period, in seconds.
    :rtype: int
    '''
    # The service only accepts break periods in [0, 60]; None means
    # "use the remaining lease period" and is passed through untouched.
    if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
        raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)

    lease = self._lease_container_impl(container_name,
                                       _LeaseActions.Break,
                                       None,  # lease_id
                                       None,  # lease_duration
                                       lease_break_period,
                                       None,  # proposed_lease_id
                                       if_modified_since,
                                       if_unmodified_since,
                                       timeout)
    return lease['time']
def change_container_lease(
        self, container_name, lease_id, proposed_lease_id,
        if_modified_since=None, if_unmodified_since=None, timeout=None):
    '''
    Change the lease ID of an active lease. A change must include the current
    lease ID and a new lease ID.

    :param str container_name:
        Name of existing container.
    :param str lease_id:
        Lease ID for active lease.
    :param str proposed_lease_id:
        Proposed lease ID, in a GUID string format. The Blob service returns 400
        (Invalid request) if the proposed lease ID is not in the correct format.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only
        if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only if
        the resource has not been modified since the specified date/time.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('lease_id', lease_id)
    # proposed_lease_id is mandatory for the Change action (see
    # _lease_container_impl); validate locally like lease_id above.
    _validate_not_none('proposed_lease_id', proposed_lease_id)

    self._lease_container_impl(container_name,
                               _LeaseActions.Change,
                               lease_id,
                               None,  # lease_duration
                               None,  # lease_break_period
                               proposed_lease_id,
                               if_modified_since,
                               if_unmodified_since,
                               timeout)
def list_blobs(self, container_name, prefix=None, num_results=None, include=None,
               delimiter=None, marker=None, timeout=None):
    '''
    Returns a generator to list the blobs under the specified container.
    The generator lazily follows the continuation tokens returned by the
    service and stops when all blobs have been returned or num_results is
    reached. If num_results is specified and the account holds more blobs
    than that, the finished generator exposes a populated next_marker
    field, which can seed a new generator for further results.

    :param str container_name:
        Name of existing container.
    :param str prefix:
        Filters the results to return only blobs whose names
        begin with the specified prefix.
    :param int num_results:
        Maximum number of blobs to return, including all
        :class:`BlobPrefix` elements. If unspecified or greater than
        5,000, the server returns up to 5,000 items. A value less than or
        equal to zero results in error response code 400 (Bad Request).
    :param ~azure.storage.blob.models.Include include:
        Specifies one or more additional datasets to include in the response.
    :param str delimiter:
        When present, the operation returns a
        :class:`~azure.storage.blob.models.BlobPrefix` element in the
        result list as a placeholder for all blobs whose names begin with
        the same substring up to the appearance of the delimiter, which
        may be a single character or a string.
    :param str marker:
        An opaque continuation token, retrievable from the next_marker
        field of a previous generator object once that generator has
        finished enumerating results. If specified, this generator begins
        returning results from where the previous one stopped.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    # Lock the location so every continuation request hits the same host.
    context = _OperationContext(location_lock=True)
    list_args = (container_name,)
    list_kwargs = {
        'prefix': prefix,
        'marker': marker,
        'max_results': num_results,
        'include': include,
        'delimiter': delimiter,
        'timeout': timeout,
        '_context': context,
        '_converter': _convert_xml_to_blob_list,
    }

    first_page = self._list_blobs(*list_args, **list_kwargs)
    return ListGenerator(first_page, self._list_blobs, list_args, list_kwargs)
def list_blob_names(self, container_name, prefix=None, num_results=None,
                    include=None, delimiter=None, marker=None,
                    timeout=None):
    '''
    Returns a generator to list the blob names under the specified container.
    The generator lazily follows the continuation tokens returned by the
    service and stops when all blobs have been returned or num_results is
    reached. If num_results is specified and the account holds more blobs
    than that, the finished generator exposes a populated next_marker
    field, which can seed a new generator for further results.

    :param str container_name:
        Name of existing container.
    :param str prefix:
        Filters the results to return only blobs whose names
        begin with the specified prefix.
    :param int num_results:
        Maximum number of blobs to return, including all
        :class:`BlobPrefix` elements. If unspecified or greater than
        5,000, the server returns up to 5,000 items. A value less than or
        equal to zero results in error response code 400 (Bad Request).
    :param ~azure.storage.blob.models.Include include:
        Specifies one or more additional datasets to include in the response.
    :param str delimiter:
        When present, the operation returns a
        :class:`~azure.storage.blob.models.BlobPrefix` element in the
        result list as a placeholder for all blobs whose names begin with
        the same substring up to the appearance of the delimiter, which
        may be a single character or a string.
    :param str marker:
        An opaque continuation token, retrievable from the next_marker
        field of a previous generator object once that generator has
        finished enumerating results. If specified, this generator begins
        returning results from where the previous one stopped.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    # Same wiring as list_blobs, but the converter yields names only.
    context = _OperationContext(location_lock=True)
    list_args = (container_name,)
    list_kwargs = {
        'prefix': prefix,
        'marker': marker,
        'max_results': num_results,
        'include': include,
        'delimiter': delimiter,
        'timeout': timeout,
        '_context': context,
        '_converter': _convert_xml_to_blob_name_list,
    }

    first_page = self._list_blobs(*list_args, **list_kwargs)
    return ListGenerator(first_page, self._list_blobs, list_args, list_kwargs)
def _list_blobs(self, container_name, prefix=None, marker=None,
                max_results=None, include=None, delimiter=None, timeout=None,
                _context=None, _converter=None):
    '''
    Returns the list of blobs under the specified container.

    :param str container_name:
        Name of existing container.
    :param str prefix:
        Filters the results to return only blobs whose names
        begin with the specified prefix.
    :param str marker:
        A string value that identifies the portion of the list
        to be returned with the next list operation. The operation returns
        a next_marker value within the response body if the list returned was
        not complete. The marker value may then be used in a subsequent
        call to request the next set of list items. The marker value is
        opaque to the client.
    :param int max_results:
        Specifies the maximum number of blobs to return,
        including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify
        max_results or specifies a value greater than 5,000, the server will
        return up to 5,000 items. Setting max_results to a value less than
        or equal to zero results in error response code 400 (Bad Request).
    :param str include:
        Specifies one or more datasets to include in the
        response. To specify more than one of these options on the URI,
        you must separate each option with a comma. Valid values are:
            snapshots:
                Specifies that snapshots should be included in the
                enumeration. Snapshots are listed from oldest to newest in
                the response.
            metadata:
                Specifies that blob metadata be returned in the response.
            uncommittedblobs:
                Specifies that blobs for which blocks have been uploaded,
                but which have not been committed using Put Block List
                (REST API), be included in the response.
            copy:
                Version 2012-02-12 and newer. Specifies that metadata
                related to any current or previous Copy Blob operation
                should be included in the response.
            deleted:
                Version 2017-07-29 and newer. Specifies that soft deleted blobs
                which are retained by the service should be included
                in the response.
    :param str delimiter:
        When the request includes this parameter, the operation
        returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a
        placeholder for all blobs whose names begin with the same
        substring up to the appearance of the delimiter character. The
        delimiter may be a single character or a string.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('container_name', container_name)
    request = HTTPRequest()
    request.method = 'GET'
    # Listing is a read operation, so the secondary location is allowed.
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = _get_path(container_name)
    request.query = {
        'restype': 'container',
        'comp': 'list',
        'prefix': _to_str(prefix),
        'delimiter': _to_str(delimiter),
        'marker': _to_str(marker),
        'maxresults': _int_to_str(max_results),
        'include': _to_str(include),
        'timeout': _int_to_str(timeout),
    }

    return self._perform_request(request, _converter, operation_context=_context)
def get_blob_account_information(self, container_name=None, blob_name=None, timeout=None):
    """
    Gets information related to the storage account.
    The information can also be retrieved if the user has a SAS to a
    container or blob.

    :param str container_name:
        Name of existing container.
        Optional, unless using a SAS token to a specific container or blob, in which case it's required.
    :param str blob_name:
        Name of existing blob.
        Optional, unless using a SAS token to a specific blob, in which case it's required.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: The :class:`~azure.storage.blob.models.AccountInformation`.
    """
    # HEAD with restype=account works at account, container, or blob
    # scope, so container/blob names are only needed for SAS-scoped auth.
    req = HTTPRequest()
    req.method = 'HEAD'
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path(container_name, blob_name)
    req.query = {
        'restype': 'account',
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }

    return self._perform_request(req, _parse_account_information)
def get_blob_service_stats(self, timeout=None):
    '''
    Retrieves statistics related to replication for the Blob service. Only
    available when read-access geo-redundant replication is enabled for
    the storage account.

    With geo-redundant replication, Azure Storage maintains your data
    durable in two locations, keeping multiple healthy replicas in each.
    The primary location is where you read, create, update, or delete
    data; it lives in the region chosen when the account was created via
    the Azure Management Azure classic portal (for example, North Central
    US). Data is replicated to the secondary location, which is
    automatically determined from the primary: a second data center in
    the same region. Read-only access from the secondary is available if
    read-access geo-redundant replication is enabled for the account.

    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: The blob service stats.
    :rtype: :class:`~azure.storage.common.models.ServiceStats`
    '''
    # Stats are only served from the secondary endpoint, so the primary
    # location is explicitly excluded here.
    req = HTTPRequest()
    req.method = 'GET'
    req.host_locations = self._get_host_locations(primary=False, secondary=True)
    req.path = _get_path()
    req.query = {
        'restype': 'service',
        'comp': 'stats',
        'timeout': _int_to_str(timeout),
    }

    return self._perform_request(req, _convert_xml_to_service_stats)
def set_blob_service_properties(
        self, logging=None, hour_metrics=None, minute_metrics=None,
        cors=None, target_version=None, timeout=None, delete_retention_policy=None, static_website=None):
    '''
    Sets the properties of a storage account's Blob service, including
    Azure Storage Analytics. If an element (ex Logging) is left as None,
    the existing settings on the service for that functionality are
    preserved.

    :param logging:
        Groups the Azure Analytics Logging settings.
    :type logging:
        :class:`~azure.storage.common.models.Logging`
    :param hour_metrics:
        The hour metrics settings provide a summary of request
        statistics grouped by API in hourly aggregates for blobs.
    :type hour_metrics:
        :class:`~azure.storage.common.models.Metrics`
    :param minute_metrics:
        The minute metrics settings provide request statistics
        for each minute for blobs.
    :type minute_metrics:
        :class:`~azure.storage.common.models.Metrics`
    :param cors:
        You can include up to five CorsRule elements in the
        list. If an empty list is specified, all CORS rules will be deleted,
        and CORS will be disabled for the service.
    :type cors: list(:class:`~azure.storage.common.models.CorsRule`)
    :param str target_version:
        Indicates the default version to use for requests if an incoming
        request's version is not specified.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :param delete_retention_policy:
        The delete retention policy specifies whether to retain deleted blobs.
        It also specifies the number of days and versions of blob to keep.
    :type delete_retention_policy:
        :class:`~azure.storage.common.models.DeleteRetentionPolicy`
    :param static_website:
        Specifies whether the static website feature is enabled,
        and if yes, indicates the index document and 404 error document to use.
    :type static_website:
        :class:`~azure.storage.common.models.StaticWebsite`
    '''
    # A request carrying nothing but defaults would be a no-op (or worse,
    # clobber nothing intentionally); reject it up front.
    settings = [logging, hour_metrics, minute_metrics, cors, target_version,
                delete_retention_policy, static_website]
    if not any(setting is not None for setting in settings):
        raise ValueError("set_blob_service_properties should be called with at least one parameter")

    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path()
    req.query = {
        'restype': 'service',
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }
    req.body = _get_request_body(
        _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
                                           cors, target_version, delete_retention_policy, static_website))

    self._perform_request(req)
def get_blob_service_properties(self, timeout=None):
    '''
    Gets the properties of a storage account's Blob service, including
    Azure Storage Analytics.

    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: The blob :class:`~azure.storage.common.models.ServiceProperties` with an attached
        target_version property.
    '''
    req = HTTPRequest()
    req.method = 'GET'
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path()
    req.query = {
        'restype': 'service',
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }

    return self._perform_request(req, _convert_xml_to_service_properties)
def get_blob_properties(
        self, container_name, blob_name, snapshot=None, lease_id=None,
        if_modified_since=None, if_unmodified_since=None, if_match=None,
        if_none_match=None, timeout=None, cpk=None):
    '''
    Returns all user-defined metadata, standard HTTP properties, and
    system properties for the blob. It does not return the content of
    the blob. Returns :class:`~azure.storage.blob.models.Blob` with
    :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param str snapshot:
        The snapshot parameter is an opaque DateTime value that,
        when present, specifies the blob snapshot to retrieve.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value, interpreted as UTC (non-UTC values are
        converted; naive values are assumed UTC). Perform the operation
        only if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value, interpreted as UTC (non-UTC values are
        converted; naive values are assumed UTC). Perform the operation
        only if the resource has not been modified since the specified
        date/time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform
        the operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Specify this header
        to perform the operation only if the resource's ETag does not match
        the value specified. Specify the wildcard character (*) to perform
        the operation only if the resource does not exist, and fail the
        operation if it does exist.
    :param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
        Decrypts the data on the service-side with the given key.
        Use of customer-provided keys must be done over HTTPS.
        As the encryption key itself is provided in the request,
        a secure connection must be established to transfer the key.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: a blob object including properties and metadata.
    :rtype: :class:`~azure.storage.blob.models.Blob`
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)

    # HEAD returns the same headers as GET without the blob body.
    req = HTTPRequest()
    req.method = 'HEAD'
    req.host_locations = self._get_host_locations(secondary=True)
    req.path = _get_path(container_name, blob_name)
    req.query = {
        'snapshot': _to_str(snapshot),
        'timeout': _int_to_str(timeout),
    }
    req.headers = {
        'x-ms-lease-id': _to_str(lease_id),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match),
    }
    _validate_and_add_cpk_headers(req, encryption_key=cpk, protocol=self.protocol)

    return self._perform_request(req, _parse_blob, [blob_name, snapshot])
def set_blob_properties(
        self, container_name, blob_name, content_settings=None, lease_id=None,
        if_modified_since=None, if_unmodified_since=None, if_match=None,
        if_none_match=None, timeout=None, cpk=None):
    '''
    Sets system properties on the blob. If one property is set for the
    content_settings, all properties will be overridden.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param ~azure.storage.blob.models.ContentSettings content_settings:
        ContentSettings object used to set blob properties.
    :param str lease_id:
        Required if the blob has an active lease.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only
        if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only if
        the resource has not been modified since the specified date/time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform
        the operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Specify this header
        to perform the operation only if the resource's ETag does not match
        the value specified. Specify the wildcard character (*) to perform
        the operation only if the resource does not exist, and fail the
        operation if it does exist.
    :param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
        Encrypts the data on the service-side with the given key.
        Use of customer-provided keys must be done over HTTPS.
        As the encryption key itself is provided in the request,
        a secure connection must be established to transfer the key.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: ETag and last modified properties for the updated Blob
    :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    request.query = {
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }
    request.headers = {
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match),
        'x-ms-lease-id': _to_str(lease_id)
    }
    _validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)

    # content_settings expands into the x-ms-blob-* property headers; when
    # omitted, no property headers are sent and existing values survive.
    if content_settings is not None:
        request.headers.update(content_settings._to_headers())

    return self._perform_request(request, _parse_base_properties)
def exists(self, container_name, blob_name=None, snapshot=None, timeout=None):
'''
Returns a boolean indicating whether the container exists (if blob_name
is None), or otherwise a boolean indicating whether the blob exists.
:param str container_name:
Name of a container.
:param str blob_name:
Name of a blob. If None, the container will be checked for existence.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the snapshot.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A boolean indicating whether the resource exists.
:rtype: bool
'''
_validate_not_none('container_name', container_name)
try:
# make head request to see if container/blob/snapshot exists
request = HTTPRequest()
request.method = 'GET' if blob_name is None else 'HEAD'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'snapshot': _to_str(snapshot),
'timeout': _int_to_str(timeout),
'restype': 'container' if blob_name is None else None,
}
expected_errors = [_CONTAINER_NOT_FOUND_ERROR_CODE] if blob_name is None \
else [_CONTAINER_NOT_FOUND_ERROR_CODE, _BLOB_NOT_FOUND_ERROR_CODE]
self._perform_request(request, expected_errors=expected_errors)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False
    def _get_blob(
            self, container_name, blob_name, snapshot=None, start_range=None,
            end_range=None, validate_content=False, lease_id=None, if_modified_since=None,
            if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, cpk=None,
            _context=None):
        '''
        Downloads a blob's content, metadata, and properties. You can also
        call this API to read a snapshot. You can specify a range if you don't
        need to download the blob in its entirety. If no range is specified,
        the full blob will be downloaded.

        See get_blob_to_* for high level functions that handle the download
        of large blobs with automatic chunking and progress notifications.

        :param str container_name:
            Name of existing container.
        :param str blob_name:
            Name of existing blob.
        :param str snapshot:
            The snapshot parameter is an opaque DateTime value that,
            when present, specifies the blob snapshot to retrieve.
        :param int start_range:
            Start of byte range to use for downloading a section of the blob.
            If no end_range is given, all bytes after the start_range will be downloaded.
            The start_range and end_range params are inclusive.
            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
        :param int end_range:
            End of byte range to use for downloading a section of the blob.
            If end_range is given, start_range must be provided.
            The start_range and end_range params are inclusive.
            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
        :param bool validate_content:
            When this is set to True and specified together with the Range header,
            the service returns the MD5 hash for the range, as long as the range
            is less than or equal to 4 MB in size.
        :param str lease_id:
            Required if the blob has an active lease.
        :param datetime if_modified_since:
            A DateTime value. Azure expects the date value passed in to be UTC.
            If timezone is included, any non-UTC datetimes will be converted to UTC.
            If a date is passed in without timezone info, it is assumed to be UTC.
            Specify this header to perform the operation only
            if the resource has been modified since the specified time.
        :param datetime if_unmodified_since:
            A DateTime value. Azure expects the date value passed in to be UTC.
            If timezone is included, any non-UTC datetimes will be converted to UTC.
            If a date is passed in without timezone info, it is assumed to be UTC.
            Specify this header to perform the operation only if
            the resource has not been modified since the specified date/time.
        :param str if_match:
            An ETag value, or the wildcard character (*). Specify this header to perform
            the operation only if the resource's ETag matches the value specified.
        :param str if_none_match:
            An ETag value, or the wildcard character (*). Specify this header
            to perform the operation only if the resource's ETag does not match
            the value specified. Specify the wildcard character (*) to perform
            the operation only if the resource does not exist, and fail the
            operation if it does exist.
        :param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
            Decrypts the data on the service-side with the given key.
            Use of customer-provided keys must be done over HTTPS.
            As the encryption key itself is provided in the request,
            a secure connection must be established to transfer the key.
        :param int timeout:
            The timeout parameter is expressed in seconds.
        :return: A Blob with content, properties, and metadata.
        :rtype: :class:`~azure.storage.blob.models.Blob`
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_decryption_required(self.require_encryption,
                                      self.key_encryption_key,
                                      self.key_resolver_function)

        # When client-side decryption keys are configured, widen the requested
        # byte range to whole 16-byte cipher blocks (plus room for the
        # preceding block used as IV). The surplus byte counts recorded in
        # start_offset/end_offset are passed to the response parser below,
        # which presumably trims them after decryption — TODO confirm against
        # _parse_blob.
        start_offset, end_offset = 0, 0
        if self.key_encryption_key is not None or self.key_resolver_function is not None:
            if start_range is not None:
                # Align the start of the range along a 16 byte block
                start_offset = start_range % 16
                start_range -= start_offset

                # Include an extra 16 bytes for the IV if necessary
                # Because of the previous offsetting, start_range will always
                # be a multiple of 16.
                if start_range > 0:
                    start_offset += 16
                    start_range -= 16

            if end_range is not None:
                # Align the end of the range along a 16 byte block
                end_offset = 15 - (end_range % 16)
                end_range += end_offset

        request = HTTPRequest()
        request.method = 'GET'
        # Reads are allowed to fall back to the secondary location.
        request.host_locations = self._get_host_locations(secondary=True)
        request.path = _get_path(container_name, blob_name)
        request.query = {
            'snapshot': _to_str(snapshot),
            'timeout': _int_to_str(timeout),
        }
        request.headers = {
            'x-ms-lease-id': _to_str(lease_id),
            'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
            'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
            'If-Match': _to_str(if_match),
            'If-None-Match': _to_str(if_none_match),
        }
        _validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
        # Both range bounds are optional: omitting them downloads the full blob.
        _validate_and_format_range_headers(
            request,
            start_range,
            end_range,
            start_range_required=False,
            end_range_required=False,
            check_content_md5=validate_content)

        return self._perform_request(request, _parse_blob,
                                     [blob_name, snapshot, validate_content, self.require_encryption,
                                      self.key_encryption_key, self.key_resolver_function,
                                      start_offset, end_offset],
                                     operation_context=_context)
def get_blob_to_path(
self, container_name, blob_name, file_path, open_mode='wb',
snapshot=None, start_range=None, end_range=None,
validate_content=False, progress_callback=None,
max_connections=2, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, cpk=None):
'''
Downloads a blob to a file path, with automatic chunking and progress
notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
properties and metadata.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str file_path:
Path of file to write out to.
:param str open_mode:
Mode to use when opening the file. Note that specifying append only
open_mode prevents parallel download. So, max_connections must be set
to 1 if this open_mode is used.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
:param int start_range:
Start of byte range to use for downloading a section of the blob.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
:param int end_range:
End of byte range to use for downloading a section of the blob.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the blob. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the blob if known.
:type progress_callback: func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be useful if many blobs are
expected to be empty as an extra request is required for empty blobs
if max_connections is greater than 1.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Decrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A Blob with properties and metadata. If max_connections is greater
than 1, the content_md5 (if set on the blob) will not be returned. If you
require this value, either use get_blob_properties or set max_connections
to 1.
:rtype: :class:`~azure.storage.blob.models.Blob`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('file_path', file_path)
_validate_not_none('open_mode', open_mode)
if max_connections > 1 and 'a' in open_mode:
raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
with open(file_path, open_mode) as stream:
blob = self.get_blob_to_stream(
container_name,
blob_name,
stream,
snapshot,
start_range,
end_range,
validate_content,
progress_callback,
max_connections,
lease_id,
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout=timeout,
cpk=cpk)
return blob
    def get_blob_to_stream(
            self, container_name, blob_name, stream, snapshot=None,
            start_range=None, end_range=None, validate_content=False,
            progress_callback=None, max_connections=2, lease_id=None,
            if_modified_since=None, if_unmodified_since=None, if_match=None,
            if_none_match=None, timeout=None, cpk=None):
        '''
        Downloads a blob to a stream, with automatic chunking and progress
        notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
        properties and metadata.

        :param str container_name:
            Name of existing container.
        :param str blob_name:
            Name of existing blob.
        :param io.IOBase stream:
            Opened stream to write to.
        :param str snapshot:
            The snapshot parameter is an opaque DateTime value that,
            when present, specifies the blob snapshot to retrieve.
        :param int start_range:
            Start of byte range to use for downloading a section of the blob.
            If no end_range is given, all bytes after the start_range will be downloaded.
            The start_range and end_range params are inclusive.
            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
        :param int end_range:
            End of byte range to use for downloading a section of the blob.
            If end_range is given, start_range must be provided.
            The start_range and end_range params are inclusive.
            Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
        :param bool validate_content:
            If set to true, validates an MD5 hash for each retrieved portion of
            the blob. This is primarily valuable for detecting bitflips on the wire
            if using http instead of https as https (the default) will already
            validate. Note that the service will only return transactional MD5s
            for chunks 4MB or less so the first get request will be of size
            self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
            self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
            thrown. As computing the MD5 takes processing time and more requests
            will need to be done due to the reduced chunk size there may be some
            increase in latency.
        :param progress_callback:
            Callback for progress with signature function(current, total)
            where current is the number of bytes transfered so far, and total is
            the size of the blob if known.
        :type progress_callback: func(current, total)
        :param int max_connections:
            If set to 2 or greater, an initial get will be done for the first
            self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob,
            the method returns at this point. If it is not, it will download the
            remaining data parallel using the number of threads equal to
            max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
            If set to 1, a single large get request will be done. This is not
            generally recommended but available if very few threads should be
            used, network requests are very expensive, or a non-seekable stream
            prevents parallel download. This may also be useful if many blobs are
            expected to be empty as an extra request is required for empty blobs
            if max_connections is greater than 1.
        :param str lease_id:
            Required if the blob has an active lease.
        :param datetime if_modified_since:
            A DateTime value. Azure expects the date value passed in to be UTC.
            If timezone is included, any non-UTC datetimes will be converted to UTC.
            If a date is passed in without timezone info, it is assumed to be UTC.
            Specify this header to perform the operation only
            if the resource has been modified since the specified time.
        :param datetime if_unmodified_since:
            A DateTime value. Azure expects the date value passed in to be UTC.
            If timezone is included, any non-UTC datetimes will be converted to UTC.
            If a date is passed in without timezone info, it is assumed to be UTC.
            Specify this header to perform the operation only if
            the resource has not been modified since the specified date/time.
        :param str if_match:
            An ETag value, or the wildcard character (*). Specify this header to perform
            the operation only if the resource's ETag matches the value specified.
        :param str if_none_match:
            An ETag value, or the wildcard character (*). Specify this header
            to perform the operation only if the resource's ETag does not match
            the value specified. Specify the wildcard character (*) to perform
            the operation only if the resource does not exist, and fail the
            operation if it does exist.
        :param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
            Decrypts the data on the service-side with the given key.
            Use of customer-provided keys must be done over HTTPS.
            As the encryption key itself is provided in the request,
            a secure connection must be established to transfer the key.
        :param int timeout:
            The timeout parameter is expressed in seconds. This method may make
            multiple calls to the Azure service and the timeout will apply to
            each call individually.
        :return: A Blob with properties and metadata. If max_connections is greater
            than 1, the content_md5 (if set on the blob) will not be returned. If you
            require this value, either use get_blob_properties or set max_connections
            to 1.
        :rtype: :class:`~azure.storage.blob.models.Blob`
        '''
        _validate_not_none('container_name', container_name)
        _validate_not_none('blob_name', blob_name)
        _validate_not_none('stream', stream)

        # An end_range without a start_range is rejected up front.
        if end_range is not None:
            _validate_not_none("start_range", start_range)

        # the stream must be seekable if parallel download is required
        if max_connections > 1:
            if sys.version_info >= (3,) and not stream.seekable():
                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
            try:
                # On Python 2 there is no seekable(); probe with a no-op seek.
                stream.seek(stream.tell())
            except (NotImplementedError, AttributeError):
                raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)

        # The service only provides transactional MD5s for chunks under 4MB.
        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
        # chunk so a transactional MD5 can be retrieved.
        first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE

        initial_request_start = start_range if start_range is not None else 0

        # The first request fetches at most first_get_size bytes; if the
        # caller's range already fits in that budget, request exactly it.
        if end_range is not None and end_range - start_range < first_get_size:
            initial_request_end = end_range
        else:
            initial_request_end = initial_request_start + first_get_size - 1

        # Send a context object to make sure we always retry to the initial location
        operation_context = _OperationContext(location_lock=True)
        try:
            blob = self._get_blob(container_name,
                                  blob_name,
                                  snapshot,
                                  start_range=initial_request_start,
                                  end_range=initial_request_end,
                                  validate_content=validate_content,
                                  lease_id=lease_id,
                                  if_modified_since=if_modified_since,
                                  if_unmodified_since=if_unmodified_since,
                                  if_match=if_match,
                                  if_none_match=if_none_match,
                                  timeout=timeout,
                                  _context=operation_context,
                                  cpk=cpk)

            # Parse the total blob size and adjust the download size if ranges
            # were specified
            blob_size = _parse_length_from_content_range(blob.properties.content_range)
            if end_range is not None:
                # Use the end_range unless it is over the end of the blob
                download_size = min(blob_size, end_range - start_range + 1)
            elif start_range is not None:
                download_size = blob_size - start_range
            else:
                download_size = blob_size
        except AzureHttpError as ex:
            if start_range is None and ex.status_code == 416:
                # Get range will fail on an empty blob. If the user did not
                # request a range, do a regular get request in order to get
                # any properties.
                blob = self._get_blob(container_name,
                                      blob_name,
                                      snapshot,
                                      validate_content=validate_content,
                                      lease_id=lease_id,
                                      if_modified_since=if_modified_since,
                                      if_unmodified_since=if_unmodified_since,
                                      if_match=if_match,
                                      if_none_match=if_none_match,
                                      timeout=timeout,
                                      _context=operation_context,
                                      cpk=cpk)

                # Set the download size to empty
                download_size = 0
            else:
                raise ex

        # Mark the first progress chunk. If the blob is small or this is a single
        # shot download, this is the only call
        if progress_callback:
            progress_callback(blob.properties.content_length, download_size)

        # Write the content to the user stream
        # Clear blob content since output has been written to user stream
        if blob.content is not None:
            stream.write(blob.content)
            blob.content = None

        # If the blob is small, the download is complete at this point.
        # If blob size is large, download the rest of the blob in chunks.
        if blob.properties.content_length != download_size:
            # Lock on the etag. This can be overriden by the user by specifying '*'
            if_match = if_match if if_match is not None else blob.properties.etag

            end_blob = blob_size
            if end_range is not None:
                # Use the end_range unless it is over the end of the blob
                end_blob = min(blob_size, end_range + 1)

            _download_blob_chunks(
                self,
                container_name,
                blob_name,
                snapshot,
                download_size,
                self.MAX_CHUNK_GET_SIZE,
                first_get_size,
                initial_request_end + 1,  # start where the first download ended
                end_blob,
                stream,
                max_connections,
                progress_callback,
                validate_content,
                lease_id,
                if_modified_since,
                if_unmodified_since,
                if_match,
                if_none_match,
                timeout,
                operation_context,
                cpk,
            )

            # Set the content length to the download size instead of the size of
            # the last range
            blob.properties.content_length = download_size

            # Overwrite the content range to the user requested range
            blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size)

            # Overwrite the content MD5 as it is the MD5 for the last range instead
            # of the stored MD5
            # TODO: Set to the stored MD5 when the service returns this
            blob.properties.content_md5 = None

        return blob
def get_blob_to_bytes(
self, container_name, blob_name, snapshot=None,
start_range=None, end_range=None, validate_content=False,
progress_callback=None, max_connections=2, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None):
'''
Downloads a blob as an array of bytes, with automatic chunking and
progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
properties, metadata, and content.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
:param int start_range:
Start of byte range to use for downloading a section of the blob.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
:param int end_range:
End of byte range to use for downloading a section of the blob.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the blob. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the blob if known.
:type progress_callback: func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be useful if many blobs are
expected to be empty as an extra request is required for empty blobs
if max_connections is greater than 1.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Decrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A Blob with properties and metadata. If max_connections is greater
than 1, the content_md5 (if set on the blob) will not be returned. If you
require this value, either use get_blob_properties or set max_connections
to 1.
:rtype: :class:`~azure.storage.blob.models.Blob`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
stream = BytesIO()
blob = self.get_blob_to_stream(
container_name,
blob_name,
stream,
snapshot,
start_range,
end_range,
validate_content,
progress_callback,
max_connections,
lease_id,
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout=timeout,
cpk=cpk)
blob.content = stream.getvalue()
return blob
def get_blob_to_text(
self, container_name, blob_name, encoding='utf-8', snapshot=None,
start_range=None, end_range=None, validate_content=False,
progress_callback=None, max_connections=2, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None):
'''
Downloads a blob as unicode text, with automatic chunking and progress
notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with
properties, metadata, and content.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str encoding:
Python encoding to use when decoding the blob data.
:param str snapshot:
The snapshot parameter is an opaque DateTime value that,
when present, specifies the blob snapshot to retrieve.
:param int start_range:
Start of byte range to use for downloading a section of the blob.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
:param int end_range:
End of byte range to use for downloading a section of the blob.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of blob.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the blob. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the blob if known.
:type progress_callback: func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be useful if many blobs are
expected to be empty as an extra request is required for empty blobs
if max_connections is greater than 1.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Decrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A Blob with properties and metadata. If max_connections is greater
than 1, the content_md5 (if set on the blob) will not be returned. If you
require this value, either use get_blob_properties or set max_connections
to 1.
:rtype: :class:`~azure.storage.blob.models.Blob`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('encoding', encoding)
blob = self.get_blob_to_bytes(container_name,
blob_name,
snapshot,
start_range,
end_range,
validate_content,
progress_callback,
max_connections,
lease_id,
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout=timeout,
cpk=cpk)
blob.content = blob.content.decode(encoding)
return blob
def get_blob_metadata(
self, container_name, blob_name, snapshot=None, lease_id=None,
if_modified_since=None, if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None, cpk=None):
'''
Returns all user-defined metadata for the specified blob or snapshot.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
The snapshot parameter is an opaque value that,
when present, specifies the blob snapshot to retrieve.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Decrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds.
:return:
A dictionary representing the blob metadata name, value pairs.
:rtype: dict(str, str)
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = _get_path(container_name, blob_name)
request.query = {
'snapshot': _to_str(snapshot),
'comp': 'metadata',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
_validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
return self._perform_request(request, _parse_metadata)
def set_blob_metadata(self, container_name, blob_name,
metadata=None, lease_id=None,
if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None, cpk=None):
'''
Sets user-defined metadata for the specified blob as one or more
name-value pairs.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param metadata:
Dict containing name and value pairs. Each call to this operation
replaces all existing metadata attached to the blob. To remove all
metadata from the blob, call this operation with no metadata headers.
:type metadata: dict(str, str)
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'metadata',
'timeout': _int_to_str(timeout),
}
request.headers = {
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
'x-ms-lease-id': _to_str(lease_id),
}
_add_metadata_headers(metadata, request)
_validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
return self._perform_request(request, _parse_base_properties)
def _lease_blob_impl(self, container_name, blob_name,
lease_action, lease_id,
lease_duration, lease_break_period,
proposed_lease_id, if_modified_since,
if_unmodified_since, if_match, if_none_match, timeout=None):
'''
Establishes and manages a lease on a blob for write and delete operations.
The Lease Blob operation can be called in one of five modes:
Acquire, to request a new lease.
Renew, to renew an existing lease.
Change, to change the ID of an existing lease.
Release, to free the lease if it is no longer needed so that another
client may immediately acquire a lease against the blob.
Break, to end the lease but ensure that another client cannot acquire
a new lease until the current lease period has expired.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str lease_action:
Possible _LeaseActions acquire|renew|release|break|change
:param str lease_id:
Required if the blob has an active lease.
:param int lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change.
:param int lease_break_period:
For a break operation, this is the proposed duration of
seconds that the lease should continue before it is broken, between
0 and 60 seconds. This break period is only used if it is shorter
than the time remaining on the lease. If longer, the time remaining
on the lease is used. A new lease will not be available before the
break period has expired, but the lease may be held for longer than
the break period. If this header does not appear with a break
operation, a fixed-duration lease breaks after the remaining lease
period elapses, and an infinite lease breaks immediately.
:param str proposed_lease_id:
Optional for acquire, required for change. Proposed lease ID, in a
GUID string format. The Blob service returns 400 (Invalid request)
if the proposed lease ID is not in the correct format.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return:
Response headers returned from the service call.
:rtype: dict(str, str)
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('lease_action', lease_action)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'lease',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'x-ms-lease-action': _to_str(lease_action),
'x-ms-lease-duration': _to_str(lease_duration),
'x-ms-lease-break-period': _to_str(lease_break_period),
'x-ms-proposed-lease-id': _to_str(proposed_lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
return self._perform_request(request, _parse_lease)
def acquire_blob_lease(self, container_name, blob_name,
lease_duration=-1,
proposed_lease_id=None,
if_modified_since=None,
if_unmodified_since=None,
if_match=None,
if_none_match=None, timeout=None):
'''
Requests a new lease. If the blob does not have an active lease, the Blob
service creates a lease on the blob and returns a new lease ID.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int lease_duration:
Specifies the duration of the lease, in seconds, or negative one
(-1) for a lease that never expires. A non-infinite lease can be
between 15 and 60 seconds. A lease duration cannot be changed
using renew or change. Default is -1 (infinite lease).
:param str proposed_lease_id:
Proposed lease ID, in a GUID string format. The Blob service
returns 400 (Invalid request) if the proposed lease ID is not
in the correct format.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: the lease ID of the newly created lease.
:return: str
'''
_validate_not_none('lease_duration', lease_duration)
if lease_duration != -1 and \
(lease_duration < 15 or lease_duration > 60):
raise ValueError(_ERROR_INVALID_LEASE_DURATION)
lease = self._lease_blob_impl(container_name,
blob_name,
_LeaseActions.Acquire,
None, # lease_id
lease_duration,
None, # lease_break_period
proposed_lease_id,
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout)
return lease['id']
def renew_blob_lease(self, container_name, blob_name,
lease_id, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None):
'''
Renews the lease. The lease can be renewed if the lease ID specified on
the request matches that associated with the blob. Note that the lease may
be renewed even if it has expired as long as the blob has not been modified
or leased again since the expiration of that lease. When you renew a lease,
the lease duration clock resets.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str lease_id:
Lease ID for active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: the lease ID of the renewed lease.
:return: str
'''
_validate_not_none('lease_id', lease_id)
lease = self._lease_blob_impl(container_name,
blob_name,
_LeaseActions.Renew,
lease_id,
None, # lease_duration
None, # lease_break_period
None, # proposed_lease_id
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout)
return lease['id']
def release_blob_lease(self, container_name, blob_name,
lease_id, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, timeout=None):
'''
Releases the lease. The lease may be released if the lease ID specified on the
request matches that associated with the blob. Releasing the lease allows another
client to immediately acquire the lease for the blob as soon as the release is complete.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str lease_id:
Lease ID for active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('lease_id', lease_id)
self._lease_blob_impl(container_name,
blob_name,
_LeaseActions.Release,
lease_id,
None, # lease_duration
None, # lease_break_period
None, # proposed_lease_id
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout)
def break_blob_lease(self, container_name, blob_name,
lease_break_period=None,
if_modified_since=None,
if_unmodified_since=None,
if_match=None,
if_none_match=None, timeout=None):
'''
Breaks the lease, if the blob has an active lease. Once a lease is broken,
it cannot be renewed. Any authorized request can break the lease; the request
is not required to specify a matching lease ID. When a lease is broken,
the lease break period is allowed to elapse, during which time no lease operation
except break and release can be performed on the blob. When a lease is successfully
broken, the response indicates the interval in seconds until a new lease can be acquired.
A lease that has been broken can also be released, in which case another client may
immediately acquire the lease on the blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param int lease_break_period:
For a break operation, this is the proposed duration of
seconds that the lease should continue before it is broken, between
0 and 60 seconds. This break period is only used if it is shorter
than the time remaining on the lease. If longer, the time remaining
on the lease is used. A new lease will not be available before the
break period has expired, but the lease may be held for longer than
the break period. If this header does not appear with a break
operation, a fixed-duration lease breaks after the remaining lease
period elapses, and an infinite lease breaks immediately.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: Approximate time remaining in the lease period, in seconds.
:return: int
'''
if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60):
raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD)
lease = self._lease_blob_impl(container_name,
blob_name,
_LeaseActions.Break,
None, # lease_id
None, # lease_duration
lease_break_period,
None, # proposed_lease_id
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout)
return lease['time']
def change_blob_lease(self, container_name, blob_name,
lease_id,
proposed_lease_id,
if_modified_since=None,
if_unmodified_since=None,
if_match=None,
if_none_match=None, timeout=None):
'''
Changes the lease ID of an active lease. A change must include the current
lease ID and a new lease ID.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str lease_id:
Required if the blob has an active lease.
:param str proposed_lease_id:
Proposed lease ID, in a GUID string format. The Blob service returns
400 (Invalid request) if the proposed lease ID is not in the correct format.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
self._lease_blob_impl(container_name,
blob_name,
_LeaseActions.Change,
lease_id,
None, # lease_duration
None, # lease_break_period
proposed_lease_id,
if_modified_since,
if_unmodified_since,
if_match,
if_none_match,
timeout)
def snapshot_blob(self, container_name, blob_name,
metadata=None, if_modified_since=None,
if_unmodified_since=None, if_match=None,
if_none_match=None, lease_id=None, timeout=None, cpk=None):
'''
Creates a read-only snapshot of a blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param metadata:
Specifies a user-defined name-value pair associated with the blob.
If no name-value pairs are specified, the operation will copy the
base blob metadata to the snapshot. If one or more name-value pairs
are specified, the snapshot is created with the specified metadata,
and metadata is not copied from the base blob.
:type metadata: dict(str, str)
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param str lease_id:
Required if the blob has an active lease.
:param ~azure.storage.blob.models.CustomerProvidedEncryptionKey cpk:
Encrypts the data on the service-side with the given key.
Use of customer-provided keys must be done over HTTPS.
As the encryption key itself is provided in the request,
a secure connection must be established to transfer the key.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: snapshot properties
:rtype: :class:`~azure.storage.blob.models.Blob`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'snapshot',
'timeout': _int_to_str(timeout),
}
request.headers = {
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
'x-ms-lease-id': _to_str(lease_id)
}
_validate_and_add_cpk_headers(request, encryption_key=cpk, protocol=self.protocol)
_add_metadata_headers(metadata, request)
return self._perform_request(request, _parse_snapshot_blob, [blob_name])
def copy_blob(self, container_name, blob_name, copy_source,
metadata=None,
source_if_modified_since=None,
source_if_unmodified_since=None,
source_if_match=None, source_if_none_match=None,
destination_if_modified_since=None,
destination_if_unmodified_since=None,
destination_if_match=None,
destination_if_none_match=None,
destination_lease_id=None,
source_lease_id=None, timeout=None):
'''
Copies a blob asynchronously. This operation returns a copy operation
properties object, including a copy ID you can use to check or abort the
copy operation. The Blob service copies blobs on a best-effort basis.
The source blob for a copy operation may be a block blob, an append blob,
or a page blob. If the destination blob already exists, it must be of the
same blob type as the source blob. Any existing destination blob will be
overwritten. The destination blob cannot be modified while a copy operation
is in progress.
When copying from a page blob, the Blob service creates a destination page
blob of the source blob's length, initially containing all zeroes. Then
the source page ranges are enumerated, and non-empty ranges are copied.
For a block blob or an append blob, the Blob service creates a committed
blob of zero length before returning from this operation. When copying
from a block blob, all committed blocks and their block IDs are copied.
Uncommitted blocks are not copied. At the end of the copy operation, the
destination blob will have the same committed block count as the source.
When copying from an append blob, all committed blocks are copied. At the
end of the copy operation, the destination blob will have the same committed
block count as the source.
For all blob types, you can call get_blob_properties on the destination
blob to check the status of the copy operation. The final blob will be
committed when the copy completes.
:param str container_name:
Name of the destination container. The container must exist.
:param str blob_name:
Name of the destination blob. If the destination blob exists, it will
be overwritten. Otherwise, it will be created.
:param str copy_source:
A URL of up to 2 KB in length that specifies an Azure file or blob.
The value should be URL-encoded as it would appear in a request URI.
If the source is in another account, the source must either be public
or must be authenticated via a shared access signature. If the source
is public, no authentication is required.
Examples:
https://myaccount.blob.core.windows.net/mycontainer/myblob
https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
:param metadata:
Name-value pairs associated with the blob as metadata. If no name-value
pairs are specified, the operation will copy the metadata from the
source blob or file to the destination blob. If one or more name-value
pairs are specified, the destination blob is created with the specified
metadata, and metadata is not copied from the source blob or file.
:type metadata: dict(str, str)
:param datetime source_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the source
blob has been modified since the specified date/time.
:param datetime source_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only if the source blob
has not been modified since the specified date/time.
:param ETag source_if_match:
An ETag value, or the wildcard character (*). Specify this conditional
header to copy the source blob only if its ETag matches the value
specified. If the ETag values do not match, the Blob service returns
status code 412 (Precondition Failed). This header cannot be specified
if the source is an Azure File.
:param ETag source_if_none_match:
An ETag value, or the wildcard character (*). Specify this conditional
header to copy the blob only if its ETag does not match the value
specified. If the values are identical, the Blob service returns status
code 412 (Precondition Failed). This header cannot be specified if the
source is an Azure File.
:param datetime destination_if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has been modified since the specified date/time.
If the destination blob has not been modified, the Blob service returns
status code 412 (Precondition Failed).
:param datetime destination_if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this conditional header to copy the blob only
if the destination blob has not been modified since the specified
date/time. If the destination blob has been modified, the Blob service
returns status code 412 (Precondition Failed).
:param ETag destination_if_match:
An ETag value, or the wildcard character (*). Specify an ETag value for
this conditional header to copy the blob only if the specified ETag value
matches the ETag value for an existing destination blob. If the ETag for
the destination blob does not match the ETag specified for If-Match, the
Blob service returns status code 412 (Precondition Failed).
:param ETag destination_if_none_match:
An ETag value, or the wildcard character (*). Specify an ETag value for
this conditional header to copy the blob only if the specified ETag value
does not match the ETag value for the destination blob. Specify the wildcard
character (*) to perform the operation only if the destination blob does not
exist. If the specified condition isn't met, the Blob service returns status
code 412 (Precondition Failed).
:param str destination_lease_id:
The lease ID specified for this header must match the lease ID of the
destination blob. If the request does not include the lease ID or it is not
valid, the operation fails with status code 412 (Precondition Failed).
:param str source_lease_id:
Specify this to perform the Copy Blob operation only if
the lease ID given matches the active lease ID of the source blob.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: Copy operation properties such as status, source, and ID.
:rtype: :class:`~azure.storage.blob.models.CopyProperties`
'''
return self._copy_blob(container_name, blob_name, copy_source,
metadata,
None,
source_if_modified_since, source_if_unmodified_since,
source_if_match, source_if_none_match,
destination_if_modified_since,
destination_if_unmodified_since,
destination_if_match,
destination_if_none_match,
destination_lease_id,
source_lease_id, timeout,
False, False)
def _copy_blob(self, container_name, blob_name, copy_source,
               metadata=None,
               premium_page_blob_tier=None,
               source_if_modified_since=None,
               source_if_unmodified_since=None,
               source_if_match=None, source_if_none_match=None,
               destination_if_modified_since=None,
               destination_if_unmodified_since=None,
               destination_if_match=None,
               destination_if_none_match=None,
               destination_lease_id=None,
               source_lease_id=None, timeout=None,
               incremental_copy=False,
               requires_sync=None,
               standard_blob_tier=None,
               rehydrate_priority=None):
    '''
    See copy_blob for more details. This helper method
    allows for standard copies as well as incremental copies which are only supported for page blobs and sync
    copies which are only supported for block blobs.

    :param bool incremental_copy:
        Performs an incremental copy operation on a page blob instead of a standard copy operation.
    :param bool requires_sync:
        Enforces that the service will not return a response until the copy is complete.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('copy_source', copy_source)

    if copy_source.startswith('/'):
        # Backwards compatibility for earlier versions of the SDK where
        # the copy source can be in the following formats:
        # - Blob in named container:
        #     /accountName/containerName/blobName
        # - Snapshot in named container:
        #     /accountName/containerName/blobName?snapshot=<DateTime>
        # - Blob in root container:
        #     /accountName/blobName
        # - Snapshot in root container:
        #     /accountName/blobName?snapshot=<DateTime>
        # Strip the leading '/accountName/' and rebuild an absolute URL
        # against this client's own endpoint.  (`account` is parsed but
        # intentionally unused: the copy always targets this account.)
        account, _, source = \
            copy_source.partition('/')[2].partition('/')
        copy_source = self.protocol + '://' + \
                      self.primary_endpoint + '/' + source

    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    # Incremental (page-blob) copies use a dedicated 'comp' query component;
    # standard and sync copies are a plain PUT with copy headers.
    if incremental_copy:
        request.query = {
            'comp': 'incrementalcopy',
            'timeout': _int_to_str(timeout),
        }
    else:
        request.query = {'timeout': _int_to_str(timeout)}
    request.headers = {
        'x-ms-copy-source': _to_str(copy_source),
        'x-ms-source-if-modified-since': _to_str(source_if_modified_since),
        'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since),
        'x-ms-source-if-match': _to_str(source_if_match),
        'x-ms-source-if-none-match': _to_str(source_if_none_match),
        'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since),
        'If-Match': _to_str(destination_if_match),
        'If-None-Match': _to_str(destination_if_none_match),
        'x-ms-lease-id': _to_str(destination_lease_id),
        'x-ms-source-lease-id': _to_str(source_lease_id),
        # Premium (page-blob) and standard (block-blob) tiers are mutually
        # exclusive; whichever is set wins.
        'x-ms-access-tier': _to_str(premium_page_blob_tier) or _to_str(standard_blob_tier),
        'x-ms-requires-sync': _to_str(requires_sync),
        'x-ms-rehydrate-priority': _to_str(rehydrate_priority)
    }
    _add_metadata_headers(metadata, request)

    # The service answers with the destination blob's properties; only the
    # copy status/ID block is of interest to callers.
    return self._perform_request(request, _parse_properties, [BlobProperties]).copy
def abort_copy_blob(self, container_name, blob_name, copy_id,
                    lease_id=None, timeout=None):
    '''
    Aborts a pending copy_blob operation, and leaves a destination blob
    with zero length and full metadata.

    :param str container_name:
        Name of destination container.
    :param str blob_name:
        Name of destination blob.
    :param str copy_id:
        Copy identifier provided in the copy.id of the original
        copy_blob operation.
    :param str lease_id:
        Required if the destination blob has an active infinite lease.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)
    _validate_not_none('copy_id', copy_id)

    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    # 'comp=copy' plus the copy id addresses the pending copy operation.
    request.query = {
        'comp': 'copy',
        'copyid': _to_str(copy_id),
        'timeout': _int_to_str(timeout),
    }
    # The 'abort' action header tells the service to cancel that copy.
    request.headers = {
        'x-ms-lease-id': _to_str(lease_id),
        'x-ms-copy-action': 'abort',
    }

    self._perform_request(request)
def delete_blob(self, container_name, blob_name, snapshot=None,
                lease_id=None, delete_snapshots=None,
                if_modified_since=None, if_unmodified_since=None,
                if_match=None, if_none_match=None, timeout=None):
    '''
    Marks the specified blob or snapshot for deletion.
    The blob is later deleted during garbage collection.

    Note that in order to delete a blob, you must delete all of its
    snapshots. You can delete both at the same time with the Delete
    Blob operation.

    If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
    and retains the blob or snapshot for specified number of days.
    After specified number of days, blob's data is removed from the service during garbage collection.
    Soft deleted blob or snapshot is accessible through List Blobs API specifying include=Include.Deleted option.
    Soft-deleted blob or snapshot can be restored using Undelete API.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param str snapshot:
        The snapshot parameter is an opaque DateTime value that,
        when present, specifies the blob snapshot to delete.
    :param str lease_id:
        Required if the blob has an active lease.
    :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots:
        Required if the blob has associated snapshots.
    :param datetime if_modified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only
        if the resource has been modified since the specified time.
    :param datetime if_unmodified_since:
        A DateTime value. Azure expects the date value passed in to be UTC.
        If timezone is included, any non-UTC datetimes will be converted to UTC.
        If a date is passed in without timezone info, it is assumed to be UTC.
        Specify this header to perform the operation only if
        the resource has not been modified since the specified date/time.
    :param str if_match:
        An ETag value, or the wildcard character (*). Specify this header to perform
        the operation only if the resource's ETag matches the value specified.
    :param str if_none_match:
        An ETag value, or the wildcard character (*). Specify this header
        to perform the operation only if the resource's ETag does not match
        the value specified. Specify the wildcard character (*) to perform
        the operation only if the resource does not exist, and fail the
        operation if it does exist.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    # Delegates to the shared request builder (also used for batch deletes)
    # and issues the request immediately.  Argument validation happens in
    # the builder.
    request = self._get_basic_delete_blob_http_request(container_name,
                                                       blob_name,
                                                       snapshot=snapshot,
                                                       lease_id=lease_id,
                                                       delete_snapshots=delete_snapshots,
                                                       if_modified_since=if_modified_since,
                                                       if_unmodified_since=if_unmodified_since,
                                                       if_match=if_match,
                                                       if_none_match=if_none_match,
                                                       timeout=timeout)

    self._perform_request(request)
def batch_delete_blobs(self, batch_delete_sub_requests, timeout=None):
    '''
    Sends a batch of multiple blob delete requests.

    The blob delete method deletes the specified blob or snapshot. Note that deleting a blob also deletes all its
    snapshots. For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob

    :param list(BatchDeleteSubRequest) batch_delete_sub_requests:
        The blob delete requests to send as a batch.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: parsed batch delete HTTP response
    :rtype: list of :class:`~azure.storage.blob.models.BatchSubResponse`
    '''
    # Service limit: a batch carries between 1 and 256 sub-requests.
    self._check_batch_request(batch_delete_sub_requests)

    request = HTTPRequest()
    request.method = 'POST'
    request.host_locations = self._get_host_locations()
    request.path = _get_path()
    # The batch id doubles as the MIME multipart boundary of the body.
    batch_id = str(uuid.uuid1())
    request.headers = {
        'Content-Type': "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False),
    }
    request.query = {
        'comp': 'batch',
        'timeout': _int_to_str(timeout)
    }

    # Build one signed sub-request per delete; its position in the list is
    # used as its Content-ID so responses can be correlated.
    batch_http_requests = []
    for batch_delete_sub_request in batch_delete_sub_requests:
        batch_delete_sub_http_request = self._construct_batch_delete_sub_http_request(len(batch_http_requests),
                                                                                      batch_delete_sub_request)
        batch_http_requests.append(batch_delete_sub_http_request)

    request.body = _serialize_batch_body(batch_http_requests, batch_id)

    return self._perform_request(request, parser=_ingest_batch_response, parser_args=[batch_delete_sub_requests])
def _construct_batch_delete_sub_http_request(self, content_id, batch_delete_sub_request):
    """
    Construct an HTTPRequest instance from a batch delete sub-request.

    :param int content_id:
        the index of sub-request in the list of sub-requests
    :param ~azure.storage.blob.models.BatchDeleteSubRequest batch_delete_sub_request:
        one of the delete request to be sent in a batch
    :return: HTTPRequest parsed from batch delete sub-request
    :rtype: :class:`~azure.storage.common._http.HTTPRequest`
    """
    # Note: no per-sub-request timeout — the outer batch request's timeout
    # governs the whole operation.
    request = self._get_basic_delete_blob_http_request(batch_delete_sub_request.container_name,
                                                       batch_delete_sub_request.blob_name,
                                                       snapshot=batch_delete_sub_request.snapshot,
                                                       lease_id=batch_delete_sub_request.lease_id,
                                                       delete_snapshots=batch_delete_sub_request.delete_snapshots,
                                                       if_modified_since=batch_delete_sub_request.if_modified_since,
                                                       if_unmodified_since=batch_delete_sub_request.if_unmodified_since,
                                                       if_match=batch_delete_sub_request.if_match,
                                                       if_none_match=batch_delete_sub_request.if_none_match)

    # Multipart framing headers required for each part of the batch body.
    request.headers.update({
        'Content-ID': _int_to_str(content_id),
        'Content-Length': _int_to_str(0),
        'Content-Transfer-Encoding': 'binary',
    })

    _update_request(request, None, self._USER_AGENT_STRING)

    # sub-request will use the batch request id automatically, no need to generate a separate one
    request.headers.pop('x-ms-client-request-id', None)

    # Each sub-request carries its own date header and authorization
    # signature, independent of the enclosing batch request.
    _add_date_header(request)
    self.authentication.sign_request(request)

    return request
def _get_basic_delete_blob_http_request(self, container_name, blob_name, snapshot=None, lease_id=None,
                                        delete_snapshots=None, if_modified_since=None, if_unmodified_since=None,
                                        if_match=None, if_none_match=None, timeout=None):
    """
    Construct a basic HTTPRequest instance for delete blob

    Shared by delete_blob (single delete) and the batch delete path.
    For more information about the parameters please see delete_blob.

    :return: an HTTPRequest for delete blob
    :rtype :class:`~azure.storage.common._http.HTTPRequest`
    """
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)

    request = HTTPRequest()
    request.method = 'DELETE'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    # Conditional and lease headers; unset values serialize to None and are
    # dropped by the transport layer.
    request.headers = {
        'x-ms-lease-id': _to_str(lease_id),
        'x-ms-delete-snapshots': _to_str(delete_snapshots),
        'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
        'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
        'If-Match': _to_str(if_match),
        'If-None-Match': _to_str(if_none_match),
    }
    request.query = {
        'snapshot': _to_str(snapshot),
        'timeout': _int_to_str(timeout)
    }

    return request
@staticmethod
def _check_batch_request(request):
if request is None or len(request) < 1 or len(request) > 256:
raise ValueError("Batch request should take 1 to 256 sub-requests")
def undelete_blob(self, container_name, blob_name, timeout=None):
    '''
    The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot.
    Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes.

    :param str container_name:
        Name of existing container.
    :param str blob_name:
        Name of existing blob.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('container_name', container_name)
    _validate_not_none('blob_name', blob_name)

    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(container_name, blob_name)
    # 'comp=undelete' selects the Undelete Blob operation; no body or
    # extra headers are required.
    request.query = {
        'comp': 'undelete',
        'timeout': _int_to_str(timeout)
    }

    self._perform_request(request)
|
OnroerendErfgoed/VioeConvertor | refs/heads/master | config/__init__.py | 12133432 | |
williamn/notejam | refs/heads/master | django/notejam/users/__init__.py | 12133432 | |
MentorEmbedded/qmtest | refs/heads/master | qm/dist/__init__.py | 12133432 | |
MartinPetkov/django-security | refs/heads/master | security/migrations/__init__.py | 12133432 | |
Opentaste/bombolone | refs/heads/master | bombolone/tests/python/__init__.py | 12133432 | |
illicitonion/givabit | refs/heads/master | lib/sdks/google_appengine_1.7.1/google_appengine/lib/PyAMF/doc/tutorials/examples/actionscript/bytearray/python/client.py | 8 | # Copyright (c) The PyAMF Project.
# See LICENSE for details.
"""
Python ByteArray example.
@since: 0.5
"""
import os
from optparse import OptionParser
from gateway import images_root
from pyamf.amf3 import ByteArray
from pyamf.remoting.client import RemotingService
# parse commandline options
parser = OptionParser()
parser.add_option("-p", "--port", default=8000,
dest="port", help="port number [default: %default]")
parser.add_option("--host", default="127.0.0.1",
dest="host", help="host address [default: %default]")
(options, args) = parser.parse_args()
# define gateway
url = 'http://%s:%d' % (options.host, int(options.port))
server = RemotingService(url)
service = server.getService('getSnapshots')()
# get list of snapshots
base_path = service[0]
types = service[1]
snapshots = service[2]
print "Found %d snapshot(s):" % (len(snapshots))
for snapshot in snapshots:
print "\t%s%s" % (base_path, snapshot['name'])
# save snapshot
path = 'django-logo.jpg'
image = os.path.join(images_root, path)
file = open(image, 'r').read()
snapshot = ByteArray()
snapshot.write(file)
save_snapshot = server.getService('ByteArray.saveSnapshot')
saved = save_snapshot(snapshot, 'jpg')
print "Saved snapshot:\n\t%s:\t%s" % (saved['name'], saved['url'])
|
anryko/ansible | refs/heads/devel | lib/ansible/module_utils/network/junos/config/l2_interfaces/__init__.py | 12133432 | |
leonardowolf/bookfree | refs/heads/master | app/controllers/__init__.py | 12133432 | |
lliss/model-my-watershed | refs/heads/develop | src/mmw/apps/core/migrations/__init__.py | 12133432 | |
NixOS/nixops | refs/heads/master | nixops/ssh_util.py | 1 | # -*- coding: utf-8 -*-
import atexit
import os
import shlex
import subprocess
import sys
import time
import weakref
from tempfile import mkdtemp
from typing import Dict, Any, Optional, Callable, List, Union, Iterable, Tuple, cast
import nixops.util
from nixops.logger import MachineLogger
__all__ = ["SSHConnectionFailed", "SSHCommandFailed", "SSH"]
class SSHConnectionFailed(Exception):
    """Raised when an SSH (master) connection could not be established."""
    pass
class SSHCommandFailed(nixops.util.CommandFailed):
    """Raised when a command executed over SSH fails (carries message and exit code)."""
    pass
class SSHMaster(object):
    """Owns one OpenSSH ControlMaster (multiplexing) process.

    Subsequent ``ssh`` invocations reuse the authenticated connection by
    passing ``self.opts`` (the -oControlPath flag).  The control socket
    lives in a self-deleting temporary directory; the master is shut down
    on interpreter exit, garbage collection, or explicit ``shutdown()``.
    """

    def __init__(
        self,
        target: str,
        logger: MachineLogger,
        ssh_flags: List[str] = [],
        passwd: Optional[str] = None,
        user: Optional[str] = None,
        compress: bool = False,
        ssh_quiet: Optional[bool] = False,
    ) -> None:
        # True once the master process is up; checked by shutdown().
        self._running: bool = False
        # Temp dir removes itself (and the control socket) when collected.
        self._tempdir: nixops.util.SelfDeletingDir = nixops.util.SelfDeletingDir(
            mkdtemp(prefix="nixops-ssh-tmp")
        )
        self._askpass_helper: Optional[str] = None
        self._control_socket: str = self._tempdir + "/master-socket"
        self._ssh_target: str = target
        # With an explicit identity file and no user override, disable
        # password prompting entirely; otherwise allow up to 3 prompts.
        pass_prompts: int = 0 if "-i" in ssh_flags and user is None else 3
        kwargs: Dict[str, Any] = {}
        if passwd is not None:
            # Password auth: feed the password through an SSH_ASKPASS
            # helper.  A (fake) DISPLAY plus a detached session (setsid)
            # and closed stdin force ssh to use the askpass program.
            self._askpass_helper = self._make_askpass_helper()
            newenv = dict(os.environ)
            newenv.update(
                {
                    "DISPLAY": ":666",
                    "SSH_ASKPASS": self._askpass_helper,
                    "NIXOPS_SSH_PASSWORD": passwd,
                }
            )
            kwargs["env"] = newenv
            kwargs["stdin"] = nixops.util.devnull
            kwargs["preexec_fn"] = os.setsid
            pass_prompts = 1
        if ssh_quiet:
            kwargs["capture_stderr"] = False
        # -M/-N/-f: become a master, run no command, go to background.
        cmd = (
            [
                "ssh",
                "-x",
                self._ssh_target,
                "-S",
                self._control_socket,
                "-M",
                "-N",
                "-f",
                "-oNumberOfPasswordPrompts={0}".format(pass_prompts),
                "-oServerAliveInterval=60",
                "-oControlPersist=600",
            ]
            + (["-C"] if compress else [])
            + ssh_flags
        )
        res = nixops.util.logged_exec(cmd, logger, **kwargs)
        if res != 0:
            raise SSHConnectionFailed(
                "unable to start SSH master connection to " "‘{0}’".format(target)
            )
        # Flags that clients must pass to multiplex over this master.
        self.opts = ["-oControlPath={0}".format(self._control_socket)]
        # Poll for the control socket to appear (up to ~60 seconds).
        timeout = 60.0
        while not self.is_alive():
            if timeout < 0:
                raise SSHConnectionFailed(
                    "could not establish an SSH master socket to "
                    "‘{0}’ within 60 seconds".format(target)
                )
            time.sleep(0.1)
            timeout -= 0.1
        self._running = True

        # Register an atexit hook through a weakref so the hook itself
        # does not keep this object alive.
        weakself = weakref.ref(self)

        def maybe_shutdown() -> None:
            realself = weakself()
            if realself is not None:
                realself.shutdown()

        atexit.register(maybe_shutdown)

    def is_alive(self) -> bool:
        """
        Check whether the control socket is still existing.
        """
        return os.path.exists(self._control_socket)

    def _make_askpass_helper(self) -> str:
        """
        Create a SSH_ASKPASS helper script, which just outputs the contents of
        the environment variable NIXOPS_SSH_PASSWORD.
        """
        path = os.path.join(self._tempdir, "nixops-askpass-helper")
        # O_NOFOLLOW + mode 0700: refuse symlinks and keep the helper
        # private to the current user.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_NOFOLLOW, 0o700)
        os.write(
            fd,
            """#!{0}
import sys
import os
sys.stdout.write(os.environ['NIXOPS_SSH_PASSWORD'])""".format(
                sys.executable
            ).encode(),
        )
        os.close(fd)
        return path

    def shutdown(self) -> None:
        """
        Shutdown master process and clean up temporary files.
        """
        # Idempotent: only the first call asks the master to exit.
        if not self._running:
            return
        self._running = False
        subprocess.call(
            ["ssh", self._ssh_target, "-S", self._control_socket, "-O", "exit"],
            stderr=nixops.util.devnull,
        )

    def __del__(self) -> None:
        self.shutdown()
Command = Union[str, Iterable[str]]
class SSH(object):
    """High-level helper for running commands on a host over multiplexed SSH.

    Host, extra flags and password are supplied lazily through registered
    callables so the connection details can change between calls.  A single
    SSHMaster is kept and reused while its control socket stays alive.
    """

    def __init__(self, logger: MachineLogger):
        """
        Initialize a SSH object with the specified Logger instance, which will
        be used to write SSH output to.
        """
        self._flag_fun: Callable[[], List[str]] = lambda: []
        self._host_fun: Optional[Callable[[], str]] = None
        self._passwd_fun: Callable[[], Optional[str]] = lambda: None
        self._logger = logger
        self._ssh_master: Optional[SSHMaster] = None
        self._compress = False
        # Prefix (e.g. sudo invocation) prepended when running as non-root.
        self.privilege_escalation_command: List[str] = []

    def register_host_fun(self, host_fun: Callable[[], str]) -> None:
        """
        Register a function which returns the hostname or IP to connect to. The
        function has to require no arguments.
        """
        self._host_fun = host_fun

    def _get_target(self, user: str) -> str:
        """Return the 'user@host' SSH destination string."""
        if self._host_fun is None:
            raise AssertionError("don't know which SSH host to connect to")
        return "{0}@{1}".format(user, self._host_fun())

    def register_flag_fun(self, flag_fun: Callable[[], List[str]]) -> None:
        """
        Register a function that is used for obtaining additional SSH flags.
        The function has to require no arguments and should return a list of
        strings, each being a SSH flag/argument.
        """
        self._flag_fun = flag_fun

    def _get_flags(self) -> List[str]:
        return self._flag_fun()

    def register_passwd_fun(self, passwd_fun: Callable[[], Optional[str]]) -> None:
        """
        Register a function that returns either a string or None and requires
        no arguments. If the return value is a string, the returned string is
        used for keyboard-interactive authentication, if it is None, no attempt
        is made to inject a password.
        """
        self._passwd_fun = passwd_fun

    def _get_passwd(self) -> Optional[str]:
        return self._passwd_fun()

    def reset(self) -> None:
        """
        Reset SSH master connection.
        """
        if self._ssh_master is not None:
            self._ssh_master.shutdown()
            self._ssh_master = None

    def get_master(
        self,
        user: str,
        flags: List[str] = [],
        timeout: Optional[int] = None,
        tries: int = 5,
        ssh_quiet: Optional[bool] = False,
    ) -> SSHMaster:
        """
        Start (if necessary) an SSH master connection to speed up subsequent
        SSH sessions. Returns the SSHMaster instance on success.
        """
        flags = flags + self._get_flags()
        # Reuse a live master; drop a dead one before reconnecting.
        if self._ssh_master is not None:
            master = weakref.proxy(self._ssh_master)
            if master.is_alive():
                return master
            else:
                master.shutdown()
        # An explicit connect timeout or a localhost target means a failure
        # is definitive — don't retry.
        if timeout is not None:
            flags = flags + ["-o", "ConnectTimeout={0}".format(timeout)]
            tries = 1
        if self._host_fun is None:
            raise AssertionError("don't know which SSH host to connect to")
        elif self._host_fun() == "localhost":
            tries = 1
        # Retry with exponential backoff until out of tries.
        sleep_time = 1
        while True:
            try:
                self._ssh_master = SSHMaster(
                    self._get_target(user),
                    self._logger,
                    flags,
                    self._get_passwd(),
                    user,
                    compress=self._compress,
                    ssh_quiet=ssh_quiet,
                )
                break
            except Exception:
                tries = tries - 1
                if tries == 0:
                    raise
                msg = "could not connect to ‘{0}’, retrying in {1} seconds..."
                self._logger.log(msg.format(self._get_target(user), sleep_time))
                time.sleep(sleep_time)
                sleep_time = sleep_time * 2
        return weakref.proxy(self._ssh_master)

    @classmethod
    def split_openssh_args(self, args: Iterable[str]) -> Tuple[List[str], Command]:
        """
        Splits the specified list of arguments into a tuple consisting of the
        list of flags and a list of strings for the actual command.
        """
        # OpenSSH single-letter options that consume a value argument.
        non_option_args = "bcDEeFIiLlmOopQRSWw"
        flags = []
        command = list(args)
        while len(command) > 0:
            arg = command.pop(0)
            if arg == "--":
                # Explicit end of options.
                break
            elif arg.startswith("-"):
                if len(command) > 0 and arg[1] in non_option_args:
                    flags.append(arg)
                    if len(arg) == 2:
                        # Value given as the next argument.
                        flags.append(command.pop(0))
                elif len(arg) > 2 and arg[1] != "-":
                    # Clustered short options (-ab) — peel off the first and
                    # push the remainder back for the next iteration.
                    flags.append(arg[:2])
                    command.insert(0, "-" + arg[2:])
                else:
                    flags.append(arg)
            else:
                # First non-option argument starts the command.
                command.insert(0, arg)
                break
        return (flags, command)

    def _format_command(
        self, command: Command, user: str, allow_ssh_args: bool,
    ) -> Iterable[str]:
        """
        Helper method for run_command, which essentially prepares and properly
        escape the command. See run_command() for further description.
        """
        # Don't make assumptions about remote login shell
        cmd: List[str] = ["bash", "-c"]

        if isinstance(command, str):
            if allow_ssh_args:
                return shlex.split(command)
            else:
                cmd.append(command)
        # iterable
        elif allow_ssh_args:
            return command
        else:
            # Single-quote each argument so the remote shell sees it verbatim.
            cmd.append(
                " ".join(["'{0}'".format(arg.replace("'", r"'\''")) for arg in command])
            )

        if user and user != "root":
            cmd = self.privilege_escalation_command + cmd

        return ["--", nixops.util.shlex_join(cmd)]

    def run_command(
        self,
        command: Command,
        user: str,
        flags: List[str] = [],
        timeout: Optional[int] = None,
        logged: bool = True,
        allow_ssh_args: bool = False,
        connection_tries: int = 5,
        ssh_quiet: Optional[bool] = False,
        **kwargs: Any
    ) -> Union[str, int]:
        """
        Execute a 'command' on the current target host using SSH, passing
        'flags' as additional arguments to SSH. The command can be either a
        string or an iterable of strings, whereby if it's the latter, it will
        be joined with spaces and properly shell-escaped.

        If 'allow_ssh_args' is set to True, the specified command may contain
        SSH flags.

        The 'user' argument specifies the remote user to connect as. If unset
        or None, the default is "root".

        All keyword arguments except timeout and user are passed as-is to
        nixops.util.logged_exec(), though if you set 'logged' to False, the
        keyword arguments are passed as-is to subprocess.call() and the command
        is executed interactively with no logging.

        'timeout' specifies the SSH connection timeout.

        'ssh_quiet' spawns a master ssh session, if needed, with stderr suppressed
        """
        # Ensure a (possibly shared) master connection exists first.
        master = self.get_master(
            flags=flags,
            timeout=timeout,
            user=user,
            tries=connection_tries,
            ssh_quiet=bool(ssh_quiet),
        )
        flags = flags + self._get_flags()
        if logged:
            flags.append("-x")
        cmd = ["ssh"] + master.opts + flags
        cmd.append(self._get_target(user))
        cmd += self._format_command(command, user=user, allow_ssh_args=allow_ssh_args)
        if logged:
            try:
                return nixops.util.logged_exec(cmd, self._logger, **kwargs)
            except nixops.util.CommandFailed as exc:
                # Re-raise as an SSH-specific failure, keeping exit code.
                raise SSHCommandFailed(exc.message, exc.exitcode)
        else:
            check = kwargs.pop("check", True)
            res = subprocess.call(cmd, **kwargs)
            if check and res != 0:
                msg = "command ‘{0}’ failed on host ‘{1}’"
                err = msg.format(cmd, self._get_target(user))
                raise SSHCommandFailed(err, res)
            else:
                return res

    def run_command_get_stdout(
        self,
        command: Command,
        user: str,
        flags: List[str] = [],
        timeout: Optional[int] = None,
        logged: bool = True,
        allow_ssh_args: bool = False,
        **kwargs: Any
    ) -> str:
        """Run a command and return its captured stdout as a string."""
        assert kwargs.get("capture_stdout", True) is True
        kwargs["capture_stdout"] = True
        return cast(
            str,
            self.run_command(
                command=command,
                flags=flags,
                timeout=timeout,
                logged=logged,
                allow_ssh_args=allow_ssh_args,
                user=user,
                **kwargs
            ),
        )

    def run_command_get_status(
        self,
        command: Command,
        user: str,
        flags: List[str] = [],
        timeout: Optional[int] = None,
        logged: bool = True,
        allow_ssh_args: bool = False,
        **kwargs: Any
    ) -> int:
        """Run a command and return its integer exit status."""
        assert kwargs.get("capture_stdout", False) is False
        kwargs["capture_stdout"] = False
        return cast(
            int,
            self.run_command(
                command=command,
                flags=flags,
                timeout=timeout,
                logged=logged,
                allow_ssh_args=allow_ssh_args,
                user=user,
                **kwargs
            ),
        )

    def enable_compression(self) -> None:
        """Enable ssh -C compression for future master connections."""
        self._compress = True
|
TimBizeps/BachelorAP | refs/heads/master | V303_LockInVerstärker/auswertung.py | 1 | import matplotlib as mpl
mpl.use('pgf')
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
x, y, z = np.genfromtxt('messwerte.txt', unpack=True)
x=x*np.pi
def f(x, a, b):
return a*np.cos(x+b)
params, covariance = curve_fit(f, x, y)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '±', errors[0])
print('b =', params[1]+2*np.pi, '±', errors[1])
x_plot=np.linspace(0, 6.9)
plt.plot(x, y, 'rx', label="Messwerte")
plt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit-Funktion', linewidth=1)
plt.legend(loc="best")
plt.xticks([0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi],
[r"$0$", r"$\frac{1}{2} \pi$", r"$\pi$", r"$\frac{3}{2} \pi$", r"$2 \pi$"])
plt.xlabel(r'Phase $\phi$ im Bogenmaß')
plt.ylabel(r'Ausgangsspannung \,/\, $\si{\volt}$')
plt.title('Inaktiver Noise Generator')
plt.tight_layout()
plt.savefig("Plot.pdf")
|
kaarl/pyload | refs/heads/stable | module/plugins/crypter/BigfileToFolder.py | 2 | # -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class BigfileToFolder(SimpleCrypter):
    # pyload crypter plugin: expands a bigfile.to (formerly uploadable.ch)
    # folder listing URL into its individual download links.  All behavior
    # is declarative — the regexes below drive SimpleCrypter's generic
    # decrypt logic.
    __name__ = "BigfileToFolder"
    __type__ = "crypter"
    __version__ = "0.09"
    __status__ = "testing"

    __pattern__ = r'https?://(?:www\.)?(?:uploadable\.ch|bigfile\.to)/list/\w+'
    __config__ = [("activated" , "bool" , "Activated" , True ),
                  ("use_premium" , "bool" , "Use premium account if available" , True ),
                  ("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default"),
                  ("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]

    __description__ = """bigfile.to folder decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("guidobelix", "guidobelix@hotmail.it" ),
                   ("Walter Purcaro", "vuolter@gmail.com" ),
                   ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    # Rewrite legacy uploadable.ch URLs to the current domain before fetching.
    URL_REPLACEMENTS = [("https?://uploadable\.ch", "https://bigfile.to")]

    # Regexes applied to the folder page HTML.
    LINK_PATTERN = r'"(.+?)" class="icon_zipfile">'
    NAME_PATTERN = r'<div class="folder"><span> </span>(?P<N>.+?)</div>'
    OFFLINE_PATTERN = r'We are sorry... The URL you entered cannot be found on the server.'
    TEMP_OFFLINE_PATTERN = r'<div class="icon_err">'
|
sirex/atviriduomenys.lt | refs/heads/master | adlt/frontpage/api.py | 1 | from django.http import Http404
from django.contrib.auth.decorators import login_required
from django.db.models import F
from adlt.common.helpers import ajax
import adlt.core.models as core_models
@ajax.request('GET')
def agent_list(request):  # pylint: disable=unused-argument
    """Return every agent as a JSON-serializable list of {'pk', 'title'} dicts."""
    def as_dict(obj):
        # Shape one agent row for the AJAX response.
        return {
            'pk': obj.pk,
            'title': obj.title,
        }

    return [as_dict(obj) for obj in core_models.Agent.objects.all()]
@login_required
@ajax.request('GET')
def like_toggle(request, object_type, object_id):
    # Toggle the current user's "like" on a dataset or project, keeping the
    # denormalized `likes` counter on the target object in sync.
    object_types = {
        'dataset': core_models.Dataset,
        'project': core_models.Project,
    }

    if object_type not in object_types:
        raise Http404

    qs = core_models.Likes.objects.filter(user=request.user, object_type=object_type, object_id=object_id)
    if qs.exists():
        # Already liked: atomically decrement the counter (F() expression
        # avoids a read-modify-write) and remove the like row.
        object_types[object_type].objects.filter(pk=object_id).update(likes=F('likes') - 1)
        qs.delete()
    else:
        # Not yet liked: increment the counter and record the like.
        object_types[object_type].objects.filter(pk=object_id).update(likes=F('likes') + 1)
        core_models.Likes.objects.create(user=request.user, object_type=object_type, object_id=object_id)
    # NOTE(review): exists()-then-act is not race-free under concurrent
    # requests; presumably acceptable here — confirm if exact counts matter.

    return {'status': 'ok'}
|
shoheietzel/proj5-maps | refs/heads/master | env/lib/python3.6/site-packages/nose/commands.py | 11 | """
nosetests setuptools command
----------------------------
The easiest way to run tests with nose is to use the `nosetests` setuptools
command::
python setup.py nosetests
This command has one *major* benefit over the standard `test` command: *all
nose plugins are supported*.
To configure the `nosetests` command, add a [nosetests] section to your
setup.cfg. The [nosetests] section can contain any command line arguments that
nosetests supports. The differences between issuing an option on the command
line and adding it to setup.cfg are:
* In setup.cfg, the -- prefix must be excluded
* In setup.cfg, command line flags that take no arguments must be given an
argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
Here's an example [nosetests] setup.cfg section::
[nosetests]
verbosity=1
detailed-errors=1
with-coverage=1
cover-package=nose
debug=nose.loader
pdb=1
pdb-failures=1
If you commonly run nosetests with a large number of options, using
the nosetests setuptools command and configuring with setup.cfg can
make running your tests much less tedious. (Note that the same options
and format supported in setup.cfg are supported in all other config
files, and the nosetests script will also load config files.)
Another reason to run tests with the command is that the command will
install packages listed in your `tests_require`, as well as doing a
complete build of your package before running tests. For packages with
dependencies or that build C extensions, using the setuptools command
can be more convenient than building by hand and running the nosetests
script.
Bootstrapping
-------------
If you are distributing your project and want users to be able to run tests
without having to install nose themselves, add nose to the setup_requires
section of your setup()::
setup(
# ...
setup_requires=['nose>=1.0']
)
This will direct setuptools to download and activate nose during the setup
process, making the ``nosetests`` command available.
"""
try:
from setuptools import Command
except ImportError:
Command = nosetests = None
else:
from nose.config import Config, option_blacklist, user_config_files, \
flag, _bool
from nose.core import TestProgram
from nose.plugins import DefaultPluginManager
def get_user_options(parser):
    """Convert an optparse option list into a distutils option tuple list."""
    converted = []
    for option in parser.option_list:
        name = option._long_opts[0][2:]
        if name in option_blacklist:
            continue
        # Options that take a value get the trailing '=' marker that
        # distutils uses to distinguish them from boolean flags.
        if option.action not in ('store_true', 'store_false'):
            name = name + "="
        short = option._short_opts[0][1:] if option._short_opts else None
        converted.append((name, short, option.help or ""))
    return converted
class nosetests(Command):
    # setuptools command that runs the test suite through nose, exposing
    # every nose/plugin command-line option as a setuptools option.
    description = "Run unit tests using nosetests"
    # Built once at class-creation time from nose's own option parser.
    # (Double-underscore names are name-mangled, keeping them private.)
    __config = Config(files=user_config_files(),
                      plugins=DefaultPluginManager())
    __parser = __config.getParser()
    user_options = get_user_options(__parser)

    def initialize_options(self):
        """create the member variables, but change hyphens to
        underscores
        """
        # Maps attribute-style names (with '_') back to CLI names (with '-').
        self.option_to_cmds = {}
        for opt in self.__parser.option_list:
            cmd_name = opt._long_opts[0][2:]
            option_name = cmd_name.replace('-', '_')
            self.option_to_cmds[option_name] = cmd_name
            setattr(self, option_name, None)
        self.attr = None

    def finalize_options(self):
        """nothing to do here"""
        pass

    def run(self):
        """ensure tests are capable of being run, then
        run nose.main with a reconstructed argument list"""
        if getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            build_py = self.get_finalized_command('build_py')
            build_py.inplace = 0
            build_py.run()
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = bpy_cmd.build_lib

            # Build extensions
            egg_info = self.get_finalized_command('egg_info')
            egg_info.egg_base = build_path
            egg_info.run()

            build_ext = self.get_finalized_command('build_ext')
            build_ext.inplace = 0
            build_ext.run()
        else:
            self.run_command('egg_info')

            # Build extensions in-place
            build_ext = self.get_finalized_command('build_ext')
            build_ext.inplace = 1
            build_ext.run()

        # Fetch install/test dependencies so imports succeed during the run.
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(
                self.distribution.tests_require)

        # Reassemble a nosetests argv from whatever options were supplied
        # via setup.cfg or the command line, rooted at the egg base dir.
        ei_cmd = self.get_finalized_command("egg_info")
        argv = ['nosetests', '--where', ei_cmd.egg_base]
        for (option_name, cmd_name) in list(self.option_to_cmds.items()):
            if option_name in option_blacklist:
                continue
            value = getattr(self, option_name)
            if value is not None:
                argv.extend(
                    self.cfgToArg(option_name.replace('_', '-'), value))
        TestProgram(argv=argv, config=self.__config)

    def cfgToArg(self, optname, value):
        """Convert one (option, value) pair from setup.cfg into argv
        fragments, translating flag values (1/0, T/F, ...) for boolean
        options."""
        argv = []
        long_optname = '--' + optname
        opt = self.__parser.get_option(long_optname)
        if opt.action in ('store_true', 'store_false'):
            if not flag(value):
                raise ValueError("Invalid value '%s' for '%s'" % (
                    value, optname))
            # Boolean flags are emitted only when truthy; falsy means omit.
            if _bool(value):
                argv.append(long_optname)
        else:
            argv.extend([long_optname, value])
        return argv
|
tlksio/tlksio | refs/heads/develop | env/lib/python3.4/site-packages/logilab/common/logging_ext.py | 93 | # -*- coding: utf-8 -*-
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extends the logging module from the standard library."""
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from six import string_types
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
    """Bind the standard logging methods of *logger* onto *cls*.

    The logger itself is stored on the class, and each of the usual
    logging entry points becomes a class attribute bound to the logger.
    """
    cls.__logger = logger
    methods = ('debug', 'info', 'warning', 'error', 'critical', 'exception')
    for method_name in methods:
        setattr(cls, method_name, getattr(logger, method_name))
def xxx_cyan(record):
    """Color filter: return 'cyan' for records whose message contains 'XXX'."""
    return 'cyan' if 'XXX' in record.message else None
class ColorFormatter(logging.Formatter):
    """Formatter that wraps formatted records in ANSI color escapes.

    By default CRITICAL and ERROR render in red, WARNING in magenta, INFO
    in green and DEBUG in yellow.  The level-to-color mapping can be
    customized through the `colors` constructor argument (a dict).
    `self.colorfilters` is a list of callables that receive the LogRecord
    and return a color name or None; they are consulted only when the
    record's level has no color of its own.
    """

    def __init__(self, fmt=None, datefmt=None, colors=None):
        logging.Formatter.__init__(self, fmt, datefmt)
        self.colorfilters = []
        self.colors = dict(CRITICAL='red', ERROR='red', WARNING='magenta',
                           INFO='green', DEBUG='yellow')
        if colors is not None:
            assert isinstance(colors, dict)
            self.colors.update(colors)

    def format(self, record):
        msg = logging.Formatter.format(self, record)
        try:
            level_color = self.colors[record.levelname]
        except KeyError:
            # No level color: fall back to the custom color filters.
            for colorfilter in self.colorfilters:
                chosen = colorfilter(record)
                if chosen:
                    return colorize_ansi(msg, chosen)
            return msg
        return colorize_ansi(msg, level_color)
def set_color_formatter(logger=None, **kw):
    """Install a ColorFormatter on *logger* (the root logger by default).

    Extra keyword arguments are forwarded as-is to the ColorFormatter
    constructor.  The existing handler's format string is reused.
    """
    if logger is None:
        logger = logging.getLogger()
    if not logger.handlers:
        # Make sure there is at least one handler to decorate.
        logging.basicConfig()
    handler = logger.handlers[0]
    colorfmt = ColorFormatter(handler.formatter._fmt, **kw)
    colorfmt.colorfilters.append(xxx_cyan)
    handler.setFormatter(colorfmt)
# Default record layout and timestamp format, used by get_formatter() and
# init_log() below.
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None):
    """get an apropriate handler according to given parameters

    Selection order: debug mode logs to stderr; otherwise a syslog,
    stream, plain/watched file or timed-rotating file handler is built
    depending on `syslog`, `logfile` and `rotation_parameters`.
    """
    if os.environ.get('APYCOT_ROOT'):
        # NOTE(review): this assignment is unconditionally overwritten by the
        # if/elif/else chain below (every branch rebinds `handler`), so the
        # APYCOT_ROOT special case currently has no effect -- confirm intent.
        handler = logging.StreamHandler(sys.stdout)
    if debug:
        # debug mode: log to stderr
        handler = logging.StreamHandler()
    elif logfile is None:
        if syslog:
            from logging import handlers
            handler = handlers.SysLogHandler()
        else:
            handler = logging.StreamHandler()
    else:
        try:
            if rotation_parameters is None:
                if os.name == 'posix' and sys.version_info >= (2, 6):
                    # WatchedFileHandler reopens the file when it is moved,
                    # e.g. by logrotate.
                    from logging.handlers import WatchedFileHandler
                    handler = WatchedFileHandler(logfile)
                else:
                    handler = logging.FileHandler(logfile)
            else:
                from logging.handlers import TimedRotatingFileHandler
                handler = TimedRotatingFileHandler(
                    logfile, **rotation_parameters)
        except IOError:
            # Log file not writable: degrade gracefully to stderr.
            handler = logging.StreamHandler()
    return handler
def get_threshold(debug=False, logthreshold=None):
    """Return a numeric logging level.

    When *logthreshold* is None, default to DEBUG in debug mode, ERROR
    otherwise.  String thresholds (including the legacy LOG_* names mapped
    through THRESHOLD_MAP) resolve to the matching ``logging`` attribute.
    """
    if logthreshold is None:
        return logging.DEBUG if debug else logging.ERROR
    if isinstance(logthreshold, string_types):
        return getattr(logging, THRESHOLD_MAP.get(logthreshold, logthreshold))
    return logthreshold
def _colorable_terminal():
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if not isatty:
return False
if os.name == 'nt':
try:
from colorama import init as init_win32_colors
except ImportError:
return False
init_win32_colors()
return True
def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT):
    """Return a ColorFormatter on color-capable terminals, otherwise a
    plain logging.Formatter, both using the given format strings."""
    if not _colorable_terminal():
        return logging.Formatter(logformat, logdateformat)
    colorfmt = ColorFormatter(logformat, logdateformat)
    def col_fact(record):
        # Highlight XXX markers in cyan and 'kick' messages in red.
        if 'XXX' in record.message:
            return 'cyan'
        if 'kick' in record.message:
            return 'red'
    colorfmt.colorfilters.append(col_fact)
    return colorfmt
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
             logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None,
             rotation_parameters=None, handler=None):
    """init the log service

    Installs a single handler (built by get_handler() unless one is
    supplied), sets the root logger's level via get_threshold(), and
    attaches a formatter (colored in debug mode).  Returns the handler.
    """
    logger = logging.getLogger()
    if handler is None:
        handler = get_handler(debug, syslog, logfile, rotation_parameters)
    # only addHandler and removeHandler method while I would like a setHandler
    # method, so do it this way :$
    logger.handlers = [handler]
    logthreshold = get_threshold(debug, logthreshold)
    logger.setLevel(logthreshold)
    if fmt is None:
        if debug:
            # Colored output (when the terminal supports it) in debug mode.
            fmt = get_formatter(logformat=logformat, logdateformat=logdateformat)
        else:
            fmt = logging.Formatter(logformat, logdateformat)
    handler.setFormatter(fmt)
    return handler
# map logilab.common.logger thresholds to logging thresholds
# (consumed by get_threshold() above; unknown strings pass through as-is)
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
                 'LOG_INFO': 'INFO',
                 'LOG_NOTICE': 'INFO',
                 'LOG_WARN': 'WARNING',
                 'LOG_WARNING': 'WARNING',
                 'LOG_ERR': 'ERROR',
                 'LOG_ERROR': 'ERROR',
                 'LOG_CRIT': 'CRITICAL',
                 }
|
google-code-export/django-profile | refs/heads/master | userprofile/templatetags/avatars.py | 8 | # coding=UTF-8
from django.template import Library, Node, Template, TemplateSyntaxError, \
Variable
from django.utils.translation import ugettext as _
from userprofile.models import Avatar, AVATAR_SIZES, S3BackendNotFound, \
DEFAULT_AVATAR_SIZE, DEFAULT_AVATAR, DEFAULT_AVATAR_FOR_INACTIVES_USER, \
SAVE_IMG_PARAMS
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
import urllib
from cStringIO import StringIO
from django.conf import settings
try:
from PIL import Image
except ImportError:
import Image
# from PythonMagick import Image
#from utils.TuxieMagick import Image
import os
import urlparse
import time
from django.core.files.storage import default_storage
# Select the file storage backend: Amazon S3 when AWS credentials are
# configured in settings, the project's default storage otherwise.
if hasattr(settings, "AWS_SECRET_ACCESS_KEY"):
    try:
        from backends.S3Storage import S3Storage
        storage = S3Storage()
    except ImportError:
        # S3 was requested via settings but the backend is not importable.
        raise S3BackendNotFound
else:
    storage = default_storage
register = Library()
class ResizedThumbnailNode(Node):
    """Template node that renders the URL of a user's avatar at a given
    size, generating and caching the resized thumbnail on first use.

    (Python 2 code: uses cStringIO, urlparse and urllib.unquote.)
    """
    def __init__(self, size, username=None):
        # `size` may be a literal int or the name of a template variable.
        try:
            self.size = int(size)
        except:
            self.size = Variable(size)
        # Default to the "user" context variable when no username is given.
        if username:
            self.user = Variable(username)
        else:
            self.user = Variable("user")
    def render(self, context):
        # If size is not an int, then it's a Variable, so try to resolve it.
        size = self.size
        if not isinstance(size, int):
            size = int(self.size.resolve(context))
        # Only the configured avatar sizes are served.
        if not size in AVATAR_SIZES:
            return ''
        try:
            user = self.user.resolve(context)
            if DEFAULT_AVATAR_FOR_INACTIVES_USER and not user.is_active:
                # NOTE: bare `raise` with no active exception raises an error
                # itself, which the bare `except` below catches -- it is used
                # here as a jump to the default-avatar fallback.
                raise
            avatar = Avatar.objects.get(user=user, valid=True).image
            # S3 storage is addressed by name; local storage by filesystem path.
            if hasattr(settings, "AWS_SECRET_ACCESS_KEY"):
                avatar_path = avatar.name
            else:
                avatar_path = avatar.path
            if not storage.exists(avatar_path):
                raise
            base, filename = os.path.split(avatar_path)
            name, extension = os.path.splitext(filename)
            # Thumbnail naming scheme: "<name>.<size><ext>" next to the original.
            filename = os.path.join(base, "%s.%s%s" % (name, size, extension))
            base_url = avatar.url
        except:
            # Fallback for missing/invalid avatars or inactive users.
            avatar_path = DEFAULT_AVATAR
            avatar = open(avatar_path)
            base, filename = os.path.split(avatar_path)
            name, extension = os.path.splitext(filename)
            filename = os.path.join(base, "%s.%s%s" % (name, size, extension))
            base_url = filename.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
        url_tuple = urlparse.urlparse(base_url)
        url = urlparse.urljoin(urllib.unquote(urlparse.urlunparse(url_tuple)), "%s.%s%s" % (name, size, extension))
        # Generate and store the thumbnail once; later renders just return the URL.
        if not storage.exists(filename):
            thumb = Image.open(ContentFile(avatar.read()))
            img_format = thumb.format
            if not getattr(settings, 'CAN_ENLARGE_AVATAR', True ) or (thumb.size[0] > size or thumb.size[1] > size or not hasattr(thumb, 'resize')):
                # Shrink in place, preserving aspect ratio.
                thumb.thumbnail((size, size), Image.ANTIALIAS)
            else:
                # Enlarge to an exact square.
                thumb = thumb.resize((size, size), Image.BICUBIC)
            f = StringIO()
            try:
                thumb.save(f, img_format, **SAVE_IMG_PARAMS.get(img_format, {}))
            except:
                # Some formats reject the extra save parameters; retry plain.
                thumb.save(f, img_format)
            f.seek(0)
            storage.save(filename, ContentFile(f.read()))
        return url
@register.tag('avatar')
def Thumbnail(parser, token):
    """Template tag: {% avatar [size] [username] %}.

    Size defaults to DEFAULT_AVATAR_SIZE when omitted; username defaults
    to the "user" context variable.  (Python 2 raise syntax below.)
    """
    bits = token.contents.split()
    username = None
    if len(bits) > 3:
        raise TemplateSyntaxError, _(u"You have to provide only the size as \
            an integer (both sides will be equal) and optionally, the \
            username.")
    elif len(bits) == 3:
        username = bits[2]
    elif len(bits) < 2:
        # No size given: fall back to the configured default.
        bits.append(str(DEFAULT_AVATAR_SIZE))
    return ResizedThumbnailNode(bits[1], username)
|
SergeySatskiy/cdm-flowparser | refs/heads/master | tests/empty_brackets.py | 3 | a = ()
b = []
c = {}
def f():
return ()
def g():
return []
def h():
return {}
|
daoluan/decode-Django | refs/heads/master | Django-1.5.1/django/contrib/gis/gdal/__init__.py | 104 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existant file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
    from django.contrib.gis.gdal.driver import Driver
    from django.contrib.gis.gdal.datasource import DataSource
    from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION
    from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
    from django.contrib.gis.gdal.geometries import OGRGeometry
    HAS_GDAL = True
except Exception:
    # Any failure (missing C library, bad GDAL_LIBRARY_PATH, ...) simply
    # disables the GDAL-backed features.
    HAS_GDAL = False
try:
    from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
    # No ctypes, but don't raise an exception.
    pass
# These imports do not require the GDAL C library.
from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
|
janocat/odoo | refs/heads/8.0 | openerp/addons/test_exceptions/models.py | 336 | # -*- coding: utf-8 -*-
import openerp.exceptions
import openerp.osv.orm
import openerp.osv.osv
import openerp.tools.safe_eval
class m(openerp.osv.osv.Model):
    """ This model exposes a few methods that will raise the different
    exceptions that must be handled by the server (and its RPC layer)
    and the clients.
    """
    _name = 'test.exceptions.model'

    def generate_except_osv(self, cr, uid, ids, context=None):
        # title is ignored in the new (6.1) exceptions
        raise openerp.osv.osv.except_osv('title', 'description')

    def generate_except_orm(self, cr, uid, ids, context=None):
        # title is ignored in the new (6.1) exceptions
        raise openerp.osv.orm.except_orm('title', 'description')

    def generate_warning(self, cr, uid, ids, context=None):
        raise openerp.exceptions.Warning('description')

    def generate_redirect_warning(self, cr, uid, ids, context=None):
        # Point the client at the module's test action when warning.
        dummy, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'test_exceptions', 'action_test_exceptions')
        raise openerp.exceptions.RedirectWarning('description', action_id, 'go to the redirection')

    def generate_access_denied(self, cr, uid, ids, context=None):
        raise openerp.exceptions.AccessDenied()

    def generate_access_error(self, cr, uid, ids, context=None):
        raise openerp.exceptions.AccessError('description')

    def generate_exc_access_denied(self, cr, uid, ids, context=None):
        # Plain Exception whose message merely looks like AccessDenied.
        raise Exception('AccessDenied')

    def generate_undefined(self, cr, uid, ids, context=None):
        # Deliberately triggers an AttributeError.
        self.surely_undefined_symbol

    # The *_safe_eval variants raise the same exceptions from inside
    # safe_eval, exercising exception propagation through the sandbox.
    def generate_except_osv_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_except_osv, context)

    def generate_except_orm_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_except_orm, context)

    def generate_warning_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_warning, context)

    def generate_redirect_warning_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_redirect_warning, context)

    def generate_access_denied_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_access_denied, context)

    def generate_access_error_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_access_error, context)

    def generate_exc_access_denied_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_exc_access_denied, context)

    def generate_undefined_safe_eval(self, cr, uid, ids, context=None):
        self.generate_safe_eval(cr, uid, ids, self.generate_undefined, context)

    def generate_safe_eval(self, cr, uid, ids, f, context):
        # Run the given generator callback through the safe_eval sandbox.
        globals_dict = { 'generate': lambda *args: f(cr, uid, ids, context) }
        openerp.tools.safe_eval.safe_eval("generate()", mode='exec', globals_dict=globals_dict)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bnq4ever/gypgoogle | refs/heads/master | test/mac/gyptest-postbuild-multiple-configurations.py | 349 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a postbuild work in projects with multiple configurations.
"""
import TestGyp
import sys
# Postbuilds are a Mac/Xcode concept, so the test only runs on darwin.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  CHDIR = 'postbuild-multiple-configurations'
  test.run_gyp('test.gyp', chdir=CHDIR)
  # Build both configurations and check the postbuild ran for each.
  for configuration in ['Debug', 'Release']:
    test.set_configuration(configuration)
    test.build('test.gyp', test.ALL, chdir=CHDIR)
    test.built_file_must_exist('postbuild-file', chdir=CHDIR)
  test.pass_test()
|
noiselabs/box-linux-sync | refs/heads/master | src/noiselabs/box/main.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of box-linux-sync.
#
# Copyright (C) 2013 Vítor Brandão <noisebleed@noiselabs.org>
#
# box-linux-sync is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# box-linux-sync is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with box-linux-sync; if not, see
# <http://www.gnu.org/licenses/>.
from __future__ import print_function
import subprocess
import sys
from optparse import OptionParser
from noiselabs.box import __prog__, __version__
from noiselabs.box.output import BoxConsole
from noiselabs.box.setup import BoxSetup
class NoiselabsOptionParser(OptionParser):
    """OptionParser variant whose epilog is emitted verbatim.

    Stock optparse re-wraps the epilog text; overriding format_epilog
    keeps embedded newlines in the help output.
    """
    def format_epilog(self, formatter):
        # Return the epilog untouched instead of re-flowing it.
        return self.epilog
def box_main(args=None):
    """Command-line entry point: parse options and dispatch the command.

    @param args: command arguments (default: sys.argv[1:])
    @type args: list
    """
    if args is None:
        args = sys.argv[1:]
    prog = __prog__
    version = __version__
    description = "Box.com command-line interface"
    usage = "Usage: %prog [options] <command>"
    force_help = "forces the execution of every procedure even if the component " +\
        "is already installed and/or configured"
    log_help = "log output to ~/.noiselabs/box/box-sync.log"
    parser = NoiselabsOptionParser(
        usage=usage,
        prog=prog,
        version=version,
        description=description,
        epilog=
"""
Commands:
  check         check box-sync setup and dependencies
  setup         launch a setup wizard
  start         start sync service
  stop          stop sync service
  help          show this help message and exit
  uninstall     removes all configuration and cache files installed by box-sync

Workflow:
 $ box-sync check && box-sync setup
 $ box-sync start
"""
    )
    parser.add_option("-f", "--force", help=force_help, action="store_true",
                      dest="force")
    parser.add_option("-l", "--log", help=log_help, action="store_true",
                      dest="log")
    parser.add_option("-v", "--verbose", help="be verbose", action="store_true",
                      dest="verbose")
    opts, pargs = parser.parse_args(args=args)
    commands = ['check', 'help', 'start', 'stop', 'setup', 'uninstall']
    nargs = len(pargs)
    # Parse commands
    if nargs == 0:
        # parser.error() prints the message and exits the process.
        parser.error("no command given")
    elif pargs[0] not in commands:
        parser.error("unknown command '%s'" % pargs[0])
    else:
        command = pargs[0]
    if command == 'help':
        parser.print_help()
        sys.exit(0)
    bc = BoxConsole(opts, __prog__)
    setup = BoxSetup(bc)
    if command == 'check':
        setup.check()
    elif command == 'setup':
        setup.wizard()
    elif command == 'start':
        # The sync dir is mounted/unmounted via the system mount helpers.
        # NOTE(review): shell=True with an interpolated path breaks on paths
        # containing spaces/shell metacharacters -- confirm box_dir is safe.
        box_dir = setup.get_box_dir()
        bc.debug("Mounting '%s'..." % box_dir)
        cmd = "mount %s" % box_dir
        if subprocess.call(cmd, shell=True) != 0:
            bc.error("Failed to mount sync dir.")
            sys.exit(-1)
    elif command == 'stop':
        box_dir = setup.get_box_dir()
        bc.debug("Unmounting '%s'..." % box_dir)
        cmd = "umount %s" % box_dir
        if subprocess.call(cmd, shell=True) != 0:
            bc.error("Failed to unmount sync dir.")
            sys.exit(-1)
    elif command == 'uninstall':
        # NOTE(review): `setup` is already constructed above; this
        # re-assignment is redundant.
        setup = BoxSetup(bc)
        setup.uninstall()
|
aviarypl/mozilla-l10n-addons-server | refs/heads/master | src/olympia/tags/__init__.py | 12133432 | |
bhoberman/HackerRank | refs/heads/master | Algorithm/Strings/pangrams.py | 1 | s = input().strip().lower()
letters = 'abcdefghijklmnopqrstuvwxyz'
pangram = True
for letter in letters:
if letter not in s:
print('not pangram')
pangram = False
break
if pangram:
print('pangram')
|
yosukesuzuki/let-me-notify | refs/heads/master | project/kay/tests/ereporter_testapp/__init__.py | 16 | # -*- coding: utf-8 -*-
# Kay application: server_error_testapp
|
faust64/ansible | refs/heads/devel | lib/ansible/modules/packaging/os/openbsd_pkg.py | 11 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrik Lundin <patrik@sigterm.se>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: openbsd_pkg
author: "Patrik Lundin (@eest)"
version_added: "1.1"
short_description: Manage packages on OpenBSD.
description:
- Manage packages on OpenBSD using the pkg tools.
requirements: [ "python >= 2.5" ]
options:
name:
required: true
description:
- Name of the package.
state:
required: true
choices: [ present, latest, absent ]
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
build:
required: false
choices: [ yes, no ]
default: no
description:
- Build the package from source instead of downloading and installing
a binary. Requires that the port source tree is already installed.
Automatically builds and installs the 'sqlports' package, if it is
not already installed.
version_added: "2.1"
ports_dir:
required: false
default: /usr/ports
description:
- When used in combination with the 'build' option, allows overriding
the default ports source directory.
version_added: "2.1"
clean:
required: false
choices: [ yes, no ]
default: no
description:
- When updating or removing packages, delete the extra configuration
file(s) in the old packages which are annotated with @extra in
the packaging-list.
version_added: "2.3"
quick:
required: false
choices: [ yes, no ]
default: no
description:
- Replace or delete packages quickly; do not bother with checksums
before removing normal files.
version_added: "2.3"
'''
EXAMPLES = '''
# Make sure nmap is installed
- openbsd_pkg:
name: nmap
state: present
# Make sure nmap is the latest version
- openbsd_pkg:
name: nmap
state: latest
# Make sure nmap is not installed
- openbsd_pkg:
name: nmap
state: absent
# Make sure nmap is installed, build it from source if it is not
- openbsd_pkg:
name: nmap
state: present
build: yes
# Specify a pkg flavour with '--'
- openbsd_pkg:
name: vim--no_x11
state: present
# Specify the default flavour to avoid ambiguity errors
- openbsd_pkg:
name: vim--
state: present
# Specify a package branch (requires at least OpenBSD 6.0)
- openbsd_pkg:
name: python%3.5
state: present
# Update all packages on the system
- openbsd_pkg:
name: '*'
state: latest
# Purge a package and it's configuration files
- openbsd_pkg: name=mpd clean=yes state=absent
# Quickly remove a package without checking checksums
- openbsd_pkg: name=qt5 quick=yes state=absent
'''
import os
import platform
import re
import shlex
import sqlite3
from distutils.version import StrictVersion
# Function used for executing commands.
def execute_command(cmd, module):
    """Run *cmd* through module.run_command() without shell expansion.

    The command line is tokenized with shlex so run_command() receives an
    argument list (implying shell=False), which keeps characters such as
    '*' from being expanded by a shell.
    """
    return module.run_command(shlex.split(cmd))
# Function used to find out if a package is currently installed.
def get_package_state(names, pkg_spec, module):
    """Record installed-state information for every requested package.

    Queries 'pkg_info -Iq inst:<name>' per name; sets
    pkg_spec[name]['installed_state'] and, for installed packages,
    pkg_spec[name]['installed_names'] (a stem can match several packages).
    """
    for name in names:
        rc, stdout, stderr = execute_command("pkg_info -Iq inst:%s" % name, module)
        if stderr:
            module.fail_json(msg="failed in get_package_state(): " + stderr)
        if not stdout:
            pkg_spec[name]['installed_state'] = False
            continue
        # One installed package per output line.
        pkg_spec[name]['installed_names'] = stdout.splitlines()
        module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
        pkg_spec[name]['installed_state'] = True
# Function used to make sure a package is present.
def package_present(names, pkg_spec, module):
    """Install every package in *names* that is not already installed.

    Per-package results (rc/stdout/stderr/changed) are recorded in
    *pkg_spec*.  When module.params['build'] is true the package is built
    from the ports tree instead of being fetched as a binary.
    """
    build = module.params['build']
    for name in names:
        # It is possible package_present() has been called from package_latest().
        # In that case we do not want to operate on the whole list of names,
        # only the leftovers.
        if pkg_spec['package_latest_leftovers']:
            if name not in pkg_spec['package_latest_leftovers']:
                module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
                continue
            else:
                module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
        if module.check_mode:
            install_cmd = 'pkg_add -Imn'
        else:
            if build is True:
                port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
                if os.path.isdir(port_dir):
                    if pkg_spec[name]['flavor']:
                        flavors = pkg_spec[name]['flavor'].replace('-', ' ')
                        install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
                    elif pkg_spec[name]['subpackage']:
                        install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, pkg_spec[name]['subpackage'])
                    else:
                        install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
                else:
                    module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
            else:
                install_cmd = 'pkg_add -Im'
        if pkg_spec[name]['installed_state'] is False:
            # Attempt to install the package
            if build is True and not module.check_mode:
                # BUGFIX: the original passed `module` as run_command()'s
                # second positional argument, which is check_rc -- a truthy
                # value there makes run_command fail_json() on any nonzero
                # rc, defeating the rc inspection below.
                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
            else:
                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
            # The behaviour of pkg_add is a bit different depending on if a
            # specific version is supplied or not.
            #
            # When a specific version is supplied the return code will be 0 when
            # a package is found and 1 when it is not. If a version is not
            # supplied the tool will exit 0 in both cases.
            #
            # It is important to note that "version" relates to the
            # packages-specs(7) notion of a version. If using the branch syntax
            # (like "python%3.5") the version number is considered part of the
            # stem, and the pkg_add behavior behaves the same as if the name did
            # not contain a version (which it strictly speaking does not).
            if pkg_spec[name]['version'] or build is True:
                # Depend on the return code.
                module.debug("package_present(): depending on return code for name '%s'" % name)
                if pkg_spec[name]['rc']:
                    pkg_spec[name]['changed'] = False
            else:
                # Depend on stderr instead.
                module.debug("package_present(): depending on stderr for name '%s'" % name)
                if pkg_spec[name]['stderr']:
                    # There is a corner case where having an empty directory in
                    # installpath prior to the right location will result in a
                    # "file:/local/package/directory/ is empty" message on stderr
                    # while still installing the package, so we need to look for
                    # for a message like "packagename-1.0: ok" just in case.
                    if pkg_spec[name]['style'] == 'branch':
                        match = re.search("\W%s-[^:]+: ok\W" % pkg_spec[name]['pkgname'], pkg_spec[name]['stdout'])
                    else:
                        match = re.search("\W%s-[^:]+: ok\W" % name, pkg_spec[name]['stdout'])
                    if match:
                        # It turns out we were able to install the package.
                        module.debug("package_present(): we were able to install package for name '%s'" % name)
                    else:
                        # We really did fail, fake the return code.
                        module.debug("package_present(): we really did fail for name '%s'" % name)
                        pkg_spec[name]['rc'] = 1
                        pkg_spec[name]['changed'] = False
                else:
                    module.debug("package_present(): stderr was not set for name '%s'" % name)
                if pkg_spec[name]['rc'] == 0:
                    pkg_spec[name]['changed'] = True
        else:
            # Already installed: report a clean no-op for this package.
            pkg_spec[name]['rc'] = 0
            pkg_spec[name]['stdout'] = ''
            pkg_spec[name]['stderr'] = ''
            pkg_spec[name]['changed'] = False
# Function used to make sure a package is the latest available version.
def package_latest(names, pkg_spec, module):
    """Upgrade installed packages to the latest version; packages that are
    not installed yet are collected and handed to package_present().

    Not supported in combination with build=yes.
    """
    if module.params['build'] is True:
        module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
    upgrade_cmd = 'pkg_add -um'
    if module.check_mode:
        upgrade_cmd += 'n'
    if module.params['clean']:
        upgrade_cmd += 'c'
    if module.params['quick']:
        upgrade_cmd += 'q'
    for name in names:
        if pkg_spec[name]['installed_state'] is True:
            # Attempt to upgrade the package.
            (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
            # Look for output looking something like "nmap-6.01->6.25: ok" to see if
            # something changed (or would have changed). Use \W to delimit the match
            # from progress meter output.
            pkg_spec[name]['changed'] = False
            for installed_name in pkg_spec[name]['installed_names']:
                module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
                match = re.search("\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
                if match:
                    module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
                    pkg_spec[name]['changed'] = True
                    break
            # FIXME: This part is problematic. Based on the issues mentioned (and
            # handled) in package_present() it is not safe to blindly trust stderr
            # as an indicator that the command failed, and in the case with
            # empty installpath directories this will break.
            #
            # For now keep this safeguard here, but ignore it if we managed to
            # parse out a successful update above. This way we will report a
            # successful run when we actually modify something but fail
            # otherwise.
            if pkg_spec[name]['changed'] is not True:
                if pkg_spec[name]['stderr']:
                    pkg_spec[name]['rc'] = 1
        else:
            # Note packages that need to be handled by package_present
            module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
            pkg_spec['package_latest_leftovers'].append(name)
    # If there were any packages that were not installed we call
    # package_present() which will handle those.
    if pkg_spec['package_latest_leftovers']:
        module.debug("package_latest(): calling package_present() to handle leftovers")
        package_present(names, pkg_spec, module)
# Function used to make sure a package is not installed.
def package_absent(names, pkg_spec, module):
    """Remove every named package that is currently installed, recording
    per-package rc/stdout/stderr/changed results in *pkg_spec*."""
    remove_cmd = 'pkg_delete -I'
    if module.check_mode:
        remove_cmd += 'n'
    if module.params['clean']:
        remove_cmd += 'c'
    if module.params['quick']:
        remove_cmd += 'q'
    for name in names:
        spec = pkg_spec[name]
        if not spec['installed_state']:
            # Not installed: report a clean no-op for this package.
            spec['rc'] = 0
            spec['stdout'] = ''
            spec['stderr'] = ''
            spec['changed'] = False
            continue
        # Attempt to remove the package.
        (spec['rc'], spec['stdout'], spec['stderr']) = execute_command(
            "%s %s" % (remove_cmd, name), module)
        spec['changed'] = spec['rc'] == 0
# Function used to parse the package name based on packages-specs(7).
# The general name structure is "stem-version[-flavors]".
#
# Names containing "%" are a special variation not part of the
# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
# description.
def parse_package_name(names, pkg_spec, module):
    """Split each requested name into stem/version/flavor parts.

    The parse result for every name is stored as a dict in pkg_spec[name];
    the 'style' key records which syntax was detected.
    """
    # Initialize empty list of package_latest() leftovers.
    pkg_spec['package_latest_leftovers'] = []

    for name in names:
        module.debug("parse_package_name(): parsing name: %s" % name)

        # Do some initial matches so we can base the more advanced regex on that.
        has_version = re.search("-[0-9]", name)
        is_versionless = re.search("--", name)

        # Stop if someone is giving us a name that both has a version and is
        # version-less at the same time.
        if has_version and is_versionless:
            module.fail_json(msg="package name both has a version and is version-less: " + name)

        # All information for a given name is kept in the pkg_spec keyed by that name.
        spec = pkg_spec[name] = {}

        if has_version:
            # Name carries an explicit version.
            parsed = re.search("^(?P<stem>.*)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?$", name)
            if not parsed:
                module.fail_json(msg="unable to parse package name at version_match: " + name)
            spec.update(
                stem=parsed.group('stem'),
                version_separator='-',
                version=parsed.group('version'),
                flavor_separator=parsed.group('flavor_separator'),
                flavor=parsed.group('flavor'),
                style='version',
            )
        elif is_versionless:
            # Name is explicitly version-less ("--").
            parsed = re.search("^(?P<stem>.*)--(?P<flavor>[a-z].*)?$", name)
            if not parsed:
                module.fail_json(msg="unable to parse package name at versionless_match: " + name)
            spec.update(
                stem=parsed.group('stem'),
                version_separator='-',
                version=None,
                flavor_separator='-',
                flavor=parsed.group('flavor'),
                style='versionless',
            )
        else:
            # No version information at all: the whole name is the stem.
            parsed = re.search("^(?P<stem>.*)$", name)
            if not parsed:
                module.fail_json(msg="unable to parse package name at else: " + name)
            spec.update(
                stem=parsed.group('stem'),
                version_separator=None,
                version=None,
                flavor_separator=None,
                flavor=None,
                style='stem',
            )

        # If the stem contains an "%" then it needs special treatment.
        if re.search("%", spec['stem']):
            branch_release = "6.0"

            if has_version or is_versionless:
                module.fail_json(msg="package name using 'branch' syntax also has a version or is version-less: " + name)
            if StrictVersion(platform.release()) < StrictVersion(branch_release):
                module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))

            spec['style'] = 'branch'

            # Key names from description in pkg_add(1).
            spec['pkgname'] = spec['stem'].split('%')[0]
            spec['branch'] = spec['stem'].split('%')[1]

        # Sanity check that there are no trailing dashes in flavor.
        # Try to stop strange stuff early so we can be strict later.
        if spec['flavor'] and re.search("-$", spec['flavor']):
            module.fail_json(msg="trailing dash in flavor: " + spec['flavor'])
# Function used for figuring out the port path.
def get_package_source_path(name, pkg_spec, module):
    """Look up the ports-tree path for *name* via the sqlports database.

    Records any subpackage marker in pkg_spec[name]['subpackage'] and
    returns the fullpkgpath. Fails the module when zero or more than one
    port matches.
    """
    pkg_spec[name]['subpackage'] = None
    if pkg_spec[name]['stem'] == 'sqlports':
        # Bootstrap special case: sqlports itself cannot be looked up in
        # its own database, so its path is hard-coded.
        return 'databases/sqlports'
    else:
        # try for an exact match first
        sqlports_db_file = '/usr/local/share/sqlports'
        if not os.path.isfile(sqlports_db_file):
            module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)

        conn = sqlite3.connect(sqlports_db_file)
        first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
        query = first_part_of_query + ' = ?'
        module.debug("package_package_source_path(): exact query: %s" % query)
        cursor = conn.execute(query, (name,))
        results = cursor.fetchall()

        # next, try for a fuzzier match
        if len(results) < 1:
            # Build a LIKE pattern from stem + version (or '%' wildcard).
            looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
            query = first_part_of_query + ' LIKE ?'
            if pkg_spec[name]['flavor']:
                looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
                module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
                cursor = conn.execute(query, (looking_for,))
            elif pkg_spec[name]['style'] == 'versionless':
                # Exclude versioned variants when the request was version-less.
                query += ' AND fullpkgname NOT LIKE ?'
                module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
                cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
            else:
                module.debug("package_package_source_path(): fuzzy query: %s" % query)
                cursor = conn.execute(query, (looking_for,))
            results = cursor.fetchall()

        # error if we don't find exactly 1 match
        conn.close()
        if len(results) < 1:
            module.fail_json(msg="could not find a port by the name '%s'" % name)
        if len(results) > 1:
            matches = map(lambda x:x[1], results)
            module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))

        # there's exactly 1 match, so figure out the subpackage, if any, then return
        fullpkgpath = results[0][0]
        parts = fullpkgpath.split(',')
        if len(parts) > 1 and parts[1][0] == '-':
            pkg_spec[name]['subpackage'] = parts[1]
        return parts[0]
# Function used for upgrading all installed packages.
def upgrade_packages(pkg_spec, module):
    """Upgrade every installed package via 'pkg_add -u'.

    Results are stored under the pseudo-name '*' in pkg_spec. In check
    mode '-n' is added so nothing is actually modified.
    """
    if module.check_mode:
        upgrade_cmd = 'pkg_add -Imnu'
    else:
        upgrade_cmd = 'pkg_add -Imu'

    # Create a minimal pkg_spec entry for '*' to store return values.
    pkg_spec['*'] = {}

    # Attempt to upgrade all packages.
    pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)

    # Try to find any occurrence of a package changing version like:
    # "bzip2-1.0.6->1.0.6p0: ok".
    # Use a raw string so "\W" and "\w" reach the regex engine as intended
    # instead of being treated as (invalid) string escape sequences.
    match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
    if match:
        pkg_spec['*']['changed'] = True
    else:
        pkg_spec['*']['changed'] = False

    # It seems we can not trust the return value, so depend on the presence of
    # stderr to know if something failed.
    if pkg_spec['*']['stderr']:
        pkg_spec['*']['rc'] = 1
    else:
        pkg_spec['*']['rc'] = 0
# ===========================================
# Main control flow.
def main():
    """Entry point: parse module arguments and dispatch the requested action."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, type='list'),
            state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
            build = dict(default='no', type='bool'),
            ports_dir = dict(default='/usr/ports'),
            quick = dict(default='no', type='bool'),
            clean = dict(default='no', type='bool')
        ),
        supports_check_mode = True
    )
    name = module.params['name']
    state = module.params['state']
    build = module.params['build']
    ports_dir = module.params['ports_dir']
    # NOTE(review): rc/stdout/stderr below appear unused in this function;
    # kept as-is.
    rc = 0
    stdout = ''
    stderr = ''
    result = {}
    result['name'] = name
    result['state'] = state
    result['build'] = build
    # The data structure used to keep track of package information.
    pkg_spec = {}
    if build is True:
        if not os.path.isdir(ports_dir):
            module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
        # build sqlports if its not installed yet
        parse_package_name(['sqlports'], pkg_spec, module)
        get_package_state(['sqlports'], pkg_spec, module)
        if not pkg_spec['sqlports']['installed_state']:
            module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
            package_present(['sqlports'], pkg_spec, module)
    # Detect the special '*' name, which means "all installed packages".
    asterisk_name = False
    for n in name:
        if n == '*':
            if len(name) != 1:
                module.fail_json(msg="the package name '*' can not be mixed with other names")
            asterisk_name = True
    if asterisk_name:
        if state != 'latest':
            module.fail_json(msg="the package name '*' is only valid when using state=latest")
        else:
            # Perform an upgrade of all installed packages.
            upgrade_packages(pkg_spec, module)
    else:
        # Parse package names and put results in the pkg_spec dictionary.
        parse_package_name(name, pkg_spec, module)
        # Not sure how the branch syntax is supposed to play together
        # with build mode. Disable it for now.
        for n in name:
            if pkg_spec[n]['style'] == 'branch' and module.params['build'] is True:
                module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
        # Get state for all package names.
        get_package_state(name, pkg_spec, module)
        # Perform requested action.
        if state in ['installed', 'present']:
            package_present(name, pkg_spec, module)
        elif state in ['absent', 'removed']:
            package_absent(name, pkg_spec, module)
        elif state == 'latest':
            package_latest(name, pkg_spec, module)
    # The combined changed status for all requested packages. If anything
    # is changed this is set to True.
    combined_changed = False
    # We combine all error messages in this comma separated string, for example:
    # "msg": "Can't find nmapp\n, Can't find nmappp\n"
    combined_error_message = ''
    # Loop over all requested package names and check if anything failed or
    # changed.
    for n in name:
        if pkg_spec[n]['rc'] != 0:
            if pkg_spec[n]['stderr']:
                if combined_error_message:
                    combined_error_message += ", %s" % pkg_spec[n]['stderr']
                else:
                    combined_error_message = pkg_spec[n]['stderr']
            else:
                if combined_error_message:
                    combined_error_message += ", %s" % pkg_spec[n]['stdout']
                else:
                    combined_error_message = pkg_spec[n]['stdout']
        if pkg_spec[n]['changed'] is True:
            combined_changed = True
    # If combined_error_message contains anything at least some part of the
    # list of requested package names failed.
    if combined_error_message:
        module.fail_json(msg=combined_error_message)
    result['changed'] = combined_changed
    module.exit_json(**result)
# Import module snippets.
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
ryfeus/lambda-packs | refs/heads/master | Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/shapely/coords.py | 7 | """Coordinate sequence utilities
"""
import sys
from array import array
from ctypes import byref, c_double, c_uint
from shapely.geos import lgeos
from shapely.topology import Validating
if sys.version_info[0] < 3:
range = xrange
class CoordinateSequence(object):
    """
    Iterative access to coordinate tuples from the parent geometry's coordinate
    sequence.

    Example:

      >>> from shapely.wkt import loads
      >>> g = loads('POINT (0.0 0.0)')
      >>> list(g.coords)
      [(0.0, 0.0)]
    """

    # Attributes
    # ----------
    # _cseq : c_void_p
    #     Ctypes pointer to GEOS coordinate sequence
    # _ndim : int
    #     Number of dimensions (2 or 3, generally)
    # __p__ : object
    #     Parent (Shapely) geometry
    _cseq = None
    _ndim = None
    __p__ = None

    def __init__(self, parent):
        self.__p__ = parent

    def _update(self):
        # Refresh the cached dimensionality and the GEOS coordinate-sequence
        # pointer from the parent geometry. Called at the start of every
        # public accessor so a replaced parent geometry is always honored.
        self._ndim = self.__p__._ndim
        self._cseq = lgeos.GEOSGeom_getCoordSeq(self.__p__._geom)

    def __len__(self):
        """Return the number of coordinates in the sequence."""
        self._update()
        cs_len = c_uint(0)
        lgeos.GEOSCoordSeq_getSize(self._cseq, byref(cs_len))
        return cs_len.value

    def __iter__(self):
        """Yield each coordinate as an (x, y) or (x, y, z) tuple."""
        self._update()
        dx = c_double()
        dy = c_double()
        dz = c_double()
        has_z = self._ndim == 3
        for i in range(self.__len__()):
            lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(dx))
            lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(dy))
            if has_z:
                lgeos.GEOSCoordSeq_getZ(self._cseq, i, byref(dz))
                yield (dx.value, dy.value, dz.value)
            else:
                yield (dx.value, dy.value)

    def __getitem__(self, key):
        """Return the coordinate tuple at *key* (int) or a list for a slice.

        Raises IndexError for out-of-range integers and TypeError for any
        other key type.
        """
        self._update()
        dx = c_double()
        dy = c_double()
        dz = c_double()
        m = self.__len__()
        has_z = self._ndim == 3
        if isinstance(key, int):
            # Support negative indexing like a plain Python sequence.
            if key + m < 0 or key >= m:
                raise IndexError("index out of range")
            if key < 0:
                i = m + key
            else:
                i = key
            lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(dx))
            lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(dy))
            if has_z:
                lgeos.GEOSCoordSeq_getZ(self._cseq, i, byref(dz))
                return (dx.value, dy.value, dz.value)
            else:
                return (dx.value, dy.value)
        elif isinstance(key, slice):
            res = []
            start, stop, stride = key.indices(m)
            for i in range(start, stop, stride):
                lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(dx))
                lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(dy))
                if has_z:
                    lgeos.GEOSCoordSeq_getZ(self._cseq, i, byref(dz))
                    res.append((dx.value, dy.value, dz.value))
                else:
                    res.append((dx.value, dy.value))
            return res
        else:
            raise TypeError("key must be an index or slice")

    @property
    def ctypes(self):
        """Return the coordinates as a flat ctypes double array.

        Layout is row-major: [x0, y0, (z0,) x1, y1, (z1,) ...].
        """
        self._update()
        has_z = self._ndim == 3
        n = self._ndim
        m = self.__len__()
        array_type = c_double * (m * n)
        data = array_type()
        temp = c_double()
        for i in range(m):
            lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(temp))
            data[n*i] = temp.value
            lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(temp))
            data[n*i+1] = temp.value
            if has_z:
                lgeos.GEOSCoordSeq_getZ(self._cseq, i, byref(temp))
                data[n*i+2] = temp.value
        return data

    def array_interface(self):
        """Provide the Numpy array protocol."""
        # Typestr encodes native endianness for 8-byte floats.
        if sys.byteorder == 'little':
            typestr = '<f8'
        elif sys.byteorder == 'big':
            typestr = '>f8'
        else:
            raise ValueError(
                "Unsupported byteorder: neither little nor big-endian")
        ai = {
            'version': 3,
            'typestr': typestr,
            'data': self.ctypes,
            }
        ai.update({'shape': (len(self), self._ndim)})
        return ai

    __array_interface__ = property(array_interface)

    @property
    def xy(self):
        """X and Y arrays"""
        self._update()
        m = self.__len__()
        x = array('d')
        y = array('d')
        temp = c_double()
        for i in range(m):
            lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(temp))
            x.append(temp.value)
            lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(temp))
            y.append(temp.value)
        return x, y
class BoundsOp(Validating):
    """Callable computing (minx, miny, maxx, maxy) for a geometry by
    scanning the coordinate sequence of its envelope's exterior ring."""

    def __init__(self, *args):
        # No per-instance state is needed; args accepted for interface parity.
        pass

    def __call__(self, this):
        self._validate(this)
        env = this.envelope
        # A degenerate (point) envelope already knows its own bounds.
        if env.geom_type == 'Point':
            return env.bounds
        coord_seq = lgeos.GEOSGeom_getCoordSeq(env.exterior._geom)
        size = c_uint(0)
        lgeos.GEOSCoordSeq_getSize(coord_seq, byref(size))
        minx = miny = 1.e+20
        maxx = maxy = -1e+20
        value = c_double()
        for idx in range(size.value):
            lgeos.GEOSCoordSeq_getX(coord_seq, idx, byref(value))
            minx = min(minx, value.value)
            maxx = max(maxx, value.value)
            lgeos.GEOSCoordSeq_getY(coord_seq, idx, byref(value))
            miny = min(miny, value.value)
            maxy = max(maxy, value.value)
        return (minx, miny, maxx, maxy)
|
CyanogenMod/android_kernel_samsung_msm8660-q1 | refs/heads/ics | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
# quiet: suppress progress output; test: syntax-check only; comments:
# echo comment lines from the test file. All three are set from the
# command line below.
quiet = 0
test = 0
comments = 0

# Sysfs locations of the in-kernel rt-mutex tester interface.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Maps symbolic command names from the test file to the numeric opcodes
# the kernel tester expects.
cmd_opcodes = {
    "schedother" : "1",
    "schedfifo" : "2",
    "lock" : "3",
    "locknowait" : "4",
    "lockint" : "5",
    "lockintnowait" : "6",
    "lockcont" : "7",
    "unlock" : "8",
    "signal" : "11",
    "resetevent" : "98",
    "reset" : "99",
    }

# Test opcodes: [status field letter, relation, fixed argument or None].
# Consumed by analyse() when checking a status line.
test_opcodes = {
    "prioeq" : ["P" , "eq" , None],
    "priolt" : ["P" , "lt" , None],
    "priogt" : ["P" , "gt" , None],
    "nprioeq" : ["N" , "eq" , None],
    "npriolt" : ["N" , "lt" , None],
    "npriogt" : ["N" , "gt" , None],
    "unlocked" : ["M" , "eq" , 0],
    "trylock" : ["M" , "eq" , 1],
    "blocked" : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked" : ["M" , "eq" , 4],
    "opcodeeq" : ["O" , "eq" , None],
    "opcodelt" : ["O" , "lt" , None],
    "opcodegt" : ["O" , "gt" , None],
    "eventeq" : ["E" , "eq" , None],
    "eventlt" : ["E" , "lt" , None],
    "eventgt" : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    # Python 2 script: print statements are intentional.
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE(review): the parameter name shadows the builtin str; kept
    # unchanged for compatibility.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare status value *val* against the test spec *top* using *arg*.

    top is a [field-letter, relation, fixed-argument] triple taken from
    test_opcodes. Returns 1 when the relation holds, 0 otherwise.
    """
    intval = int(val)

    if top[0] == "M":
        # Mutex field: extract one decimal digit of the status number.
        # Floor division ("//") matches the original Python 2 integer
        # division and keeps the digit extraction exact under Python 3.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: translate symbolic opcodes to their numbers.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns
# Each non-comment line has the form "cmd:opcode:threadid:data".
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line; optionally echoed after the first real command.
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" checks once; "w" polls until the condition holds.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
|
camny125/grit-i18n | refs/heads/master | grit/pseudo_unittest.py | 61 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.pseudo'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import unittest
from grit import pseudo
from grit import tclib
class PseudoUnittest(unittest.TestCase):
  """Unit tests for the pseudo-translation helpers in grit.pseudo."""

  # NOTE: the original code used TestCase.failUnless, an alias deprecated
  # since Python 2.7 and removed in Python 3.12; assertTrue is the
  # supported spelling and behaves identically.

  def testVowelMapping(self):
    self.assertTrue(pseudo.MapVowels('abebibobuby') ==
                    u'\u00e5b\u00e9b\u00efb\u00f4b\u00fcb\u00fd')
    self.assertTrue(pseudo.MapVowels('ABEBIBOBUBY') ==
                    u'\u00c5B\u00c9B\u00cfB\u00d4B\u00dcB\u00dd')

  def testPseudoString(self):
    out = pseudo.PseudoString('hello')
    self.assertTrue(out == pseudo.MapVowels(u'hePelloPo', True))

  def testConsecutiveVowels(self):
    out = pseudo.PseudoString("beautiful weather, ain't it?")
    self.assertTrue(out == pseudo.MapVowels(
      u"beauPeautiPifuPul weaPeathePer, aiPain't iPit?", 1))

  def testCapitals(self):
    out = pseudo.PseudoString("HOWDIE DOODIE, DR. JONES")
    self.assertTrue(out == pseudo.MapVowels(
      u"HOPOWDIEPIE DOOPOODIEPIE, DR. JOPONEPES", 1))

  def testPseudoMessage(self):
    msg = tclib.Message(text='Hello USERNAME, how are you?',
                        placeholders=[
                          tclib.Placeholder('USERNAME', '%s', 'Joi')])
    trans = pseudo.PseudoMessage(msg)
    # TODO(joi) It would be nicer if 'you' -> 'youPou' instead of
    # 'you' -> 'youPyou' and if we handled the silent e in 'are'
    self.assertTrue(trans.GetPresentableContent() ==
                    pseudo.MapVowels(
                      u'HePelloPo USERNAME, hoPow aParePe youPyou?', 1))
if __name__ == '__main__':
unittest.main()
|
elky/django | refs/heads/master | django/conf/locale/eo/__init__.py | 12133432 | |
chepazzo/ansible-modules-extras | refs/heads/devel | cloud/misc/__init__.py | 12133432 | |
ujenmr/ansible | refs/heads/devel | lib/ansible/module_utils/network/dellos10/__init__.py | 12133432 | |
kustodian/ansible | refs/heads/devel | test/integration/targets/module_utils/other_mu_dir/a/b/c/d/e/__init__.py | 12133432 | |
rahul67/hue | refs/heads/master | desktop/core/ext-py/ndg_httpsclient-0.4.0/ndg/httpsclient/ssl_context_util.py | 75 | """ndg_httpsclient SSL Context utilities module containing convenience routines
for setting SSL context configuration.
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "09/12/11"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import sys
if sys.version_info[0] > 2:
import urllib.parse as urlparse_
else:
import urlparse as urlparse_
from OpenSSL import SSL
from ndg.httpsclient.ssl_peer_verification import ServerSSLCertVerification
class SSlContextConfig(object):
    """
    Holds configuration options for creating a SSL context. This is used as a
    template to create the contexts with specific verification callbacks.
    """

    def __init__(self, key_file=None, cert_file=None, pem_file=None, ca_dir=None,
                 verify_peer=False):
        # Simply record the settings; they are consumed later when an
        # actual SSL context is built from this template.
        for attr_name, attr_value in (('key_file', key_file),
                                      ('cert_file', cert_file),
                                      ('pem_file', pem_file),
                                      ('ca_dir', ca_dir),
                                      ('verify_peer', verify_peer)):
            setattr(self, attr_name, attr_value)
def make_ssl_context_from_config(ssl_config=False, url=None):
    """Build an SSL context from an SSlContextConfig template.

    NOTE(review): the default ssl_config=False is not usable - the
    attribute access below would fail on it; callers are expected to
    always pass an SSlContextConfig instance. Confirm whether the default
    should be removed.
    """
    return make_ssl_context(ssl_config.key_file, ssl_config.cert_file,
                            ssl_config.pem_file, ssl_config.ca_dir,
                            ssl_config.verify_peer, url)
def make_ssl_context(key_file=None, cert_file=None, pem_file=None, ca_dir=None,
                     verify_peer=False, url=None, method=SSL.TLSv1_METHOD,
                     key_file_passphrase=None):
    """
    Creates SSL context containing certificate and key file locations.

    When verify_peer is True and url is given, peer verification checks the
    certificate against the URL's hostname; otherwise a pass-through
    callback is installed that accepts OpenSSL's own verdict unchanged.
    """
    ssl_context = SSL.Context(method)

    # Key file defaults to certificate file if present.
    if cert_file:
        ssl_context.use_certificate_file(cert_file)

    if key_file_passphrase:
        # OpenSSL invokes this callback to obtain the private-key passphrase.
        passwd_cb = lambda max_passphrase_len, set_prompt, userdata: \
            key_file_passphrase
        ssl_context.set_passwd_cb(passwd_cb)

    if key_file:
        ssl_context.use_privatekey_file(key_file)
    elif cert_file:
        ssl_context.use_privatekey_file(cert_file)

    if pem_file or ca_dir:
        ssl_context.load_verify_locations(pem_file, ca_dir)

    def _callback(conn, x509, errnum, errdepth, preverify_ok):
        """Default certification verification callback.
        Performs no checks and returns the status passed in.
        """
        return preverify_ok

    verify_callback = _callback

    if verify_peer:
        ssl_context.set_verify_depth(9)
        if url:
            set_peer_verification_for_url_hostname(ssl_context, url)
        else:
            ssl_context.set_verify(SSL.VERIFY_PEER, verify_callback)
    else:
        ssl_context.set_verify(SSL.VERIFY_NONE, verify_callback)
    return ssl_context
def set_peer_verification_for_url_hostname(ssl_context, url,
                                           if_verify_enabled=False):
    '''Convenience routine to set peer verification callback based on
    ServerSSLCertVerification class'''
    # NOTE(review): with the default if_verify_enabled=False the callback is
    # applied unconditionally; passing True restricts it to contexts that
    # already have SSL.VERIFY_PEER set. Confirm this inversion is intended.
    if not if_verify_enabled or (ssl_context.get_verify_mode() & SSL.VERIFY_PEER):
        urlObj = urlparse_.urlparse(url)
        hostname = urlObj.hostname
        server_ssl_cert_verif = ServerSSLCertVerification(hostname=hostname)
        verify_callback_ = server_ssl_cert_verif.get_verify_server_cert_func()
        ssl_context.set_verify(SSL.VERIFY_PEER, verify_callback_)
|
smeissner/eden | refs/heads/master | controllers/msg.py | 6 | # -*- coding: utf-8 -*-
"""
Messaging Module - Controllers
"""
# Resolve the controller/function names from the current request and make
# sure the messaging module is enabled in the deployment settings.
module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """

    name_nice = deployment_settings.modules[module].name_nice
    response.title = name_nice
    return dict(module_name=name_nice)
# =============================================================================
def compose():
    """ Compose a Message which can be sent to a pentity via a number of different communications channels """

    # Delegates entirely to the messaging framework's generic compose view.
    return msg.compose()
# -----------------------------------------------------------------------------
# Send Outbound Messages - to be called via cron
# -----------------------------------------------------------------------------
def process_email():
    """Flush pending EMAIL messages from the outbox (cron entry point)."""

    msg.process_outbox(contact_method="EMAIL")
# -----------------------------------------------------------------------------
def process_sms():
    """Flush pending SMS messages from the outbox (cron entry point)."""

    msg.process_outbox(contact_method="SMS")
# -----------------------------------------------------------------------------
def process_twitter():
    """Flush pending TWITTER messages from the outbox (cron entry point)."""

    msg.process_outbox(contact_method="TWITTER")
# =============================================================================
def outbox():
    """ View the contents of the Outbox """

    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))

    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    table.message_id.label = T("Message")
    table.message_id.writable = False
    table.pe_id.readable = True
    table.pe_id.label = T("Recipient")

    # Subject works for Email but not SMS
    table.message_id.represent = lambda id: db(db.msg_log.id == id).select(db.msg_log.message, limitby=(0, 1)).first().message
    table.pe_id.represent = lambda id: s3db.pr_pentity_represent(id, default_label = "")

    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_list = T("View Outbox"),
        title_update = T("Edit Message"),
        label_list_button = T("View Outbox"),
        label_delete_button = T("Delete Message"),
        msg_record_modified = T("Message updated"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No Messages currently in Outbox")
    )

    # "Compose" button shown alongside the list view.
    add_btn = A(T("Compose"),
                _class="action-btn",
                _href=URL(f="compose")
                )

    s3db.configure(tablename, listadd=False)
    return s3_rest_controller(module, resourcename, add_btn = add_btn)
# =============================================================================
def log():
    """
    RESTful CRUD controller for the Master Message Log
        - all Inbound & Outbound Messages go here

    @ToDo: Field Labels for i18n
    """

    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))

    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    # CRUD Strings
    ADD_MESSAGE = T("Add Message")
    s3.crud_strings[tablename] = Storage(
        title_create = ADD_MESSAGE,
        title_display = T("Message Details"),
        title_list = T("Messages"),
        title_update = T("Edit message"),
        title_search = T("Search messages"),
        subtitle_create = T("Send new message"),
        label_list_button = T("List Messages"),
        label_create_button = ADD_MESSAGE,
        msg_record_created = T("Message added"),
        msg_record_modified = T("Message updated"),
        msg_record_deleted = T("Message deleted"),
        msg_list_empty = T("No messages in the system"))

    s3db.configure(tablename, listadd=False)
    return s3_rest_controller()
# =============================================================================
def tropo():
    """
    Receive a JSON POST from the Tropo WebAPI

    @see: https://www.tropo.com/docs/webapi/newhowitworks.htm
    """

    # Stored in modules/tropo.py
    from tropo import Tropo, Session

    # NOTE(review): the bare excepts below are used deliberately as control
    # flow to distinguish outbound / inbound / voice payloads; tightening
    # them would need knowledge of the exact exceptions Session raises.
    try:
        s = Session(request.body.read())
        t = Tropo()

        # This is their service contacting us, so parse their request
        try:
            row_id = s.parameters["row_id"]
            # This is an Outbound message which we've requested Tropo to send for us
            table = s3db.msg_tropo_scratch
            query = (table.row_id == row_id)
            row = db(query).select().first()
            # Send the message
            #t.message(say_obj={"say":{"value":row.message}},to=row.recipient,network=row.network)
            t.call(to=row.recipient, network=row.network)
            t.say(row.message)
            # Update status to sent in Outbox
            outbox = s3db.msg_outbox
            db(outbox.id == row.row_id).update(status=2)
            # Set message log to actioned
            # (local 'log' shadows the log() controller; harmless here)
            log = s3db.msg_log
            db(log.id == row.message_id).update(actioned=True)
            # Clear the Scratchpad
            db(query).delete()
            return t.RenderJson()
        except:
            # This is an Inbound message
            try:
                message = s.initialText
                # This is an SMS/IM
                # Place it in the InBox
                uuid = s.id
                recipient = s.to["id"]
                try:
                    fromaddress = s.fromaddress["id"]
                except:
                    # SyntaxError: s.from => invalid syntax (why!?)
                    fromaddress = ""
                s3db.msg_log.insert(uuid=uuid, fromaddress=fromaddress,
                                    recipient=recipient, message=message,
                                    inbound=True)
                # Send the message to the parser
                reply = msg.parse_message(message)
                t.say([reply])
                return t.RenderJson()
            except:
                # This is a Voice call
                # - we can't handle these yet
                raise HTTP(501)
    except:
        # GET request or some random POST
        pass
# =============================================================================
def twitter_search():
    """ Controller to modify Twitter search queries """

    # Plain RESTful CRUD over the saved-search table.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_search_results():
    """
    Controller to view tweets from user saved search queries

    @ToDo: Action Button to update async
    """

    def prep(r):
        # On first interactive view with no stored results, fetch tweets
        # for the subscribed searches before rendering.
        if r.interactive:
            table = r.table
            if not db(table.id > 0).select(table.id,
                                           limitby=(0, 1)).first():
                # Update results
                result = msg.receive_subscribed_tweets()
                if not result:
                    session.error = T("Need to configure Twitter Authentication")
                    redirect(URL(f="twitter_settings", args=[1, "update"]))
        return True
    s3.prep = prep

    # Results are read-only.
    s3db.configure("msg_twitter_search_results",
                   insertable=False,
                   editable=False)
    return s3_rest_controller()
# =============================================================================
@auth.s3_requires_membership(1)
def setting():
    """ SMS settings for the messaging framework """

    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    table.outgoing_sms_handler.label = T("Outgoing SMS handler")
    table.outgoing_sms_handler.comment = DIV(DIV(_class="tooltip",
        _title="%s|%s" % (T("Outgoing SMS Handler"),
                          T("Selects what type of gateway to use for outbound SMS"))))

    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_update = T("Edit SMS Settings"),
        msg_record_modified = T("SMS settings updated")
    )

    def prep(r):
        # After saving, redirect to the settings page of whichever SMS
        # gateway type was selected.
        if r.http == "POST":
            # Go to the details page for the chosen SMS Gateway
            outgoing_sms_handler = request.post_vars.get("outgoing_sms_handler",
                                                         None)
            if outgoing_sms_handler == "WEB_API":
                s3db.configure(tablename,
                               update_next = URL(f="api_settings",
                                                 args=[1, "update"]))
            elif outgoing_sms_handler == "SMTP":
                s3db.configure(tablename,
                               update_next = URL(f="smtp_to_sms_settings",
                                                 args=[1, "update"]))
            elif outgoing_sms_handler == "MODEM":
                s3db.configure(tablename,
                               update_next = URL(f="modem_settings",
                                                 args=[1, "update"]))
            elif outgoing_sms_handler == "TROPO":
                s3db.configure(tablename,
                               update_next = URL(f="tropo_settings",
                                                 args=[1, "update"]))
            else:
                s3db.configure(tablename,
                               update_next = URL(args=[1, "update"]))
        return True
    s3.prep = prep

    s3db.configure(tablename,
                   deletable=False,
                   listadd=False)
    #response.menu_options = admin_menu_options
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def inbound_email_settings():
    """
    RESTful CRUD controller for email settings
        - appears in the administration menu
    """

    if not auth.s3_has_role(ADMIN):
        session.error = UNAUTHORISED
        redirect(URL(f="index"))

    tablename = "msg_inbound_email_settings"
    table = s3db[tablename]

    table.server.label = T("Server")
    table.protocol.label = T("Protocol")
    table.use_ssl.label = "SSL"
    table.port.label = T("Port")
    table.username.label = T("Username")
    table.password.label = T("Password")
    table.delete_from_server.label = T("Delete from Server?")

    table.port.comment = DIV(DIV(_class="tooltip",
                                 _title="%s|%s" % (T("Port"),
                                                   T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP)."))))
    table.delete_from_server.comment = DIV(DIV(_class="tooltip",
                                               _title="%s|%s" % (T("Delete"),
                                                                 T("If this is set to True then mails will be deleted from the server after downloading."))))

    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Email Setting Details"),
        title_list = T("Email Settings"),
        title_create = T("Add Email Settings"),
        title_update = T("Edit Email Settings"),
        title_search = T("Search Email Settings"),
        label_list_button = T("View Email Settings"),
        label_create_button = T("Add Email Settings"),
        msg_record_created = T("Setting added"),
        msg_record_deleted = T("Email Setting deleted"),
        msg_list_empty = T("No Settings currently defined"),
        msg_record_modified = T("Email settings updated")
    )

    #response.menu_options = admin_menu_options
    s3db.configure(tablename, listadd=True, deletable=True)

    def postp(r, output):
        # Adds per-row Enable/Disable/Activate action buttons depending on
        # whether a scheduler task exists (and is enabled) for each setting.
        # NOTE(review): inside the loops below, 'r' is reassigned and shadows
        # this function's request argument; it is only used via
        # s3_action_buttons(r) before that point, so the shadowing is
        # currently harmless.  The two task-scanning loops are duplicates
        # differing only in the enabled flag.
        wtable = s3db.msg_workflow
        stable = s3db.scheduler_task
        mtable = r.table

        s3_action_buttons(r)

        # Settings whose scheduler task is disabled -> offer "Enable".
        query = (stable.enabled == False)
        records = db(query).select()
        rows = []
        for record in records:
            if "username" in record.vars:
                r = record.vars.split("\"username\":")[1]
                s = r.split("}")[0]
                s = s.split("\"")[1].split("\"")[0]
                record1 = db(mtable.username == s).select(mtable.id)
                if record1:
                    for rec in record1:
                        rows += [rec]
        restrict_e = [str(row.id) for row in rows]

        # Settings whose scheduler task is enabled -> offer "Disable".
        query = (stable.enabled == True )
        records = db(query).select()
        rows = []
        for record in records:
            if "username" in record.vars:
                r = record.vars.split("\"username\":")[1]
                s = r.split("}")[0]
                s = s.split("\"")[1].split("\"")[0]
                record1 = db(mtable.username == s).select(mtable.id)
                if record1:
                    for rec in record1:
                        rows += [rec]
        restrict_d = [str(row.id) for row in rows]

        # Settings with no scheduler task at all -> offer "Activate".
        rows = []
        records = db(stable.id > 0).select()
        tasks = [record.vars for record in records]
        sources = []
        for task in tasks:
            if "username" in task:
                u = task.split("\"username\":")[1]
                v = u.split(",")[0]
                v = v.split("\"")[1]
                sources += [v]

        msettings = db(mtable.deleted == False).select(mtable.ALL)
        for msetting in msettings :
            if msetting.username:
                if (msetting.username not in sources):
                    if msetting:
                        rows += [msetting]
        restrict_a = [str(row.id) for row in rows]

        s3.actions = \
        s3.actions + [
                      dict(label=str(T("Enable")),
                           _class="action-btn",
                           url=URL(f="enable_email",
                                   args="[id]"),
                           restrict = restrict_e)
                      ]
        s3.actions.append(dict(label=str(T("Disable")),
                               _class="action-btn",
                               url = URL(f = "disable_email",
                                         args = "[id]"),
                               restrict = restrict_d)
                          )
        s3.actions.append(dict(label=str(T("Activate")),
                               _class="action-btn",
                               url = URL(f = "schedule_email",
                                         args = "[id]"),
                               restrict = restrict_a)
                          )
        return output
    s3.postp = postp

    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twilio_inbound_settings():
    """
    RESTful CRUD controller for twilio sms settings
    - appears in the administration menu

    postp attaches per-row Enable/Disable/Activate action buttons whose
    visibility depends on the state of the matching scheduler task.
    """
    if not auth.s3_has_role(ADMIN):
        session.error = UNAUTHORISED
        redirect(URL(f="index"))
    tablename = "msg_twilio_inbound_settings"
    table = s3db[tablename]
    # Field labels & tooltips
    table.account_name.label = T("Account Name")
    table.account_name.comment = DIV(DIV(_class="tooltip",
                                         _title="%s|%s" % (T("Account Name"),
                                                           T("Identifier Name for your Twilio Account."))))
    table.url.label = T("URL")
    table.url.comment = DIV(DIV(_class="tooltip",
                                _title="%s|%s" % (T("URL"),
                                                  T("URL for the twilio API."))))
    table.account_sid.label = "Account SID"
    table.auth_token.label = T("AUTH TOKEN")
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twilio Setting Details"),
        title_list = T("Twilio Settings"),
        title_create = T("Add Twilio Settings"),
        title_update = T("Edit Twilio Settings"),
        title_search = T("Search Twilio Settings"),
        label_list_button = T("View Twilio Settings"),
        label_create_button = T("Add Twilio Settings"),
        msg_record_created = T("Twilio Setting added"),
        msg_record_deleted = T("Twilio Setting deleted"),
        msg_list_empty = T("No Twilio Settings currently defined"),
        msg_record_modified = T("Twilio settings updated")
    )
    #response.menu_options = admin_menu_options
    s3db.configure(tablename, listadd=True, deletable=True)
    def postp(r, output):
        # Build the id-lists that restrict each action button to the
        # rows it applies to
        stable = s3db.scheduler_task
        ttable = r.table
        s3_action_buttons(r)
        # Settings whose scheduler task is disabled -> "Enable" candidates
        query = (stable.enabled == False)
        records = db(query).select()
        rows = []
        for record in records:
            if "account" in record.vars:
                # record.vars is a JSON string; pull the account value out
                # by plain string splitting
                # NOTE: reassigning `r` shadows the request object - it is
                # not used again below, so this is harmless here
                r = record.vars.split("\"account\":")[1]
                s = r.split("}")[0]
                s = s.split("\"")[1].split("\"")[0]
                record1 = db(ttable.account_name == s).select(ttable.id)
                if record1:
                    for rec in record1:
                        rows += [rec]
        restrict_e = [str(row.id) for row in rows]
        # Settings whose scheduler task is enabled -> "Disable" candidates
        query = (stable.enabled == True)
        records = db(query).select()
        rows = []
        for record in records:
            if "account" in record.vars:
                r = record.vars.split("\"account\":")[1]
                s = r.split("}")[0]
                s = s.split("\"")[1].split("\"")[0]
                record1 = db(ttable.account_name == s).select(ttable.id)
                if record1:
                    for rec in record1:
                        rows += [rec]
        restrict_d = [str(row.id) for row in rows]
        # Settings with no scheduler task at all -> "Activate" candidates
        rows = []
        records = db(stable.id > 0).select()
        tasks = [record.vars for record in records]
        sources = []
        for task in tasks:
            if "account" in task:
                # NOTE: this variant splits on "," first (vs "}" above)
                u = task.split("\"account\":")[1]
                v = u.split(",")[0]
                v = v.split("\"")[1]
                sources += [v]
        tsettings = db(ttable.deleted == False).select(ttable.ALL)
        for tsetting in tsettings :
            if tsetting.account_name:
                if (tsetting.account_name not in sources):
                    if tsetting:
                        rows += [tsetting]
        restrict_a = [str(row.id) for row in rows]
        s3.actions = \
        s3.actions + [
                      dict(label=str(T("Enable")),
                           _class="action-btn",
                           url=URL(f="enable_twilio_sms",
                                   args="[id]"),
                           restrict = restrict_e)
                      ]
        s3.actions.append(dict(label=str(T("Disable")),
                               _class="action-btn",
                               url = URL(f = "disable_twilio_sms",
                                         args = "[id]"),
                               restrict = restrict_d)
                          )
        s3.actions.append(dict(label=str(T("Activate")),
                               _class="action-btn",
                               url = URL(f = "schedule_twilio_sms",
                                         args = "[id]"),
                               restrict = restrict_a)
                          )
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def keyword():
    """ RESTful CRUD controller for message keywords """
    output = s3_rest_controller()
    return output
# -----------------------------------------------------------------------------
def sender():
    """ RESTful CRUD controller for message senders """
    output = s3_rest_controller()
    return output
# -----------------------------------------------------------------------------
def workflow():
    """
    RESTful CRUD controller for parsing workflows
    - appears in the administration menu

    prep populates the source/parser dropdowns dynamically; postp
    attaches Enable/Disable/Activate buttons per row depending on the
    state of the matching scheduler task.
    """
    if not auth.s3_has_role(ADMIN):
        session.error = UNAUTHORISED
        redirect(URL(f="index"))

    table = s3db.msg_workflow
    table.source_task_id.label = T("Message Source")
    table.source_task_id.comment = DIV(DIV(_class="tooltip",
                                           _title="%s|%s" % (T("Message Source"),
                                                             T("This is the name of the username for the Inbound Message Source."))))
    table.workflow_task_id.label = T("Parsing Workflow")
    table.workflow_task_id.comment = DIV(DIV(_class="tooltip",
                                             _title="%s|%s" % (T("Parsing Workflow"),
                                                               T("This is the name of the parsing function used as a workflow."))))

    # CRUD Strings
    s3.crud_strings["msg_workflow"] = Storage(
        title_display = T("Setting Details"),
        title_list = T("Parser Settings"),
        title_create = T("Add Parser Settings"),
        title_update = T("Edit Parser Settings"),
        title_search = T("Search Parser Settings"),
        label_list_button = T("View Settings"),
        label_create_button = T("Add Parser Settings"),
        msg_record_created = T("Setting added"),
        msg_record_deleted = T("Parser Setting deleted"),
        msg_list_empty = T("No Settings currently defined"),
        msg_record_modified = T("Message Parser settings updated")
    )

    s3db.configure("msg_workflow", listadd=True, deletable=True)

    def prep(r):
        """Populate the source & parser selectors from live data"""
        if r.interactive:
            import inspect
            import sys

            # The parser module lives in the active template
            parser = settings.get_msg_parser()
            module_name = "applications.%s.private.templates.%s.parser" % \
                (appname, parser)
            __import__(module_name)
            mymodule = sys.modules[module_name]
            S3Parsing = mymodule.S3Parsing()

            # Sources = all configured email usernames + twilio accounts
            mtable = s3db.msg_inbound_email_settings
            ttable = s3db.msg_twilio_inbound_settings
            source_opts = []
            append = source_opts.append
            records = db(mtable.id > 0).select(mtable.username)
            for record in records:
                append(record.username)
            records = db(ttable.deleted == False).select(ttable.account_name)
            for record in records:
                append(record.account_name)

            # Dynamic lookup of the parsing functions in S3Parsing class.
            parsers = inspect.getmembers(S3Parsing,
                                         predicate=inspect.isfunction)
            parse_opts = []
            for p in parsers:
                parse_opts += [p[0]]

            r.table.source_task_id.requires = IS_IN_SET(source_opts,
                                                        zero=None)
            r.table.workflow_task_id.requires = IS_IN_SET(parse_opts,
                                                          zero=None)
        return True
    s3.prep = prep

    def postp(r, output):
        """Attach Enable/Disable/Activate buttons to the list view"""
        wtable = s3db.msg_workflow
        stable = db["scheduler_task"]
        s3_action_buttons(r)

        def _workflow_ids(enabled):
            """
            IDs of workflows whose scheduler task has the given enabled
            state - task vars are a JSON string, parsed by splitting.
            """
            rows = []
            for record in db(stable.enabled == enabled).select():
                # BUGFIX: was `"workflow" and "source" in record.vars`,
                # which only tested for "source" and could IndexError on
                # the "workflow" split below - test both keys explicitly
                if "workflow" in record.vars and "source" in record.vars:
                    fragment = record.vars.split("\"workflow\":")[1]
                    workflow_name = fragment.split("}")[0]
                    workflow_name = workflow_name.split("\"")[1].split("\"")[0]
                    fragment = record.vars.split("\"source\":")[1]
                    source_name = fragment.split(",")[0]
                    source_name = source_name.split("\"")[1]
                    query = (wtable.workflow_task_id == workflow_name) & \
                            (wtable.source_task_id == source_name)
                    matches = db(query).select(wtable.id)
                    if matches:
                        for match in matches:
                            rows += [match]
            return [str(row.id) for row in rows]

        # Disabled tasks may be enabled; enabled tasks may be disabled
        restrict_e = _workflow_ids(False)
        restrict_d = _workflow_ids(True)

        # Workflows with no scheduler task at all may be activated
        rows = []
        records = db(stable.id > 0).select(stable.vars)
        tasks = [record.vars for record in records]
        parser1 = []
        for task in tasks:
            if "workflow" in task:
                fragment = task.split("\"workflow\":")[1]
                workflow_name = fragment.split("}")[0]
                workflow_name = workflow_name.split("\"")[1].split("\"")[0]
                parser1 += [workflow_name]
        parser2 = []
        for task in tasks:
            if "source" in task:
                fragment = task.split("\"source\":")[1]
                source_name = fragment.split(",")[0]
                source_name = source_name.split("\"")[1]
                parser2 += [source_name]
        workflows = db(wtable.id > 0).select(wtable.id,
                                             wtable.workflow_task_id,
                                             wtable.source_task_id)
        for workflow in workflows:
            if workflow.workflow_task_id and workflow.source_task_id:
                if (workflow.workflow_task_id not in parser1) or \
                   (workflow.source_task_id not in parser2):
                    rows += [workflow]
        restrict_a = [str(row.id) for row in rows]

        s3.actions = s3.actions + [
            dict(label=str(T("Enable")),
                 _class="action-btn",
                 url=URL(f="enable_parser",
                         args="[id]"),
                 restrict = restrict_e)
        ]
        s3.actions.append(dict(label=str(T("Disable")),
                               _class="action-btn",
                               url = URL(f = "disable_parser",
                                         args = "[id]"),
                               restrict = restrict_d)
                          )
        s3.actions.append(dict(label=str(T("Activate")),
                               _class="action-btn",
                               url = URL(f = "schedule_parser",
                                         args = "[id]"),
                               restrict = restrict_a)
                          )
        return output
    s3.postp = postp
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def schedule_parser():
    """
    Schedule a parsing workflow as a recurring background task.
    """
    if not request.args:
        session.error = T("Workflow not specified!")
        redirect(URL(f="workflow"))
    record_id = request.args[0]

    wtable = s3db.msg_workflow
    row = db(wtable.id == record_id).select(wtable.workflow_task_id,
                                            wtable.source_task_id,
                                            limitby=(0, 1)).first()

    s3task.schedule_task("msg_parse_workflow",
                         vars={"workflow": row.workflow_task_id,
                               "source": row.source_task_id},
                         period=300,   # seconds
                         timeout=300,  # seconds
                         repeats=0     # unlimited
                         )
    redirect(URL(f="workflow"))
# -----------------------------------------------------------------------------
def schedule_email():
    """
    Schedule inbound processing for an Email source.
    """
    if not request.args:
        session.error = T("Source not specified!")
        redirect(URL(f="inbound_email_settings"))
    record_id = request.args[0]

    mtable = s3db.msg_inbound_email_settings
    row = db(mtable.id == record_id).select(mtable.username,
                                            limitby=(0, 1)).first()

    s3task.schedule_task("msg_process_inbound_email",
                         vars={"username": row.username},
                         period=300,   # seconds
                         timeout=300,  # seconds
                         repeats=0     # unlimited
                         )
    redirect(URL(f="inbound_email_settings"))
# -----------------------------------------------------------------------------
def schedule_twilio_sms():
    """
    Schedule inbound processing for a Twilio SMS source.
    """
    if not request.args:
        session.error = T("Source not specified!")
        redirect(URL(f="twilio_inbound_settings"))
    record_id = request.args[0]

    ttable = s3db.msg_twilio_inbound_settings
    row = db(ttable.id == record_id).select(ttable.account_name,
                                            limitby=(0, 1)).first()

    s3task.schedule_task("msg_twilio_inbound_sms",
                         vars={"account": row.account_name},
                         period=300,   # seconds
                         timeout=300,  # seconds
                         repeats=0     # unlimited
                         )
    redirect(URL(f="twilio_inbound_settings"))
# -----------------------------------------------------------------------------
def disable_parser():
    """
    Disable the scheduler task for a parsing workflow.
    """
    try:
        id = request.args[0]
    except:
        session.error = T("Workflow not specified!")
        redirect(URL(f="workflow"))

    stable = s3db.scheduler_task
    wtable = s3db.msg_workflow
    records = db(stable.id > 0).select()
    workflow = db(wtable.id == id).select(wtable.workflow_task_id,
                                          wtable.source_task_id,
                                          limitby=(0, 1)).first()
    for record in records:
        # BUGFIX: was `"workflow" and "source" in record.vars`, which only
        # tested for "source" and could IndexError on the "workflow" split
        if "workflow" in record.vars and "source" in record.vars:
            # record.vars is a JSON string; extract both values by splitting
            fragment = record.vars.split("\"workflow\":")[1]
            workflow_name = fragment.split("}")[0]
            workflow_name = workflow_name.split("\"")[1].split("\"")[0]
            fragment = record.vars.split("\"source\":")[1]
            source_name = fragment.split(",")[0]
            source_name = source_name.split("\"")[1]
            if (workflow_name == workflow.workflow_task_id) and \
               (source_name == workflow.source_task_id):
                db(stable.id == record.id).update(enabled = False)
    redirect(URL(f="workflow"))
# -----------------------------------------------------------------------------
def disable_email():
    """
    Disable the scheduler task for an Email source.
    """
    try:
        id = request.args[0]
    except:
        session.error = T("Source not specified!")
        redirect(URL(f="inbound_email_settings"))

    stable = s3db.scheduler_task
    mtable = s3db.msg_inbound_email_settings
    setting = db(mtable.id == id).select(limitby=(0, 1)).first()
    for task in db(stable.id > 0).select():
        if "username" in task.vars:
            # task.vars is a JSON string; extract the username by splitting
            fragment = task.vars.split("\"username\":")[1]
            username = fragment.split("}")[0]
            username = username.split("\"")[1].split("\"")[0]
            if username == setting.username:
                db(stable.id == task.id).update(enabled = False)
    redirect(URL(f="inbound_email_settings"))
# -----------------------------------------------------------------------------
def disable_twilio_sms():
    """
    Disable the scheduler task for a Twilio SMS source.
    """
    try:
        id = request.args[0]
    except:
        session.error = T("Source not specified!")
        redirect(URL(f="twilio_inbound_settings"))

    stable = s3db.scheduler_task
    ttable = s3db.msg_twilio_inbound_settings
    setting = db(ttable.id == id).select(limitby=(0, 1)).first()
    for task in db(stable.id > 0).select():
        if "account" in task.vars:
            # task.vars is a JSON string; extract the account by splitting
            fragment = task.vars.split("\"account\":")[1]
            account = fragment.split("}")[0]
            account = account.split("\"")[1].split("\"")[0]
            if account == setting.account_name:
                db(stable.id == task.id).update(enabled = False)
    redirect(URL(f="twilio_inbound_settings"))
# -----------------------------------------------------------------------------
def enable_email():
    """
    Enable the scheduler task for an Email source.
    """
    try:
        id = request.args[0]
    except:
        session.error = T("Source not specified!")
        redirect(URL(f="inbound_email_settings"))

    stable = s3db.scheduler_task
    mtable = s3db.msg_inbound_email_settings
    setting = db(mtable.id == id).select(mtable.ALL).first()
    for task in db(stable.id > 0).select():
        if "username" in task.vars:
            # task.vars is a JSON string; extract the username by splitting
            fragment = task.vars.split("\"username\":")[1]
            username = fragment.split("}")[0]
            username = username.split("\"")[1].split("\"")[0]
            if username == setting.username:
                db(stable.id == task.id).update(enabled = True)
    redirect(URL(f="inbound_email_settings"))
# -----------------------------------------------------------------------------
def enable_twilio_sms():
    """
    Enable the scheduler task for a Twilio SMS source.
    """
    try:
        id = request.args[0]
    except:
        session.error = T("Source not specified!")
        redirect(URL(f="twilio_inbound_settings"))

    stable = s3db.scheduler_task
    ttable = s3db.msg_twilio_inbound_settings
    setting = db(ttable.id == id).select(ttable.ALL).first()
    for task in db(stable.id > 0).select():
        if "account" in task.vars:
            # task.vars is a JSON string; extract the account by splitting
            fragment = task.vars.split("\"account\":")[1]
            account = fragment.split("}")[0]
            account = account.split("\"")[1].split("\"")[0]
            if account == setting.account_name:
                db(stable.id == task.id).update(enabled = True)
    redirect(URL(f="twilio_inbound_settings"))
# -----------------------------------------------------------------------------
def enable_parser():
    """
    Enable the scheduler task for a parsing workflow.
    """
    try:
        id = request.args[0]
    except:
        session.error = T("Workflow not specified!")
        redirect(URL(f="workflow"))

    stable = s3db.scheduler_task
    wtable = s3db.msg_workflow
    records = db(stable.id > 0).select()
    workflow = db(wtable.id == id).select(wtable.workflow_task_id,
                                          wtable.source_task_id,
                                          limitby=(0, 1)).first()
    for record in records:
        # BUGFIX: was `"workflow" and "source" in record.vars`, which only
        # tested for "source" and could IndexError on the "workflow" split
        if "workflow" in record.vars and "source" in record.vars:
            # record.vars is a JSON string; extract both values by splitting
            fragment = record.vars.split("\"workflow\":")[1]
            workflow_name = fragment.split("}")[0]
            workflow_name = workflow_name.split("\"")[1].split("\"")[0]
            fragment = record.vars.split("\"source\":")[1]
            source_name = fragment.split(",")[0]
            source_name = source_name.split("\"")[1]
            if (workflow_name == workflow.workflow_task_id) and \
               (source_name == workflow.source_task_id):
                db(stable.id == record.id).update(enabled = True)
    redirect(URL(f="workflow"))
# -----------------------------------------------------------------------------
def email_inbox():
    """
    RESTful CRUD controller for the Email Inbox
    - all Inbound Email Messages go here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))

    tablename = "msg_email_inbox"
    table = s3db[tablename]  # load the model
    s3db.configure(tablename,
                   listadd=False)
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def twilio_inbox():
    """
    RESTful CRUD controller for the Twilio SMS Inbox
    - all Inbound SMS Messages from Twilio go here
    """
    if not auth.s3_logged_in():
        session.error = T("Requires Login!")
        redirect(URL(c="default", f="user", args="login"))
    tablename = "msg_twilio_inbox"
    table = s3db[tablename]
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_display = T("Twilio SMS Inbox"),
        title_list = T("Twilio SMS Inbox"),
        title_update = T("Edit SMS Message"),
        title_search = T("Search Twilio SMS Inbox"),
        label_list_button = T("View Twilio SMS"),
        msg_record_deleted = T("Twilio SMS deleted"),
        msg_list_empty = T("Twilio SMS Inbox empty. "),
        msg_record_modified = T("Twilio SMS updated")
    )
    # Records are created by the inbound task, not manually
    s3db.configure(tablename, listadd=False)
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def modem_settings():
    """
    RESTful CRUD controller for modem settings
    - appears in the administration menu
    Multiple Modems can be configured to receive Inbound Messages
    """
    # pyserial is required to talk to the modem
    try:
        import serial
    except ImportError:
        session.error = T("Python Serial module not available within the running Python - this needs installing to activate the Modem")
        redirect(URL(c="admin", f="index"))
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]
    # Field labels & tooltips
    table.modem_port.label = T("Port")
    table.modem_baud.label = T("Baud")
    table.enabled.label = T("Enabled")
    table.modem_port.comment = DIV(DIV(_class="tooltip",
                                       _title="%s|%s" % (T("Port"),
                                                         T("The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows"))))
    table.modem_baud.comment = DIV(DIV(_class="tooltip",
                                       _title="%s|%s" % (T("Baud"),
                                                         T("Baud rate to use for your modem - The default is safe for most cases"))))
    table.enabled.comment = DIV(DIV(_class="tooltip",
                                    _title="%s|%s" % (T("Enabled"),
                                                      T("Unselect to disable the modem"))))
    # CRUD Strings
    ADD_SETTING = T("Add Setting")
    s3.crud_strings[tablename] = Storage(
        title_create = ADD_SETTING,
        title_display = T("Setting Details"),
        title_list = T("Settings"),
        title_update = T("Edit Modem Settings"),
        title_search = T("Search Settings"),
        label_list_button = T("View Settings"),
        label_create_button = ADD_SETTING,
        msg_record_created = T("Setting added"),
        msg_record_modified = T("Modem settings updated"),
        msg_record_deleted = T("Setting deleted"),
        msg_list_empty = T("No Settings currently defined")
    )
    s3db.configure(tablename,
                   #deletable=False,
                   #listadd=False,
                   #update_next = URL(args=[1, "update"])
                   )
    #response.menu_options = admin_menu_options
    return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def smtp_to_sms_settings():
    """
    RESTful CRUD controller for SMTP to SMS settings
    - appears in the administration menu
    Only 1 of these ever in existence
    """
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]
    # Field labels & tooltips
    table.address.label = T("Address")
    table.subject.label = T("Subject")
    table.enabled.label = T("Enabled")
    table.address.comment = DIV(DIV(_class="tooltip",
                                    _title="%s|%s" % (T("Address"),
                                                      T("Email Address to which to send SMS messages. Assumes sending to phonenumber@address"))))
    table.subject.comment = DIV(DIV(_class="tooltip",
                                    _title="%s|%s" % (T("Subject"),
                                                      T("Optional Subject to put into Email - can be used as a Security Password by the service provider"))))
    table.enabled.comment = DIV(DIV(_class="tooltip",
                                    _title="%s|%s" % (T("Enabled"),
                                                      T("Unselect to disable this SMTP service"))))
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_update = T("Edit SMTP to SMS Settings"),
        msg_record_modified = T("SMTP to SMS settings updated"),
    )
    # Singleton record: no add/delete, always go back to update view
    s3db.configure(tablename,
                   deletable=False,
                   listadd=False,
                   update_next = URL(args=[1, "update"]))
    #response.menu_options = admin_menu_options
    return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def api_settings():
    """
    RESTful CRUD controller for Web API settings
    - appears in the administration menu
    Only 1 of these ever in existence
    """
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]
    # Field labels & tooltips
    table.url.label = T("URL")
    table.to_variable.label = T("To variable")
    table.message_variable.label = T("Message variable")
    table.enabled.label = T("Enabled")
    table.url.comment = DIV(DIV(_class="tooltip",
                                _title="%s|%s" % (T("URL"),
                                                  T("The URL of your web gateway without the post parameters"))))
    table.parameters.comment = DIV(DIV(_class="tooltip",
                                       _title="%s|%s" % (T("Parameters"),
                                                         T("The post variables other than the ones containing the message and the phone number"))))
    table.message_variable.comment = DIV(DIV(_class="tooltip",
                                             _title="%s|%s" % (T("Message Variable"),
                                                               T("The post variable on the URL used for sending messages"))))
    table.to_variable.comment = DIV(DIV(_class="tooltip",
                                        _title="%s|%s" % (T("To variable"),
                                                          T("The post variable containing the phone number"))))
    table.enabled.comment = DIV(DIV(_class="tooltip",
                                    _title="%s|%s" % (T("Enabled"),
                                                      T("Unselect to disable this API service"))))
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_update = T("Edit Web API Settings"),
        msg_record_modified = T("Web API settings updated"),
    )
    # Singleton record: no add/delete, always go back to update view
    s3db.configure(tablename,
                   deletable=False,
                   listadd=False,
                   update_next = URL(args=[1, "update"]))
    #response.menu_options = admin_menu_options
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def tropo_settings():
    """
    RESTful CRUD controller for Tropo settings
    - appears in the administration menu
    Only 1 of these ever in existence
    """
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]
    # Field labels & tooltips
    table.token_messaging.label = T("Tropo Messaging Token")
    table.token_messaging.comment = DIV(DIV(_class="stickytip",
                                            _title="%s|%s" % (T("Tropo Messaging Token"),
                                                              T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>")))
    #table.token_voice.label = T("Tropo Voice Token")
    #table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"))
    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_update = T("Edit Tropo Settings"),
        msg_record_modified = T("Tropo settings updated"),
    )
    # Singleton record: no add/delete, always go back to update view
    s3db.configure(tablename,
                   deletable=False,
                   listadd=False,
                   update_next = URL(args=[1, "update"]))
    #response.menu_options = admin_menu_options
    return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def twitter_settings():
    """
    RESTful CRUD controller for Twitter settings
    - appears in the administration menu
    Only 1 of these ever in existence

    Uses tweepy's OAuth flow: prep fetches an authorization URL for the
    form view, postp renders a link where the user collects the PIN.
    """
    try:
        import tweepy
    except:
        session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
        redirect(URL(c="admin", f="index"))

    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    # CRUD Strings
    s3.crud_strings[tablename] = Storage(
        title_update = T("Authenticate system's Twitter account"),
        msg_record_modified = T("System's Twitter account updated"),
    )

    def prep(r):
        """Start the OAuth dance when the form is about to be shown"""
        oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
        oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
        if not (oauth_consumer_key and oauth_consumer_secret):
            session.error = T("You should edit Twitter settings in models/000_config.py")
            return True
        oauth = tweepy.OAuthHandler(oauth_consumer_key,
                                    oauth_consumer_secret)
        table = r.table
        if r.http == "GET" and r.method in ("create", "update"):
            # We're showing the form: stash the OAuth request token so the
            # PIN entered later can be matched to it
            _s3 = session.s3
            try:
                _s3.twitter_oauth_url = oauth.get_authorization_url()
                _s3.twitter_request_key = oauth.request_token.key
                _s3.twitter_request_secret = oauth.request_token.secret
            except tweepy.TweepError:
                session.error = T("Problem connecting to twitter.com - please refresh")
                return True
            table.pin.readable = True
            table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
            table.pin.value = ""
            table.twitter_account.label = T("Current Twitter account")
            return True
        else:
            # Not showing form, no need for pin
            table.pin.readable = False
            table.pin.label = T("PIN")  # won't be seen
            table.pin.value = ""        # but let's be on the safe side
            return True
    s3.prep = prep

    # Post-processor
    def user_postp(r, output):
        """Add the 'Collect PIN' link above the form"""
        output["list_btn"] = ""
        if r.http == "GET" and r.method in ("create", "update"):
            # BUGFIX: the OAuth URL is runtime data, not a translatable
            # string - it must not be passed through T()
            rheader = A(T("Collect PIN from Twitter"),
                        _href=session.s3.twitter_oauth_url,
                        _target="_blank")
            output["rheader"] = rheader
        return output
    s3.postp = user_postp

    #response.menu_options = admin_menu_options
    s3db.configure(tablename, listadd=False, deletable=False)
    return s3_rest_controller()
# =============================================================================
# The following functions hook into the pr functions:
#
def group():
    """ RESTful CRUD controller """
    if not (auth.is_logged_in() or auth.basic()):
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next": URL(c="msg", f="group")}))

    module = "pr"
    tablename = "%s_%s" % (module, resourcename)
    table = s3db[tablename]

    # Hide unnecessary fields
    table.description.readable = table.description.writable = False

    # Do not show system groups
    s3.filter = (table.system == False)

    return s3_rest_controller(module, resourcename, rheader=s3db.pr_rheader)
# -----------------------------------------------------------------------------
def group_membership():
    """ RESTful CRUD controller """
    if not (auth.is_logged_in() or auth.basic()):
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next": URL(c="msg", f="group_membership")}))

    table = s3db.pr_group_membership

    # Hide unnecessary fields
    table.description.readable = table.description.writable = False
    table.comments.readable = table.comments.writable = False
    table.group_head.readable = table.group_head.writable = False

    return s3_rest_controller("pr", resourcename)
# -----------------------------------------------------------------------------
def contact():
    """ Allows the user to add, update and delete their contacts """
    # NOTE(review): `s3db.pr.contact` looks suspect - tables are accessed
    # elsewhere as s3db.pr_contact / s3db["pr_contact"]; confirm this
    # attribute chain actually resolves to the contact table
    table = s3db.pr.contact
    ptable = s3db.pr_person  # NOTE: unused below - presumably legacy
    if auth.is_logged_in() or auth.basic():
        # Restrict the view to the user's own contacts
        s3.filter = (table.pe_id == auth.user.pe_id)
    else:
        redirect(URL(c="default", f="user", args="login",
                     vars={"_next": URL(c="msg", f="contact")}))
    # These fields will be populated automatically
    table.name.writable = table.name.readable = False
    table.pe_id.writable = table.pe_id.readable = False
    table.person_name.writable = table.person_name.readable = False
    table.id.writable = False
    #table.id.readable = False
    def msg_contact_onvalidation(form):
        """ This onvalidation method adds the person id to the record """
        if auth.user:
            form.vars.pe_id = auth.user.pe_id
    s3db.configure(table._tablename,
                   onvalidation=msg_contact_onvalidation)
    def msg_contact_restrict_access(r):
        """ The following restricts update and delete access to contacts not owned by the user """
        if r.id:
            pe_id = r.record.pe_id
            if auth.user and auth.user.pe_id == pe_id:
                return True
            else:
                session.error = T("Access denied")
                return dict(bypass = True, output = redirect(URL(r=request)))
        else:
            return True
    s3.prep = msg_contact_restrict_access
    response.menu_options = []
    return s3_rest_controller("pr", resourcename)
# -----------------------------------------------------------------------------
def search():
    """
    Do a search of groups which match a type
    - used for auto-completion
    """
    if not (auth.is_logged_in() or auth.basic()):
        # Not allowed
        return

    # JQuery UI Autocomplete uses 'term' instead of 'value'
    # (old JQuery Autocomplete uses 'q' instead of 'value')
    value = request.vars.term or request.vars.q
    type = request.get_vars.get("type", None)
    if not value:
        return

    # Call the search function & encode the result as JSON
    matches = person_search(value, type) if type else person_search(value)
    response.headers["Content-Type"] = "application/json"
    return json.dumps(matches)
# -----------------------------------------------------------------------------
def recipient_represent(id, default_label=""):
    """ Simplified output as-compared to pr_pentity_represent """
    output = ""

    # Resolve the pentity to its instance type (person/group/...)
    petable = s3db.pr_pentity
    pe = db(petable.pe_id == id).select(petable.instance_type,
                                        limitby=(0, 1)).first()
    if not pe:
        return output
    instance_type = pe.instance_type
    table = db.get(instance_type, None)
    if not table:
        return output

    if instance_type == "pr_person":
        person = db(table.pe_id == id).select(table.first_name,
                                              table.middle_name,
                                              table.last_name,
                                              limitby=(0, 1)).first()
        if person:
            output = s3_fullname(person)
    elif instance_type == "pr_group":
        group = db(table.pe_id == id).select(table.name,
                                             limitby=(0, 1)).first()
        if group:
            output = group.name
    return output
# -----------------------------------------------------------------------------
def person_search(value, type=None):
    """
    Search for People & Groups which match a search term

    @param value: the (sub)string to match against names
    @param type: optionally restrict to "pr_group" or "pr_person"
    @return: list of {"id": pe_id, "name": representation} dicts
    """
    # Shortcuts
    groups = s3db.pr_group
    persons = s3db.pr_person
    items = []

    # We want to do case-insensitive searches
    # (default anyway on MySQL/SQLite, but not PostgreSQL)
    value = value.lower()

    if type:
        represent = recipient_represent
    else:
        represent = s3db.pr_pentity_represent

    def _append_matches(rows):
        """Represent each matching pe_id and add it to the result list"""
        for row in rows:
            items.append({"id": row.pe_id,
                          "name": represent(row.pe_id, default_label = "")})

    if type == "pr_group" or not type:
        # Check Groups
        query = (groups["name"].lower().like("%" + value + "%")) & \
                (groups.deleted == False)
        _append_matches(db(query).select(groups.pe_id))

    if type == "pr_person" or not type:
        # Check Persons: first, middle & last names
        # (previously three copy-pasted stanzas)
        deleted = (persons.deleted == False)
        for fieldname in ("first_name", "middle_name", "last_name"):
            query = (persons[fieldname].lower().like("%" + value + "%")) & \
                    deleted
            _append_matches(db(query).select(persons.pe_id,
                                             cache=s3db.cache))
    return items
# -----------------------------------------------------------------------------
def subscription():
    """ RESTful CRUD controller """
    output = s3_rest_controller()
    return output
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def tag():
    """ RESTful CRUD controller """
    tablename = "%s_%s" % (module, resourcename)
    tag_table = s3db[tablename]

    # Load all models so every tablename is available as an option
    s3db.load_all_models()
    tag_table.resource.requires = IS_IN_SET(db.tables)

    s3db.configure(tablename, listadd=False)
    return s3_rest_controller()
# END ================================================================================
|
casadi/casadi | refs/heads/master | test/python/ocp.py | 3 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import casadi as c
import numpy
import unittest
from types import *
from helpers import *
class OCPtests(casadiTestCase):
    """Optimal-control regression tests: a discrete LQ problem, two
    single-shooting problems, and a JModelica XML import check."""

    @requires_nlpsol("ipopt")
    def testdiscrete(self):
        self.message("Linear-quadratic problem, discrete, using IPOPT")
        # inspired by www.cs.umsl.edu/~janikow/publications/1992/GAforOpt/text.pdf
        a=1.0
        b=1.0
        q=1.0
        s=1.0
        r=1.0
        x0=100
        N=100
        # Decision variables: state trajectory X (N+1 points) and controls U (N)
        X=SX.sym("X",N+1)
        U=SX.sym("U",N)
        V = vertcat(*[X,U])
        # Quadratic stage cost plus terminal cost
        cost = 0
        for i in range(N):
            cost = cost + s*X[i]**2+r*U[i]**2
        cost = cost + q*X[N]**2
        # Constraints: initial condition and the linear dynamics x_{k+1} = a*x_k + b*u_k
        nlp = {'x':V, 'f':cost, 'g':vertcat(*[X[0]-x0,X[1:,0]-(a*X[:N,0]+b*U)])}
        opts = {}
        opts["ipopt.tol"] = 1e-5
        opts["ipopt.hessian_approximation"] = "limited-memory"
        opts["ipopt.max_iter"] = 100
        opts["ipopt.print_level"] = 0
        solver = nlpsol("solver", "ipopt", nlp, opts)
        solver_in = {}
        solver_in["lbx"]=[-1000 for i in range(V.nnz())]
        solver_in["ubx"]=[1000 for i in range(V.nnz())]
        # Dynamics constraints are equalities (lbg == ubg == 0)
        solver_in["lbg"]=[0 for i in range(N+1)]
        solver_in["ubg"]=[0 for i in range(N+1)]
        solver_out = solver(**solver_in)
        ocp_sol=solver_out["f"][0]
        # solve the ricatti equation exactly (backward recursion) to get the
        # analytic optimal cost for comparison
        K = q+0.0
        for i in range(N):
            K = s+r*a**2*K/(r+b**2*K)
        exact_sol=K * x0**2
        self.assertAlmostEqual(ocp_sol,exact_sol,10,"Linear-quadratic problem solution using IPOPT")

    @requires_nlpsol("ipopt")
    def test_singleshooting(self):
        self.message("Single shooting")
        p0 = 0.2
        y0= 1
        yc0=dy0=0
        te=0.4
        t=SX.sym("t")
        q=SX.sym("y",2,1)
        p=SX.sym("p",1,1)
        # State q = [y, y']; ODE: y'' = p + (y')**2
        # y
        # y'
        dae={'x':q, 'p':p, 't':t, 'ode':vertcat(*[q[1],p[0]+q[1]**2 ])}
        opts = {}
        opts["reltol"] = 1e-15
        opts["abstol"] = 1e-15
        opts["verbose"] = False
        opts["steps_per_checkpoint"] = 10000
        opts["t0"] = 0
        opts["tf"] = te
        integrator = casadi.integrator("integrator", "cvodes", dae, opts)
        var = MX.sym("var",2,1)
        par = MX.sym("par",1,1)
        # Keep a handle on the symbolic parameter before rebinding 'par'
        parMX= par
        q0 = vertcat(*[var[0],par])
        par = var[1]
        qend = integrator(x0=q0, p=par)["xf"]
        parc = MX(0)
        f = Function('f', [var,parMX],[qend[0]])
        # Maximize the final y (minimize its negative)
        nlp = {'x':var, 'f':-f(var,parc)}
        opts = {}
        opts["ipopt.tol"] = 1e-12
        opts["ipopt.hessian_approximation"] = "limited-memory"
        opts["ipopt.max_iter"] = 10
        opts["ipopt.derivative_test"] = "first-order"
        opts["ipopt.print_level"] = 0
        solver = nlpsol("solver", "ipopt", nlp, opts)
        solver_in = {}
        solver_in["lbx"]=[-1, -1]
        solver_in["ubx"]=[1, 0.2]
        solver_out = solver(**solver_in)
        # Both bounds should be active at the optimum; multipliers are compared
        # against the analytic sensitivities of the ODE solution.
        self.assertAlmostEqual(solver_out["x"][0],1,8,"X_opt")
        self.assertAlmostEqual(solver_out["x"][1],0.2,8,"X_opt")
        self.assertAlmostEqual(fmax(solver_out["lam_x"],0)[0],1,8,"Cost should be linear in y0")
        self.assertAlmostEqual(fmax(solver_out["lam_x"],0)[1],(sqrt(p0)*(te*yc0**2-yc0+p0*te)*tan(arctan(yc0/sqrt(p0))+sqrt(p0)*te)+yc0**2)/(2*p0*yc0**2+2*p0**2),8,"Cost should be linear in y0")
        self.assertAlmostEqual(-solver_out["f"][0],(2*y0-log(yc0**2/p0+1))/2-log(cos(arctan(yc0/sqrt(p0))+sqrt(p0)*te)),7,"Cost")
        self.assertAlmostEqual(fmax(-solver_out["lam_x"],0)[0],0,8,"Constraint is supposed to be unactive")
        self.assertAlmostEqual(fmax(-solver_out["lam_x"],0)[1],0,8,"Constraint is supposed to be unactive")

    @requires_nlpsol("ipopt")
    def test_singleshooting2(self):
        self.message("Single shooting 2")
        p0 = 0.2
        y0= 0.2
        yc0=dy0=0.1
        te=0.4
        t=SX.sym("t")
        q=SX.sym("y",2,1)
        p=SX.sym("p",1,1)
        # State q = [y, y']; same ODE as test_singleshooting
        # y
        # y'
        dae={'x':q, 'p':p, 't':t, 'ode':vertcat(*[q[1],p[0]+q[1]**2 ])}
        opts = {}
        opts["reltol"] = 1e-15
        opts["abstol"] = 1e-15
        opts["verbose"] = False
        opts["steps_per_checkpoint"] = 10000
        opts["t0"] = 0
        opts["tf"] = te
        integrator = casadi.integrator("integrator", "cvodes", dae, opts)
        var = MX.sym("var",2,1)
        par = MX.sym("par",1,1)
        q0 = vertcat(*[var[0],par])
        parl = var[1]
        qend = integrator(x0=q0,p=parl)["xf"]
        parc = MX(dy0)
        f = Function('f', [var,par],[qend[0]])
        # Same objective, but now with an extra inequality constraint var[0]-var[1]
        nlp = {'x':var, 'f':-f(var,parc), 'g':var[0]-var[1]}
        opts = {}
        opts["ipopt.tol"] = 1e-12
        opts["ipopt.hessian_approximation"] = "limited-memory"
        opts["ipopt.max_iter"] = 10
        opts["ipopt.derivative_test"] = "first-order"
        #opts["ipopt.print_level"] = 0
        solver = nlpsol("solver", "ipopt", nlp, opts)
        solver_in = {}
        solver_in["lbx"]=[-1, -1]
        solver_in["ubx"]=[1, 0.2]
        solver_in["lbg"]=[-1]
        solver_in["ubg"]=[0]
        solver_out = solver(**solver_in)
        self.assertAlmostEqual(solver_out["x"][0],0.2,6,"X_opt")
        self.assertAlmostEqual(solver_out["x"][1],0.2,6,"X_opt")
        self.assertAlmostEqual(fmax(solver_out["lam_x"],0)[0],0,8,"Constraint is supposed to be unactive")
        # Analytic sensitivity of the objective w.r.t. p, used to check multipliers
        dfdp0 = (sqrt(p0)*(te*yc0**2-yc0+p0*te)*tan(arctan(yc0/sqrt(p0))+sqrt(p0)*te)+yc0**2)/(2*p0*yc0**2+2*p0**2)
        self.assertAlmostEqual(fmax(solver_out["lam_x"],0)[1],1+dfdp0,8)
        self.assertAlmostEqual(solver_out["lam_g"][0],1,8)
        self.assertAlmostEqual(-solver_out["f"][0],(2*y0-log(yc0**2/p0+1))/2-log(cos(arctan(yc0/sqrt(p0))+sqrt(p0)*te)),7,"Cost")
        self.assertAlmostEqual(fmax(-solver_out["lam_x"],0)[0],0,8,"Constraint is supposed to be unactive")
        self.assertAlmostEqual(fmax(-solver_out["lam_x"],0)[1],0,8,"Constraint is supposed to be unactive")

    @requiresPlugin(XmlFile,"tinyxml")
    def test_XML(self):
        self.message("JModelica XML parsing")
        ivp = DaeBuilder()
        ivp.parse_fmi('data/cstr.xml')

        # Separate differential and algebraic variables
        ivp.split_dae()

        self.assertTrue(len(ivp.q)==0)
        self.assertTrue(len(ivp.y)==1)

        m = vertcat(*ivp.ydef)
        self.assertTrue(isinstance(m,MX))
        self.assertEqual(str(m),'cost')

        print(dir(ivp))

        self.assertEqual(len(ivp.dae),3)
        print(type(ivp.s))
        self.assertEqual(len(ivp.s),3) # there are three states

        # NOTE(review): this rebinds 'c', shadowing the 'import casadi as c'
        # module alias within this method.
        c = ivp("cstr.c")
        T = ivp("cstr.T")
        cost = ivp("cost")
        self.assertTrue(isinstance(c,MX))

        self.assertEqual(c.name(),"cstr.c")
        self.assertEqual(T.name(),"cstr.T")
        self.assertEqual(cost.name(),"cost")
        self.assertEqual(ivp.nominal("cstr.c"),1000)

        u = ivp("u")
        #self.assertEquals(ivp.path.nnz(),3)

        #self.assertEquals(len(ivp.cfcn_lb),3)
        #self.assertEquals(len(ivp.cfcn_ub),3)
        #self.assertTrue(ivp.cfcn[0].is_equal(T))
        #self.assertTrue(ivp.cfcn[1].is_equal(u))
        #self.assertTrue(ivp.cfcn[2].is_equal(u))
        #self.assertTrue(ivp.cfcn_lb[0].isMinusInf())
        #self.assertEquals(ivp.cfcn_lb[1].to_double(),230)
        #self.assertTrue(ivp.cfcn_lb[2].isMinusInf())
        #self.assertEquals(ivp.cfcn_ub[0].to_double(),350)
        #self.assertTrue(ivp.cfcn_ub[1].isInf())
        #self.assertEquals(ivp.cfcn_ub[2].to_double(),370)

        print(ivp.init)
        print(c,T,cost)

        #print c.atTime(0)

        f=Function('f', [vertcat(*[c,T,cost])],[vertcat(*ivp.init)])
        return
        # NOTE(review): everything below this 'return' is unreachable, and
        # 'f_in' is not defined anywhere - presumably disabled deliberately;
        # confirm before re-enabling.
        f_out = f(f_in)
        self.checkarray(f_out[0],matrix([-956.271065,-250.051971,0]).T,"initeq")

        mystates = []
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
ropik/chromium | refs/heads/master | third_party/protobuf/python/google/protobuf/internal/type_checkers.py | 527 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
  """Return the value checker appropriate for a message field.

  Args:
    cpp_type: C++ type of the field (see descriptor.py).
    field_type: Protocol message field type (see descriptor.py).

  Returns:
    A checker object whose CheckValue() validates values assigned to a
    field of the specified type.
  """
  # A "string" field (as opposed to "bytes") needs the unicode-aware checker;
  # every other scalar type has a precomputed checker in the lookup table.
  is_proto_string = (cpp_type == _FieldDescriptor.CPPTYPE_STRING
                     and field_type == _FieldDescriptor.TYPE_STRING)
  if is_proto_string:
    return UnicodeValueChecker()
  return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):

  """Validates scalar field assignments as early as possible.

  Holds a tuple of acceptable Python types; CheckValue() raises TypeError
  for any value that is not an instance of one of them.
  """

  def __init__(self, *acceptable_types):
    # Stored as the tuple form expected by isinstance().
    self._acceptable_types = acceptable_types

  def CheckValue(self, proposed_value):
    """Raise TypeError unless proposed_value has an acceptable type."""
    if isinstance(proposed_value, self._acceptable_types):
      return
    raise TypeError('%.1024r has type %s, but expected one of: %s' %
                    (proposed_value, type(proposed_value),
                     self._acceptable_types))
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):

  """Checker for integer fields: validates both the type and the range.

  Subclasses supply the inclusive _MIN/_MAX class attributes.
  """

  def CheckValue(self, proposed_value):
    """Raise TypeError for non-integers, ValueError for out-of-range values."""
    if not isinstance(proposed_value, (int, long)):
      raise TypeError('%.1024r has type %s, but expected one of: %s' %
                      (proposed_value, type(proposed_value), (int, long)))
    # Bounds are inclusive on both ends.
    if proposed_value < self._MIN or proposed_value > self._MAX:
      raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):

  """Checker used for string fields.

  Accepts both str and unicode (Python 2 semantics); byte strings must be
  7-bit ASCII so they can be safely promoted to unicode.
  """

  def CheckValue(self, proposed_value):
    # Reject anything that is not a str/unicode instance.
    if not isinstance(proposed_value, (str, unicode)):
      message = ('%.1024r has type %s, but expected one of: %s' %
                 (proposed_value, type(proposed_value), (str, unicode)))
      raise TypeError(message)

    # If the value is of type 'str' make sure that it is in 7-bit ASCII
    # encoding.
    if isinstance(proposed_value, str):
      try:
        unicode(proposed_value, 'ascii')
      except UnicodeDecodeError:
        raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
                         'encoding. Non-ASCII strings must be converted to '
                         'unicode objects before being added.' %
                         (proposed_value))
class Int32ValueChecker(IntValueChecker):
  # We're sure to use ints instead of longs here since comparison may be more
  # efficient.
  _MIN = -2147483648
  _MAX = 2147483647


class Uint32ValueChecker(IntValueChecker):
  # Unsigned 32-bit range: [0, 2**32 - 1].
  _MIN = 0
  _MAX = (1 << 32) - 1


class Int64ValueChecker(IntValueChecker):
  # Signed 64-bit range: [-2**63, 2**63 - 1].
  _MIN = -(1 << 63)
  _MAX = (1 << 63) - 1


class Uint64ValueChecker(IntValueChecker):
  # Unsigned 64-bit range: [0, 2**64 - 1].
  _MIN = 0
  _MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
# Maps a CPPTYPE_* constant to the checker instance used to validate
# assignments to fields of that C++ type. Note CPPTYPE_STRING here covers
# the "bytes" case; true string fields get a UnicodeValueChecker via
# GetTypeChecker() instead.
_VALUE_CHECKERS = {
    _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
    # Floating-point fields also accept plain (long) integers.
    _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
        float, int, long),
    _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
        float, int, long),
    _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
    _FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
    }
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
    _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
    _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
    _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
    _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
    _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
    _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
    _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
    _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
    _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
    _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
    _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
    _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
    _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
    _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
    _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
    }

# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
    _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
    _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
    _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
    _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
    _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
    _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
    _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
    _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
    _FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
    _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
    _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
    _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
    _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
    _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
    _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
    _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
    _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
    _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
    }

# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
    _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
    _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
    _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
    _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
    _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
    _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
    _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
    _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
    _FieldDescriptor.TYPE_STRING: encoder.StringSizer,
    _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
    _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
    _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
    _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
    _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
    _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
    _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
    _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
    _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
    }

# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
    _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
    _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
    _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
    _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
    _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
    _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
    _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
    _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
    _FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
    _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
    _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
    _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
    _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
    _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
    _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
    _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
    _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
    _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
    }

# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_STRING:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
    _FieldDescriptor.TYPE_MESSAGE:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_BYTES:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
    }
|
DPaaS-Raksha/horizon | refs/heads/master | horizon/test/test_dashboards/dogs/puppies/views.py | 38 | from horizon import views
class IndexView(views.APIView):
    """Minimal example class-based view used by the Horizon test dashboard."""
    # A very simple class-based view...
    template_name = 'dogs/puppies/index.html'

    def get_data(self, request, context, *args, **kwargs):
        """Return the template context (extend here to inject data)."""
        # Add data to the context here...
        return context
|
peterfpeterson/mantid | refs/heads/master | Framework/PythonInterface/plugins/algorithms/EnggCalibrate.py | 1 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.kernel import *
from mantid.api import *
import mantid.simpleapi as mantid
class EnggCalibrate(PythonAlgorithm):
    """Mantid Python algorithm: calibrates one or more detector banks (or
    groups of detectors) by focusing a calibration run, fitting single peaks,
    and deriving the GSAS parameters DIFA/DIFC/TZERO."""

    # Name of the property used to select individual spectra (alternative to Bank)
    INDICES_PROP_NAME = 'SpectrumNumbers'

    def category(self):
        return "Diffraction\\Engineering"

    def seeAlso(self):
        return [ "EnggCalibrateFull" ]

    def name(self):
        return "EnggCalibrate"

    def summary(self):
        return ("Calibrates one or more detector banks (or group(s) of detectors) by performing single peak "
                "fitting.")

    def PyInit(self):
        """Declare the algorithm's input and output properties."""
        self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "", Direction.Input),
                             doc="Workspace with the calibration run to use.")

        # Imported lazily: EnggUtils is only needed when the algorithm is instantiated
        import EnggUtils
        self.declareProperty(FloatArrayProperty("ExpectedPeaks",
                                                values=EnggUtils.default_ceria_expected_peaks(),
                                                direction=Direction.Input),
                             doc="A list of dSpacing values where peaks are expected.")

        self.declareProperty(FileProperty(name="ExpectedPeaksFromFile", defaultValue="", action=FileAction.OptionalLoad,
                                          extensions=[".csv"]),
                             doc="Load from file a list of dSpacing values to be translated into TOF to find expected "
                                 "peaks. This takes precedence over 'ExpectedPeaks' if both options are given.")

        peaks_grp = 'Peaks to fit'
        self.setPropertyGroup('ExpectedPeaks', peaks_grp)
        self.setPropertyGroup('ExpectedPeaksFromFile', peaks_grp)

        self.declareProperty(MatrixWorkspaceProperty("VanadiumWorkspace", "", Direction.Input, PropertyMode.Optional),
                             doc='Workspace with the Vanadium (correction and calibration) run. Alternatively, '
                                 'when the Vanadium run has been already processed, the properties can be used')

        self.declareProperty(ITableWorkspaceProperty("VanIntegrationWorkspace", "", Direction.Input,
                                                     PropertyMode.Optional),
                             doc='Results of integrating the spectra of a Vanadium run, with one column '
                                 '(integration result) and one row per spectrum. This can be used in '
                                 'combination with OutVanadiumCurveFits from a previous execution and '
                                 'VanadiumWorkspace to provide pre-calculated values for Vanadium correction.')

        self.declareProperty(MatrixWorkspaceProperty('VanCurvesWorkspace', '', Direction.Input, PropertyMode.Optional),
                             doc='A workspace2D with the fitting workspaces corresponding to the instrument banks. '
                                 'This workspace has three spectra per bank, as produced by the algorithm Fit. '
                                 'This is meant to be used as an alternative input VanadiumWorkspace for testing and '
                                 'performance reasons. If not given, no workspace is generated.')

        vana_grp = 'Vanadium (open beam) properties'
        self.setPropertyGroup('VanadiumWorkspace', vana_grp)
        self.setPropertyGroup('VanIntegrationWorkspace', vana_grp)
        self.setPropertyGroup('VanCurvesWorkspace', vana_grp)

        self.declareProperty("Bank", '', StringListValidator(EnggUtils.ENGINX_BANKS),
                             direction=Direction.Input,
                             doc="Which bank to calibrate. It can be specified as 1 or 2, or "
                                 "equivalently, North or South. See also " + self.INDICES_PROP_NAME + " "
                                 "for a more flexible alternative to select specific detectors")

        self.declareProperty(self.INDICES_PROP_NAME, '', direction=Direction.Input,
                             doc='Sets the spectrum numbers for the detectors '
                                 'that should be considered in the calibration (all others will be '
                                 'ignored). This option cannot be used together with Bank, as they overlap. '
                                 'You can give multiple ranges, for example: "0-99", or "0-9, 50-59, 100-109".')

        banks_grp = 'Banks / spectra'
        self.setPropertyGroup('Bank', banks_grp)
        self.setPropertyGroup(self.INDICES_PROP_NAME, banks_grp)

        self.declareProperty(ITableWorkspaceProperty("DetectorPositions", "", Direction.Input, PropertyMode.Optional),
                             "Calibrated detector positions. If not specified, default ones (from the "
                             "current instrument definition) are used.")

        self.declareProperty('OutputParametersTableName', '', direction=Direction.Input,
                             doc='Name for a table workspace with the calibration parameters calculated '
                                 'from this algorithm: difc and zero parameters for GSAS. these two parameters '
                                 'are added as two columns in a single row. If not given, no table is '
                                 'generated.')

        self.declareProperty("DIFA", 0.0, direction=Direction.Output,
                             doc="Calibration parameter DIFA for the bank or range of pixels/detectors given")

        self.declareProperty("DIFC", 0.0, direction=Direction.Output,
                             doc="Calibration parameter DIFC for the bank or range of pixels/detectors given")

        self.declareProperty("TZERO", 0.0, direction=Direction.Output,
                             doc="Calibration parameter TZERO for the bank or range of pixels/detectors given")

        self.declareProperty(ITableWorkspaceProperty("FittedPeaks", "", Direction.Output),
                             doc="Information on fitted peaks as produced by the (child) algorithm EnggFitPeaks.")

        out_grp = 'Outputs'
        self.setPropertyGroup('DetectorPositions', out_grp)
        self.setPropertyGroup('OutputParametersTableName', out_grp)
        self.setPropertyGroup('DIFA', out_grp)
        self.setPropertyGroup('DIFC', out_grp)
        self.setPropertyGroup('TZERO', out_grp)
        self.setPropertyGroup('FittedPeaks', out_grp)

    def validateInputs(self):
        """Require at least one source of expected peaks (list or file)."""
        issues = dict()

        if not self.getPropertyValue("ExpectedPeaksFromFile") and not self.getPropertyValue('ExpectedPeaks'):
            issues['ExpectedPeaks'] = ("Cannot run this algorithm without any expected peak. Please provide "
                                       "either a list of peaks or a file with a list of peaks")

        return issues

    def PyExec(self):
        """Main execution: read peaks, focus the run, fit, and set outputs."""
        import EnggUtils

        max_reports = 20
        prog = Progress(self, start=0, end=1, nreports=max_reports)

        # Get peaks in dSpacing from file
        prog.report("Reading peaks")
        expected_peaks_dsp = EnggUtils.read_in_expected_peaks(filename=self.getPropertyValue("ExpectedPeaksFromFile"),
                                                              expected_peaks=self.getProperty('ExpectedPeaks').value)

        if len(expected_peaks_dsp) < 1:
            raise ValueError("Cannot run this algorithm without any input expected peaks")

        prog.report('Focusing the input workspace')
        focused_ws = self._focus_run(self.getProperty('InputWorkspace').value,
                                     self.getProperty("VanadiumWorkspace").value,
                                     self.getProperty('Bank').value,
                                     self.getProperty(self.INDICES_PROP_NAME).value,
                                     prog)

        prog.report('Fitting parameters for the focused run')
        difa, difc, zero, fitted_peaks = self._fit_params(focused_ws, expected_peaks_dsp, prog)

        self.log().information("Fitted {0} peaks. Resulting DIFA: {1}, DIFC: {2}, TZERO: {3}".
                               format(fitted_peaks.rowCount(), difa, difc, zero))
        self.log().information("Peaks fitted: {0}, centers in ToF: {1}".
                               format(fitted_peaks.column("dSpacing"),
                                      fitted_peaks.column("X0")))

        prog.report("Producing outputs")
        self._produce_outputs(difa, difc, zero, fitted_peaks)
        prog.report(max_reports, "Calibration complete")

    def _fit_params(self, focused_ws, expected_peaks_d, prog):
        """
        Fit the GSAS parameters that this algorithm produces: DIFC and TZERO. Fits a
        number of peaks starting from the expected peak positions. Then it fits a line
        on the peak positions to produce the DIFC and TZERO as used in GSAS.

        @param focused_ws :: focused workspace to do the fitting on
        @param expected_peaks_d :: expected peaks, used as initial peak positions for the
        fitting, in d-spacing units
        @param prog :: progress reporter

        @returns a tuple with three GSAS calibration parameters (DIFA, DIFC, ZERO),
        and a list of peak centers as fitted
        """
        fit_alg = self.createChildAlgorithm('EnggFitPeaks')
        fit_alg.setProperty('InputWorkspace', focused_ws)
        fit_alg.setProperty('WorkspaceIndex', 0)  # There should be only one index anyway
        fit_alg.setProperty('ExpectedPeaks', expected_peaks_d)
        # we could also pass raw 'ExpectedPeaks' and 'ExpectedPeaksFromFile' to
        # EnggFitPaks, but better to check inputs early, before this
        fit_alg.execute()
        fitted_peaks = fit_alg.getProperty('FittedPeaks').value

        difc_alg = self.createChildAlgorithm('EnggFitTOFFromPeaks')
        difc_alg.setProperty('FittedPeaks', fitted_peaks)
        prog.report("Performing fit")
        difc_alg.execute()
        prog.report("Fit complete")
        difa = difc_alg.getProperty('DIFA').value
        difc = difc_alg.getProperty('DIFC').value
        zero = difc_alg.getProperty('TZERO').value

        return difa, difc, zero, fitted_peaks

    def _focus_run(self, ws, vanadium_ws, bank, indices, prog):
        """
        Focuses the input workspace by running EnggFocus as a child algorithm, which will produce a
        single spectrum workspace.

        @param ws :: workspace to focus
        @param vanadium_ws :: workspace with Vanadium run for corrections
        @param bank :: the focusing will be applied on the detectors of this bank
        @param indices :: list of indices to consider, as an alternative to bank (bank and indices are
        mutually exclusive)

        @return focused (summed) workspace
        """
        prog.report("Initialising EnggFocus")
        # Only pass the optional properties that were actually supplied
        engg_focus_params = dict()
        detector_positions = self.getProperty('DetectorPositions').value
        if detector_positions:
            engg_focus_params["DetectorPositions"] = detector_positions

        if vanadium_ws:
            engg_focus_params["VanadiumWorkspace"] = vanadium_ws

        van_integration_ws = self.getProperty('VanIntegrationWorkspace').value
        if van_integration_ws:
            engg_focus_params["VanIntegrationWorkspace"] = van_integration_ws

        van_curves_ws = self.getProperty('VanCurvesWorkspace').value
        if van_curves_ws:
            engg_focus_params['VanCurvesWorkspace'] = van_curves_ws

        prog.report("Running EnggFocus")
        return mantid.EnggFocus(InputWorkspace=ws, Bank=bank, SpectrumNumbers=indices, StoreInADS=False,
                                startProgress=0.3, endProgress=0.6, **engg_focus_params)

    def _produce_outputs(self, difa, difc, zero, fitted_peaks):
        """
        Just fills in the output properties as requested

        @param difa :: the DIFA GSAS parameter as fitted here
        @param difc :: the DIFC GSAS parameter as fitted here
        @param zero :: the TZERO GSAS parameter as fitted here
        @param fitted_peaks :: table workspace with peak parameters (one peak per row)
        """
        import EnggUtils

        self.setProperty('DIFA', difa)
        self.setProperty('DIFC', difc)
        self.setProperty('TZERO', zero)
        self.setProperty('FittedPeaks', fitted_peaks)

        # make output table if requested
        table_name = self.getPropertyValue("OutputParametersTableName")
        if '' != table_name:
            EnggUtils.generate_output_param_table(table_name, difa, difc, zero)
            self.log().information("Output parameters added into a table workspace: %s" % table_name)


# Register the algorithm with Mantid's factory so it is available from the UI/API.
AlgorithmFactory.subscribe(EnggCalibrate)
|
m4734/mysql_pio | refs/heads/5.7 | boost_1_59_0/tools/build/test/scanner_causing_rebuilds.py | 38 | #!/usr/bin/python
# Copyright 2012 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Tests for a bug causing Boost Build's scanner targets to be rebuilt
# unnecessarily in the following scenario:
# * We want to build target X requiring target A.
# * We have a multi-file action generating targets A & B.
# * Our action generates target B with a more recent timestamp than target A.
# * Target A includes target B.
# * Target A has a registered include scanner.
# Now even if our targets A & B have already been built and are up-to-date
# (e.g. in a state left by a previous successful build run), our scanner target
# tasked with scanning target A will be marked for updating, thus causing any
# targets depending on it to be updated/rebuilt as well.
import BoostBuild

t = BoostBuild.Tester(use_test_config=False)

# Register a FOO -> CPP + BAR generator whose action touches the CPP output
# first and the BAR output later (so BAR gets the newer timestamp), and record
# an explicit CPP-includes-BAR relationship.
t.write("foo.jam", r"""
import common ;
import generators ;
import modules ;
import type ;
import types/cpp ;
type.register FOO : foo ;
type.register BAR : bar ;
generators.register-standard foo.foo : FOO : CPP BAR ;
local rule sleep-cmd ( delay )
{
if [ modules.peek : NT ]
{
return ping 127.0.0.1 -n $(delay) -w 1000 >NUL ;
}
else
{
return sleep $(delay) ;
}
}
.touch = [ common.file-creation-command ] ;
.sleep = [ sleep-cmd 2 ] ;
rule foo ( cpp bar : foo : properties * )
{
# We add the INCLUDE relationship between our generated CPP & BAR targets
# explicitly instead of relying on Boost Jam's internal implementation
# detail - automatically adding such relationships between all files
# generated by the same action. This way our test will continue to function
# correctly even if the related Boost Jam implementation detail changes.
# Note that adding this relationship by adding an #include directive in our
# generated CPP file is not good enough as such a relationship would get
# added only after the scanner target's relationships have already been
# established and they (as affected by our initial INCLUDE relationship) are
# the original reason for this test failing.
INCLUDES $(cpp) : $(bar) ;
}
actions foo
{
$(.touch) "$(<[1])"
$(.sleep)
$(.touch) "$(<[2])"
}
""")

# Empty FOO source file that drives the generator above.
t.write("x.foo", "")

t.write("jamroot.jam", """\
import foo ;
lib x : x.foo : <link>static ;
""")

# Get everything built once.
t.run_build_system()

# Simply rerunning the build without touching any of its source target files
# should not cause any files to be affected.
t.run_build_system()
t.expect_nothing_more()
|
skatecoin/p2pool-sk8 | refs/heads/master | wstools/MIMEAttachment.py | 294 | #TODO add the license
#I had to rewrite this class because the python MIME email.mime (version 2.5)
#are buggy, they use \n instead \r\n for new line which is not compliant
#to standard!
# http://bugs.python.org/issue5525
#TODO do not load all the message in memory stream it from the disk
import re
import random
import sys
# new line: RFC 2045/822 requires CRLF line endings (the stdlib email
# package of this era emitted bare \n -- see http://bugs.python.org/issue5525)
NL = '\r\n'
# Number of decimal digits in sys.maxint; used to zero-pad random tokens so
# generated boundaries and Content-Ids have a fixed, predictable width.
_width = len(repr(sys.maxint - 1))
_fmt = '%%0%dd' % _width
class MIMEMessage:
    """Hand-rolled MIME multipart builder for a SOAP message with binary
    attachments, using CRLF line endings throughout (the stdlib email.mime
    of this era used bare newlines -- http://bugs.python.org/issue5525)."""

    def __init__(self):
        self._files = []        # attached file-like objects
        self._xmlMessage = ""   # the single SOAP XML payload
        self._startCID = ""     # Content-Id of the XML (start) part
        self._boundary = ""     # multipart boundary, computed lazily

    def makeBoundary(self):
        """Choose a boundary absent from all parts and a random start CID."""
        # Concatenate every part (sadly all in memory) so _make_boundary can
        # verify its candidate never collides with the message content.
        alltext = NL.join([self._xmlMessage] + [part.read() for part in self._files])
        self._boundary = _make_boundary(alltext)
        # maybe I can save some memory
        del alltext
        self._startCID = "<" + (_fmt % random.randrange(sys.maxint)) + (_fmt % random.randrange(sys.maxint)) + ">"

    def toString(self):
        '''it return a string with the MIME message'''
        if len(self._boundary) == 0:
            # the makeBoundary hasn't been called yet
            self.makeBoundary()
        # Assemble the message: the XML part first, then one binary part per
        # attachment, then the closing boundary.
        pieces = [NL + "--" + self._boundary + NL]
        pieces.append("Content-Type: text/xml; charset=\"us-ascii\"" + NL)
        pieces.append("Content-Transfer-Encoding: 7bit" + NL)
        pieces.append("Content-Id: " + self._startCID + NL + NL)
        pieces.append(self._xmlMessage + NL)
        for file in self._files:
            pieces.append("--" + self._boundary + NL)
            pieces.append("Content-Type: application/octet-stream" + NL)
            pieces.append("Content-Transfer-Encoding: binary" + NL)
            # id(file) is unique per attachment for the message's lifetime
            pieces.append("Content-Id: <" + str(id(file)) + ">" + NL + NL)
            file.seek(0)
            pieces.append(file.read() + NL)
        pieces.append("--" + self._boundary + "--" + NL)
        return "".join(pieces)

    def attachFile(self, file):
        '''
        it adds a file to this attachment
        '''
        self._files.append(file)

    def addXMLMessage(self, xmlMessage):
        '''
        it adds the XML message. we can have only one XML SOAP message
        '''
        self._xmlMessage = xmlMessage

    def getBoundary(self):
        '''
        this function returns the string used in the mime message as a
        boundary. First the write method as to be called
        '''
        return self._boundary

    def getStartCID(self):
        '''
        This function returns the CID of the XML message
        '''
        return self._startCID
def _make_boundary(text=None):
    """Craft a random MIME boundary (code adapted from the Python stdlib).

    If *text* is given, guarantee the returned boundary never appears in it
    as a '--boundary' delimiter line, appending '.<counter>' until unique.
    """
    token = random.randrange(sys.maxint)
    boundary = ('=' * 10) + (_fmt % token) + '=='
    if text is None:
        return boundary
    candidate = boundary
    counter = 0
    # Append an increasing numeric suffix until no delimiter-looking line of
    # *text* matches the candidate.
    while re.compile('^--' + re.escape(candidate) + '(--)?$', re.MULTILINE).search(text) is not None:
        candidate = boundary + '.' + str(counter)
        counter += 1
    return candidate
|
shareefalis/ace-iphone | refs/heads/master | prepare.py | 7 | #!/usr/bin/env python
############################################################################
# prepare.py
# Copyright (C) 2015 Belledonne Communications, Grenoble France
#
############################################################################
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
############################################################################
import argparse
import os
import re
import shutil
import tempfile
import sys
from logging import error, warning, info, INFO, basicConfig
from distutils.spawn import find_executable
from subprocess import Popen, PIPE
sys.dont_write_bytecode = True
sys.path.insert(0, 'submodules/cmake-builder')
try:
import prepare
except Exception as e:
error(
"Could not find prepare module: {}, probably missing submodules/cmake-builder? Try running:\n"
"git submodule sync && git submodule update --init --recursive".format(e))
exit(1)
class IOSTarget(prepare.Target):
    """cmake-builder target for a single iOS architecture (e.g. 'armv7').

    Wires the per-architecture config/toolchain files, the SDK output
    directory and the extra cmake definitions; knows how to wipe its
    build artefacts.
    """

    def __init__(self, arch):
        prepare.Target.__init__(self, 'ios-' + arch)
        current_path = os.path.dirname(os.path.realpath(__file__))
        self.config_file = 'configs/config-ios-' + arch + '.cmake'
        self.toolchain_file = 'toolchains/toolchain-ios-' + arch + '.cmake'
        self.output = 'liblinphone-sdk/' + arch + '-apple-darwin.ios'
        self.additional_args = [
            '-DCMAKE_INSTALL_MESSAGE=LAZY',
            '-DLINPHONE_BUILDER_EXTERNAL_SOURCE_PATH=' +
            current_path + '/submodules'
        ]

    def clean(self):
        # Remove both the cmake work tree and the generated SDK output.
        if os.path.isdir('WORK'):
            shutil.rmtree(
                'WORK', ignore_errors=False, onerror=self.handle_remove_read_only)
        if os.path.isdir('liblinphone-sdk'):
            shutil.rmtree(
                'liblinphone-sdk', ignore_errors=False, onerror=self.handle_remove_read_only)
# One concrete target class per supported iOS architecture.
class IOSi386Target(IOSTarget):
    def __init__(self):
        IOSTarget.__init__(self, 'i386')


class IOSx8664Target(IOSTarget):
    def __init__(self):
        IOSTarget.__init__(self, 'x86_64')


class IOSarmv7Target(IOSTarget):
    def __init__(self):
        IOSTarget.__init__(self, 'armv7')


class IOSarm64Target(IOSTarget):
    def __init__(self):
        IOSTarget.__init__(self, 'arm64')


# Architecture name -> target instance, plus the platform aliases accepted
# on the command line ('all', 'devices', 'simulators' or a concrete arch).
targets = {
    'i386': IOSi386Target(),
    'x86_64': IOSx8664Target(),
    'armv7': IOSarmv7Target(),
    'arm64': IOSarm64Target()
}
archs_device = ['arm64', 'armv7']
archs_simu = ['i386', 'x86_64']
platforms = ['all', 'devices', 'simulators'] + archs_device + archs_simu
class PlatformListAction(argparse.Action):
    """argparse action validating every requested platform name against the
    module-level ``platforms`` list before storing the values."""

    def __call__(self, parser, namespace, values, option_string=None):
        for value in (values or []):
            if value not in platforms:
                choices = ', '.join([repr(platform) for platform in platforms])
                message = ("invalid platform: {0!r} (choose from {1})".format(value, choices))
                raise argparse.ArgumentError(self, message)
        setattr(namespace, self.dest, values)
def gpl_disclaimer(platforms):
    """Log the licensing disclaimer matching the configured build.

    Reads the cmake cache of the first prepared architecture and warns either
    that the SDK is GPL (3rd party GPL code enabled) or that it is suitable
    for proprietary use.

    :param platforms: non-empty list of architecture names; only the first
        one's WORK/ios-<arch>/cmake/CMakeCache.txt is inspected.
    """
    cmakecache = 'WORK/ios-{arch}/cmake/CMakeCache.txt'.format(arch=platforms[0])
    # Read the cache once and close the handle (the previous version read the
    # whole file twice through two leaked file objects).
    with open(cmakecache) as cache_file:
        cache_content = cache_file.read()
    gpl_third_parties_enabled = ("ENABLE_GPL_THIRD_PARTIES:BOOL=YES" in cache_content
                                 or "ENABLE_GPL_THIRD_PARTIES:BOOL=ON" in cache_content)
    if gpl_third_parties_enabled:
        warning("\n***************************************************************************"
                "\n***************************************************************************"
                "\n***** CAUTION, this liblinphone SDK is built using 3rd party GPL code *****"
                "\n***** Even if you acquired a proprietary license from Belledonne *****"
                "\n***** Communications, this SDK is GPL and GPL only. *****"
                "\n***** To disable 3rd party gpl code, please use: *****"
                "\n***** $ ./prepare.py -DENABLE_GPL_THIRD_PARTIES=NO *****"
                "\n***************************************************************************"
                "\n***************************************************************************")
    else:
        warning("\n***************************************************************************"
                "\n***************************************************************************"
                "\n***** Linphone SDK without 3rd party GPL software *****"
                "\n***** If you acquired a proprietary license from Belledonne *****"
                "\n***** Communications, this SDK can be used to create *****"
                "\n***** a proprietary linphone-based application. *****"
                "\n***************************************************************************"
                "\n***************************************************************************")
def extract_from_xcode_project_with_regex(regex):
    """Scan linphone.xcodeproj/project.pbxproj line by line and return the
    de-duplicated list of group(1) captures of *regex* (order unspecified)."""
    project_file = open('linphone.xcodeproj/project.pbxproj', 'r')
    project_lines = project_file.readlines()
    project_file.close()
    captures = [regex.search(line) for line in project_lines]
    return list(set([match.group(1) for match in captures if match is not None]))
def extract_deployment_target():
    """Return the first IPHONEOS_DEPLOYMENT_TARGET value declared in the
    Xcode project file (assumes at least one such setting exists)."""
    regex = re.compile("IPHONEOS_DEPLOYMENT_TARGET = (.*);")
    return extract_from_xcode_project_with_regex(regex)[0]
def extract_libs_list():
    """Return the names (libXXX, without .a) of every static library the
    Xcode project references under liblinphone-sdk/apple-darwin."""
    # Matched line example:
    # name = libspeexdsp.a; path = "liblinphone-sdk/apple-darwin/lib/libspeexdsp.a"; sourceTree = "<group>"; };
    regex = re.compile("name = \"*(lib\S+)\.a(\")*; path = \"liblinphone-sdk/apple-darwin/")
    return extract_from_xcode_project_with_regex(regex)
# binary name -> providing package, accumulated so every missing dependency
# can be reported to the user in a single message.
missing_dependencies = {}


def check_is_installed(binary, prog=None, warn=True):
    """Return True when *binary* is found on PATH.

    When absent and *warn* is true, record *prog* (the package providing it)
    in ``missing_dependencies`` for the consolidated error message.
    """
    if find_executable(binary):
        return True
    if warn:
        missing_dependencies[binary] = prog
        # error("Could not find {}. Please install {}.".format(binary, prog))
    return False
def detect_package_manager():
    """Return the install command of the available OS X package manager,
    preferring Homebrew over MacPorts; logs an error (and still suggests
    brew) when neither is found."""
    if find_executable("brew"):
        return "brew"
    if find_executable("port"):
        return "sudo port"
    error(
        "No package manager found. Please README or install brew using:\n\truby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"")
    return "brew"
def check_tools():
    """Probe the host for every tool the build needs.

    Accumulates problems (missing binaries, bad nasm, missing Xcode/SDK,
    missing git submodules, ...) and returns 0 when everything is present,
    non-zero otherwise. Side effects: logs errors and fills
    ``missing_dependencies`` via check_is_installed().
    """
    # Package names / install paths differ between brew and MacPorts.
    package_manager_info = {"brew-pkg-config": "pkg-config",
                            "sudo port-pkg-config": "pkgconfig",
                            "brew-binary-path": "/usr/local/bin/",
                            "sudo port-binary-path": "/opt/local/bin/"
                            }
    reterr = 0
    # Autotools-based submodules break on paths containing spaces.
    if " " in os.path.dirname(os.path.realpath(__file__)):
        error("Invalid location: linphone-iphone path should not contain any spaces.")
        reterr = 1
    for prog in ["autoconf", "automake", "doxygen", "java", "nasm", "cmake", "wget", "yasm", "optipng"]:
        reterr |= not check_is_installed(prog, prog)
    reterr |= not check_is_installed("pkg-config", package_manager_info[detect_package_manager() + "-pkg-config"])
    reterr |= not check_is_installed("ginstall", "coreutils")
    reterr |= not check_is_installed("intltoolize", "intltool")
    reterr |= not check_is_installed("convert", "imagemagick")
    # Apple ships a 'nasm' that cannot emit elf32; detect that stub.
    if find_executable("nasm"):
        nasm_output = Popen("nasm -f elf32".split(" "), stderr=PIPE, stdout=PIPE).stderr.read()
        if "fatal: unrecognised output format" in nasm_output:
            missing_dependencies["nasm"] = "nasm"
            reterr = 1
    if check_is_installed("libtoolize", warn=False):
        if not check_is_installed("glibtoolize", "libtool"):
            glibtoolize_path = find_executable("glibtoolize")
            reterr = 1
            msg = "Please do a symbolic link from glibtoolize to libtoolize:\n\tln -s {} ${}"
            error(msg.format(glibtoolize_path, glibtoolize_path.replace("glibtoolize", "libtoolize")))
    # list all missing packages to install
    if missing_dependencies:
        error("The following binaries are missing: {}. Please install them using:\n\t{} install {}".format(
            " ".join(missing_dependencies.keys()),
            detect_package_manager(),
            " ".join(missing_dependencies.values())))
    devnull = open(os.devnull, 'wb')
    # just ensure that JDK is installed - if not, it will automatically display a popup to user
    p = Popen("java -version".split(" "), stderr=devnull, stdout=devnull)
    p.wait()
    if p.returncode != 0:
        error("Please install Java JDK (not just JRE).")
        reterr = 1
    # needed by x264
    if not find_executable("gas-preprocessor.pl"):
        error("""Could not find gas-preprocessor.pl, please install it:
wget --no-check-certificate https://raw.githubusercontent.com/FFmpeg/gas-preprocessor/master/gas-preprocessor.pl && \\
chmod +x gas-preprocessor.pl && \\
sudo mv gas-preprocessor.pl {}""".format(package_manager_info[detect_package_manager() + "-binary-path"]))
        reterr = 1
    if not os.path.isdir("submodules/linphone/mediastreamer2/src") or not os.path.isdir("submodules/linphone/oRTP/src"):
        error("Missing some git submodules. Did you run:\n\tgit submodule update --init --recursive")
        reterr = 1
    # Verify Xcode and the iOS SDK are installed.
    p = Popen("xcrun --sdk iphoneos --show-sdk-path".split(" "), stdout=devnull, stderr=devnull)
    p.wait()
    if p.returncode != 0:
        error("iOS SDK not found, please install Xcode from AppStore or equivalent.")
        reterr = 1
    else:
        xcode_version = int(
            Popen("xcodebuild -version".split(" "), stdout=PIPE).stdout.read().split("\n")[0].split(" ")[1].split(".")[0])
        # Older Xcode (< 7) needs the simulator SDK's 'strings' binary.
        if xcode_version < 7:
            sdk_platform_path = Popen(
                "xcrun --sdk iphonesimulator --show-sdk-platform-path".split(" "),
                stdout=PIPE, stderr=devnull).stdout.read()[:-1]
            sdk_strings_path = "{}/{}".format(sdk_platform_path, "Developer/usr/bin/strings")
            if not os.path.isfile(sdk_strings_path):
                strings_path = find_executable("strings")
                error("strings binary missing, please run:\n\tsudo ln -s {} {}".format(strings_path, sdk_strings_path))
                reterr = 1
    return reterr
def install_git_hook():
    """Copy the repository's .git-pre-commit hook into .git/hooks/pre-commit
    (no-op when .git/hooks is absent or the hook is already installed)."""
    git_hook_path = ".git{sep}hooks{sep}pre-commit".format(sep=os.sep)
    if os.path.isdir(".git{sep}hooks".format(sep=os.sep)) and not os.path.isfile(git_hook_path):
        info("Installing Git pre-commit hook")
        shutil.copyfile(".git-pre-commit", git_hook_path)
        # rwxr-xr-x. 0o755 is the portable octal spelling: the old-style
        # literal 0755 is a syntax error on Python 3.
        os.chmod(git_hook_path, 0o755)
def generate_makefile(platforms, generator):
    """Write the top-level meta-Makefile driving the per-arch cmake builds.

    :param platforms: ordered list of architecture names to build; the first
        one's tree is used as the reference when lipo-merging the SDK.
    :param generator: command prefix invoking the underlying build tool in a
        cmake work directory (e.g. '$(MAKE) -C' or 'ninja -C').

    Also prints the GPL disclaimer matching the prepared configuration.
    """
    # One '<arch>-build' target per architecture, delegating to the tool.
    arch_targets = ""
    for arch in platforms:
        arch_targets += """
{arch}: {arch}-build
{arch}-build:
\t{generator} WORK/ios-{arch}/cmake
\t@echo "Done"
""".format(arch=arch, generator=generator)
    # Shell fragment merging each secondary arch's archive into the lipo
    # input list (warning when an archive exists only in the first tree).
    multiarch = ""
    for arch in platforms[1:]:
        multiarch += \
            """\tif test -f "$${arch}_path"; then \\
\t\tall_paths=`echo $$all_paths $${arch}_path`; \\
\t\tall_archs="$$all_archs,{arch}" ; \\
\telse \\
\t\techo "WARNING: archive `basename $$archive` exists in {first_arch} tree but does not exists in {arch} tree: $${arch}_path."; \\
\tfi; \\
""".format(first_arch=platforms[0], arch=arch)
    makefile = """
archs={archs}
LINPHONE_IPHONE_VERSION=$(shell git describe --always)
.PHONY: all
.SILENT: sdk
all: build
sdk:
\tarchives=`find liblinphone-sdk/{first_arch}-apple-darwin.ios -name '*.a'` && \\
\trm -rf liblinphone-sdk/apple-darwin && \\
\tmkdir -p liblinphone-sdk/apple-darwin && \\
\tcp -rf liblinphone-sdk/{first_arch}-apple-darwin.ios/include liblinphone-sdk/apple-darwin/. && \\
\tcp -rf liblinphone-sdk/{first_arch}-apple-darwin.ios/share liblinphone-sdk/apple-darwin/. && \\
\tfor archive in $$archives ; do \\
\t\tarmv7_path=`echo $$archive | sed -e "s/{first_arch}/armv7/"`; \\
\t\tarm64_path=`echo $$archive | sed -e "s/{first_arch}/arm64/"`; \\
\t\ti386_path=`echo $$archive | sed -e "s/{first_arch}/i386/"`; \\
\t\tx86_64_path=`echo $$archive | sed -e "s/{first_arch}/x86_64/"`; \\
\t\tdestpath=`echo $$archive | sed -e "s/-debug//" | sed -e "s/{first_arch}-//" | sed -e "s/\.ios//"`; \\
\t\tall_paths=`echo $$archive`; \\
\t\tall_archs="{first_arch}"; \\
\t\tmkdir -p `dirname $$destpath`; \\
\t\t{multiarch} \\
\t\techo "[{archs}] Mixing `basename $$archive` in $$destpath"; \\
\t\tlipo -create $$all_paths -output $$destpath; \\
\tdone; \\
\tif test -s WORK/ios-{first_arch}/Build/dummy_libraries/dummy_libraries.txt; then \\
\t\techo 'NOTE: the following libraries were STUBBED:'; \\
\t\tcat WORK/ios-{first_arch}/Build/dummy_libraries/dummy_libraries.txt; \\
\tfi
build: $(addsuffix -build, $(archs))
\t$(MAKE) sdk
ipa: build
\txcodebuild -configuration Release \\
\t&& xcrun -sdk iphoneos PackageApplication -v build/Release-iphoneos/linphone.app -o $$PWD/linphone-iphone.ipa
zipsdk: sdk
\techo "Generating SDK zip file for version $(LINPHONE_IPHONE_VERSION)"
\tzip -r liblinphone-iphone-sdk-$(LINPHONE_IPHONE_VERSION).zip \\
\tliblinphone-sdk/apple-darwin \\
\tliblinphone-tutorials \\
\t-x liblinphone-tutorials/hello-world/build\* \\
\t-x liblinphone-tutorials/hello-world/hello-world.xcodeproj/*.pbxuser \\
\t-x liblinphone-tutorials/hello-world/hello-world.xcodeproj/*.mode1v3
pull-transifex:
\ttx pull -af
push-transifex:
\t./Tools/i18n_generate_strings_files.sh && \\
\ttx push -s -f --no-interactive
zipres:
\t@tar -czf ios_assets.tar.gz Resources iTunesArtwork
{arch_targets}
help-prepare-options:
\t@echo "prepare.py was previously executed with the following options:"
\t@echo " {options}"
help: help-prepare-options
\t@echo ""
\t@echo "(please read the README.md file first)"
\t@echo ""
\t@echo "Available architectures: {archs}"
\t@echo ""
\t@echo "Available targets:"
\t@echo ""
\t@echo " * all or build: builds all architectures and creates the liblinphone SDK"
\t@echo " * sdk: creates the liblinphone SDK. Use this only after a full build"
\t@echo " * zipsdk: generates a ZIP archive of liblinphone-sdk/apple-darwin containing the SDK. Use this only after SDK is built."
\t@echo " * zipres: creates a tar.gz file with all the resources (images)"
\t@echo ""
""".format(archs=' '.join(platforms), arch_opts='|'.join(platforms),
           first_arch=platforms[0], options=' '.join(sys.argv),
           arch_targets=arch_targets,
           multiarch=multiarch, generator=generator)
    f = open('Makefile', 'w')
    f.write(makefile)
    f.close()
    gpl_disclaimer(platforms)
def list_features_with_args(debug, additional_args):
    """Run cmake once in a throw-away directory and harvest the ENABLE_*
    cache options it prints.

    Returns ``(options, ended)`` where *options* maps 'ENABLE_X' to its
    value and *ended* is True when every listed option is already 'ON'
    (i.e. a further activation pass would discover nothing new).
    """
    tmpdir = tempfile.mkdtemp(prefix="linphone-iphone")
    # Any arch works for feature discovery; arm64 is used arbitrarily.
    tmptarget = IOSarm64Target()
    tmptarget.abs_cmake_dir = tmpdir
    option_regex = re.compile("ENABLE_(.*):(.*)=(.*)")
    options = {}
    ended = True
    build_type = 'Debug' if debug else 'Release'
    for line in Popen(tmptarget.cmake_command(build_type, False, True, additional_args, verbose=False),
                      cwd=tmpdir, shell=False, stdout=PIPE).stdout.readlines():
        match = option_regex.match(line)
        if match is not None:
            (name, typeof, value) = match.groups()
            options["ENABLE_{}".format(name)] = value
            ended &= (value == 'ON')
    shutil.rmtree(tmpdir)
    return (options, ended)
def list_features(debug, args):
    """Discover and log every optional cmake feature with its default value.

    :param debug: True for a Debug configuration probe.
    :param args: the user's extra -D arguments, used to compute defaults.
    """
    additional_args = args
    options = {}
    info("Searching for available features...")
    # We have to iterate multiple times to activate ALL options, so that options depending
    # of others are also listed (cmake_dependent_option macro will not output options if
    # prerequisite is not met)
    while True:
        (options, ended) = list_features_with_args(debug, additional_args)
        if ended:
            break
        else:
            additional_args = []
            # Activate ALL available options
            for k in options.keys():
                additional_args.append("-D{}=ON".format(k))
    # Now that we got the list of ALL available options, we must correct default values
    # Step 1: all options are turned off by default
    for x in options.keys():
        options[x] = 'OFF'
    # Step 2: except options enabled when running with default args
    (options_tmp, ended) = list_features_with_args(debug, args)
    # Python 2 idiom: merge the dicts, options_tmp values winning.
    final_dict = dict(options.items() + options_tmp.items())
    notice_features = "Here are available features:"
    for k, v in final_dict.items():
        notice_features += "\n\t{}={}".format(k, v)
    info(notice_features)
    info("To enable some feature, please use -DENABLE_SOMEOPTION=ON (example: -DENABLE_OPUS=ON)")
    info("Similarly, to disable some feature, please use -DENABLE_SOMEOPTION=OFF (example: -DENABLE_OPUS=OFF)")
def main(argv=None):
    """Entry point: parse options, verify host tooling, then prepare (or
    clean) the cmake build tree of every selected iOS architecture.

    :param argv: argument vector (defaults to sys.argv).
    :returns: 0 on success, non-zero on any failure.
    """
    basicConfig(format="%(levelname)s: %(message)s", level=INFO)
    if argv is None:
        argv = sys.argv
    argparser = argparse.ArgumentParser(
        description="Prepare build of Linphone and its dependencies.")
    argparser.add_argument(
        '-c', '-C', '--clean', help="Clean a previous build instead of preparing a build.", action='store_true')
    argparser.add_argument(
        '-d', '--debug', help="Prepare a debug build, eg. add debug symbols and use no optimizations.", action='store_true')
    argparser.add_argument(
        '-dv', '--debug-verbose', help="Activate ms_debug logs.", action='store_true')
    argparser.add_argument(
        '-f', '--force', help="Force preparation, even if working directory already exist.", action='store_true')
    argparser.add_argument(
        '--disable-gpl-third-parties', help="Disable GPL third parties such as FFMpeg, x264.", action='store_true')
    argparser.add_argument(
        '--enable-gpl-third-parties', help="Enable GPL third parties such as FFMpeg, x264.", action='store_true')
    argparser.add_argument(
        '--enable-non-free-codecs', help="Enable non-free codecs such as OpenH264, MPEG4, etc.. Final application must comply with their respective license (see README.md).", action='store_true')
    argparser.add_argument(
        '--build-all-codecs', help="Build all codecs including non-free. Final application must comply with their respective license (see README.md).", action='store_true')
    argparser.add_argument(
        '-G', '--generator', help="CMake build system generator (default: Unix Makefiles, use cmake -h to get the complete list).", default='Unix Makefiles', dest='generator')
    argparser.add_argument(
        '-lf', '--list-features', help="List optional features and their default values.", action='store_true', dest='list_features')
    argparser.add_argument(
        '-t', '--tunnel', help="Enable Tunnel.", action='store_true')
    argparser.add_argument('platform', nargs='*', action=PlatformListAction, default=[
        'x86_64', 'devices'], help="The platform to build for (default is 'x86_64 devices'). Space separated architectures in list: {0}.".format(', '.join([repr(platform) for platform in platforms])))
    argparser.add_argument(
        '-L', '--list-cmake-variables', help="(debug) List non-advanced CMake cache variables.", action='store_true', dest='list_cmake_variables')
    # Unknown arguments (extra -D definitions) are forwarded to cmake below.
    args, additional_args2 = argparser.parse_known_args()
    additional_args = []
    additional_args += ["-G", args.generator]
    if check_tools() != 0:
        return 1
    # Deployment target and stub-library list are read from the Xcode project.
    additional_args += ["-DLINPHONE_IOS_DEPLOYMENT_TARGET=" + extract_deployment_target()]
    additional_args += ["-DLINPHONE_BUILDER_DUMMY_LIBRARIES=" + ' '.join(extract_libs_list())]
    if args.debug_verbose is True:
        additional_args += ["-DENABLE_DEBUG_LOGS=YES"]
    if args.enable_non_free_codecs is True:
        additional_args += ["-DENABLE_NON_FREE_CODECS=YES"]
    # --build-all-codecs turns on every audio/video codec (x264 excepted).
    if args.build_all_codecs is True:
        additional_args += ["-DENABLE_GPL_THIRD_PARTIES=YES"]
        additional_args += ["-DENABLE_NON_FREE_CODECS=YES"]
        additional_args += ["-DENABLE_AMRNB=YES"]
        additional_args += ["-DENABLE_AMRWB=YES"]
        additional_args += ["-DENABLE_G729=YES"]
        additional_args += ["-DENABLE_GSM=YES"]
        additional_args += ["-DENABLE_ILBC=YES"]
        additional_args += ["-DENABLE_ISAC=YES"]
        additional_args += ["-DENABLE_OPUS=YES"]
        additional_args += ["-DENABLE_SILK=YES"]
        additional_args += ["-DENABLE_SPEEX=YES"]
        additional_args += ["-DENABLE_FFMPEG=YES"]
        additional_args += ["-DENABLE_H263=YES"]
        additional_args += ["-DENABLE_H263P=YES"]
        additional_args += ["-DENABLE_MPEG4=YES"]
        additional_args += ["-DENABLE_OPENH264=YES"]
        additional_args += ["-DENABLE_VPX=YES"]
        additional_args += ["-DENABLE_X264=NO"]
    if args.disable_gpl_third_parties is True:
        additional_args += ["-DENABLE_GPL_THIRD_PARTIES=NO"]
    if args.enable_gpl_third_parties is True:
        additional_args += ["-DENABLE_GPL_THIRD_PARTIES=YES"]
    if args.tunnel:
        # Tunnel support lives in a private submodule; fetch it if absent.
        if not os.path.isdir("submodules/tunnel"):
            info("Tunnel wanted but not found yet, trying to clone it...")
            p = Popen("git clone gitosis@git.linphone.org:tunnel.git submodules/tunnel".split(" "))
            p.wait()
            if p.returncode != 0:
                error("Could not clone tunnel. Please see http://www.belledonne-communications.com/voiptunnel.html")
                return 1
        warning("Tunnel enabled, disabling GPL third parties.")
        additional_args += ["-DENABLE_TUNNEL=ON", "-DENABLE_GPL_THIRD_PARTIES=OFF", "-DENABLE_FFMPEG=NO"]
    # User's options are priority upon all automatic options
    additional_args += additional_args2
    if args.list_features:
        list_features(args.debug, additional_args)
        return 0
    # Expand the 'all'/'devices'/'simulators' aliases into concrete archs.
    selected_platforms_dup = []
    for platform in args.platform:
        if platform == 'all':
            selected_platforms_dup += archs_device + archs_simu
        elif platform == 'devices':
            selected_platforms_dup += archs_device
        elif platform == 'simulators':
            selected_platforms_dup += archs_simu
        else:
            selected_platforms_dup += [platform]
    # unify platforms but keep provided order
    selected_platforms = []
    for x in selected_platforms_dup:
        if x not in selected_platforms:
            selected_platforms.append(x)
    if os.path.isdir('WORK') and not args.clean and not args.force:
        warning("Working directory WORK already exists. Please remove it (option -C or -c) before re-executing CMake "
                "to avoid conflicts between executions, or force execution (option -f) if you are aware of consequences.")
        if os.path.isfile('Makefile'):
            Popen("make help-prepare-options".split(" "))
        return 0
    for platform in selected_platforms:
        target = targets[platform]
        if args.clean:
            target.clean()
        else:
            retcode = prepare.run(target, args.debug, False, args.list_cmake_variables, args.force, additional_args)
            if retcode != 0:
                return retcode
    if args.clean:
        if os.path.isfile('Makefile'):
            os.remove('Makefile')
    elif selected_platforms:
        install_git_hook()
        # only generated makefile if we are using Ninja or Makefile
        if args.generator == 'Ninja':
            if not check_is_installed("ninja", "it"):
                return 1
            generate_makefile(selected_platforms, 'ninja -C')
        elif args.generator == "Unix Makefiles":
            generate_makefile(selected_platforms, '$(MAKE) -C')
        elif args.generator == "Xcode":
            info("You can now open Xcode project with: open WORK/cmake/Project.xcodeproj")
        else:
            info("Not generating meta-makefile for generator {}.".format(args.generator))
    return 0
if __name__ == "__main__":
sys.exit(main())
|
pedrobaeza/odoo | refs/heads/master | addons/mrp/wizard/mrp_price.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_price(osv.osv_memory):
    """Transient wizard asking for a quantity and launching the product
    cost-structure report for the records selected in the context."""
    _name = 'mrp.product_price'
    _description = 'Product Price'
    _columns = {
        'number': fields.integer('Quantity', required=True, help="Specify quantity of products to produce or buy. Report of Cost structure will be displayed base on this quantity."),
    }
    _defaults = {
        'number': 1,
    }

    def print_report(self, cr, uid, ids, context=None):
        """ To print the report of Product cost structure
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return : Report
        """
        if context is None:
            context = {}
        # Read the requested quantity from the wizard record (first row only).
        rows = self.read(cr, uid, ids, ['number'])
        form_values = rows[0] if rows else {}
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'product.price',
            'datas': {
                'ids': context.get('active_ids', []),
                'form': form_values,
            },
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nojhan/weboob-devel | refs/heads/master | modules/societegenerale/pages/login.py | 6 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from base64 import b64decode
from logging import error
import re
from weboob.tools.json import json
from weboob.deprecated.browser import BrowserUnavailable
from weboob.deprecated.mech import ClientForm
from .base import BasePage
from ..captcha import Captcha, TileError
class LoginPage(BasePage):
    """Société Générale login page: unscrambles the virtual-keyboard grid
    served by the bank and submits the authentication form."""

    # Fixed 100-entry XOR key mirrored from the bank's javascript; combined
    # with the per-session grid to recover the keyboard layout.
    STRANGE_KEY = ["180","149","244","125","115","058","017","071","075","119","167","040","066","083","254","151","212","245","193","224","006","068","139","054","089","083","111","208","105","235","109","030","130","226","155","245","157","044","061","233","036","101","145","103","185","017","126","142","007","192","239","140","133","250","194","222","079","178","048","184","158","158","086","160","001","114","022","158","030","210","008","067","056","026","042","113","043","169","128","051","107","112","063","240","108","003","079","059","053","127","116","084","157","203","244","031","062","012","062","093"]
    # Per-session permutation of keypad positions; filled by decode_grid().
    strange_map = None

    def on_loaded(self):
        # Abort early when the page carries a service-unavailability notice.
        for td in self.document.getroot().cssselect('td.LibelleErreur'):
            if td.text is None:
                continue
            msg = td.text.strip()
            if 'indisponible' in msg:
                raise BrowserUnavailable(msg)

    def decode_grid(self, infos):
        """Decode the base64/XOR-scrambled keypad grid description.

        Side effect: stores the session permutation in ``self.strange_map``.
        Returns the decoded grid as a list of 3-digit strings.
        NOTE(review): this mirrors the bank's obfuscation javascript; the
        exact XOR cascade below must be kept in this order.
        """
        grid = b64decode(infos['grid'])
        # Grid is a stream of 3-digit decimal numbers.
        grid = map(int, re.findall('[0-9]{3}', grid))
        n = int(infos['nbrows']) * int(infos['nbcols'])
        # First n values: the permutation map; the rest: the scrambled grid.
        self.strange_map = list(grid[:n])
        grid = list(grid[n:])
        new_grid = list(grid)
        s = n
        u = list(infos['crypto'])
        for j in xrange(s):
            u[j] = '%02d' % ord(u[j])
        # Undo the row-chained XOR, last row first.
        for i in xrange(5, 0, -1):
            for j in xrange(s):
                new_grid[i*s+j] = '%03d' % (new_grid[i*s+j]^new_grid[(i-1)*s+j])
        # First row is additionally mixed with the fixed key and the map.
        for j in xrange(s):
            new_grid[j] = '%03d' % (new_grid[j]^int(self.STRANGE_KEY[j])^self.strange_map[j])
        # Re-key the permutation with the session crypto token.
        for j in xrange(s):
            self.strange_map[j] = int(u[j])^self.strange_map[j]
        return new_grid

    def login(self, login, password):
        """Fetch the virtual keyboard, map the password digits through it and
        submit the authentication form.

        :param login: client code (only used as form fields)
        :param password: numeric code; only its first 6 digits are used
        """
        DOMAIN_LOGIN = self.browser.DOMAIN_LOGIN
        DOMAIN = self.browser.DOMAIN
        url_login = 'https://' + DOMAIN_LOGIN + '/index.html'
        base_url = 'https://' + DOMAIN
        # 1) fetch the scrambled grid description (JSONP-like payload).
        url = base_url + '//sec/vkm/gen_crypto?estSession=0'
        headers = {
            'Referer': url_login
        }
        request = self.browser.request_class(url, None, headers)
        infos_data = self.browser.readurl(request)
        infos_data = re.match('^_vkCallback\((.*)\);$', infos_data).group(1)
        infos = json.loads(infos_data.replace("'", '"'))
        infos['grid'] = self.decode_grid(infos)
        # 2) fetch the keyboard image and split it into digit tiles.
        url = base_url + '//sec/vkm/gen_ui?modeClavier=0&cryptogramme=' + infos["crypto"]
        img = Captcha(self.browser.openurl(url), infos)
        try:
            img.build_tiles()
        except TileError as err:
            error("Error: %s" % err)
            if err.tile:
                err.tile.display()
        # 3) fill and post the authentication form.
        self.browser.select_form('n2g_authentification')
        self.browser.controls.append(ClientForm.TextControl('text', 'codsec', {'value': ''}))
        self.browser.controls.append(ClientForm.TextControl('text', 'cryptocvcs', {'value': ''}))
        self.browser.controls.append(ClientForm.TextControl('text', 'vkm_op', {'value': 'auth'}))
        self.browser.set_all_readonly(False)
        # Map the password digits to keypad codes, then apply the session
        # permutation recovered by decode_grid().
        pwd = img.get_codes(password[:6])
        t = pwd.split(',')
        newpwd = ','.join([t[self.strange_map[j]] for j in xrange(6)])
        self.browser['codcli'] = login.encode('iso-8859-1')
        self.browser['user_id'] = login.encode('iso-8859-1')
        self.browser['codsec'] = newpwd
        self.browser['cryptocvcs'] = infos["crypto"].encode('iso-8859-1')
        self.browser.form.action = 'https://particuliers.secure.societegenerale.fr//acces/authlgn.html'
        self.browser.submit(nologin=True)
class BadLoginPage(BasePage):
    """Marker page with no specific parsing or behaviour."""
    pass
|
kingvuplus/EGAMI-dvbapp | refs/heads/master | tools/create_picon_sats.py | 193 | #
# create symlinks for picons
# usage: create_picon_sats lamedb
# run in picon directory.
# It will read the servicenames from the lamedb and create symlinks
# for the servicereference names.
#
# by pieterg, 2008
import os, sys
f = open(sys.argv[1]).readlines()
f = f[f.index("services\n")+1:-3]
while len(f) > 2:
ref = [int(x, 0x10) for x in f[0][:-1].split(':')]
name = f[1][:-1]
name = name.replace('\xc2\x87', '').replace('\xc2\x86', '')
fields = f[2].split(',')
if len(fields) and fields[0][0] is 'p':
provider = fields[0].split(':')[1]
else:
provider = 'unknown'
if ref[4] == 2:
servicetype = 'radio'
else:
ref[4] = 1
servicetype = 'tv'
sat = str(ref[1]/16/16/16/16)
# SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1)
# X X X X D D
# REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED
# D D X X X X X X X X
refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1])
refstr = refstr.replace(':', '_')
filename = name + ".png"
linkname = refstr + ".png"
filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '').replace('*', '_').replace('?', '_').replace(' ', '_').replace('(', '_').replace(')', '_').replace('|', '_')
provider = provider.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '').replace('*', '_').replace('?', '_').replace(' ', '_').replace('(', '_').replace(')', '_').replace('|', '_')
filename = filename.replace('\n', '')
provider = provider.replace('\n', '')
for i in range(len(filename)):
if ord(filename[i]) > 127:
filename = filename[0:i] + '_' + filename[i + 1:]
for i in range(len(provider)):
if ord(provider[i]) > 127:
provider = provider[0:i] + '_' + provider[i + 1:]
if sat == "65535":
sat = "cable"
filename = sat + "_" + provider + "_" + servicetype + "_" + filename
else:
filename = sat + "_" + provider + "_" + servicetype + "_" + filename
sat = sat[0:2] + '.' + sat[-1:] + 'e'
#TODO: west
try:
os.makedirs(sat + '/' + servicetype)
except:
pass
try:
os.rename(linkname, sat + '/' + servicetype + '/' + filename)
except:
pass
try:
os.symlink(filename, sat + '/' + servicetype + '/' + linkname)
except:
pass
f =f[3:]
|
dreamhost/ceilometer | refs/heads/master | ceilometer/storage/impl_mongodb.py | 1 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# Copyright © 2013 eNovance
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend
"""
import copy
import datetime
import operator
import os
import re
import urlparse
import bson.code
import pymongo
from ceilometer.openstack.common import log
from ceilometer.storage import base
from ceilometer.storage import models
LOG = log.getLogger(__name__)
class MongoDBStorage(base.StorageEngine):
    """Storage engine that keeps meter data in a MongoDB database.

    Collections used::

        - user
          - { _id: user id
              source: [ array of source ids reporting for the user ]
              }
        - project
          - { _id: project id
              source: [ array of source ids reporting for the project ]
              }
        - meter
          - the raw incoming data
        - resource
          - the metadata for resources
          - { _id: uuid of resource,
              metadata: metadata dictionaries
              user_id: uuid
              project_id: uuid
              meter: [ array of {counter_name: string, counter_type: string,
                                 counter_unit: string} ]
              }
    """

    OPTIONS = []

    def register_opts(self, conf):
        """Register this engine's configuration options with *conf*."""
        conf.register_opts(self.OPTIONS)

    def get_connection(self, conf):
        """Build and return a Connection for the given configuration."""
        return Connection(conf)
def make_timestamp_range(start, end):
    """Build the timestamp clause of a MongoDB query document.

    The lower bound, when given, is matched inclusively with ``$gte``;
    the upper bound, when given, is matched exclusively with ``$lt``.
    Falsy endpoints are left out entirely, so two missing endpoints
    yield an empty dict.
    """
    return dict(
        (mongo_op, bound)
        for mongo_op, bound in (('$gte', start), ('$lt', end))
        if bound
    )
def make_query_from_filter(event_filter, require_meter=True):
    """Translate an EventFilter into a MongoDB query document.

    :param event_filter: EventFilter instance to translate.
    :param require_meter: If true and the filter does not have a meter,
                          raise an error.
    """
    q = {}

    # Simple equality filters: filter attribute -> database field name.
    for attr, db_field in (('user', 'user_id'),
                           ('project', 'project_id'),
                           ('resource', 'resource_id'),
                           ('source', 'source')):
        value = getattr(event_filter, attr)
        if value:
            q[db_field] = value

    if event_filter.meter:
        q['counter_name'] = event_filter.meter
    elif require_meter:
        raise RuntimeError('Missing required meter specifier')

    ts_range = make_timestamp_range(event_filter.start, event_filter.end)
    if ts_range:
        q['timestamp'] = ts_range

    # The samples call the metadata "resource_metadata", so prefix each
    # metaquery key accordingly before matching.
    for key, value in event_filter.metaquery.iteritems():
        q['resource_%s' % key] = value

    return q
class Connection(base.Connection):
    """MongoDB connection.

    Wraps either a real pymongo connection or, for the test suite, the
    in-memory MIM implementation from Ming.
    """

    # Shared in-memory (MIM) connection used when the test URL requests
    # it; MIM misbehaves with many connections, so it is a singleton.
    _mim_instance = None

    # MAP_TIMESTAMP and REDUCE_MIN_MAX are based on the recipe
    # http://cookbook.mongodb.org/patterns/finding_max_and_min_values_for_a_key
    MAP_TIMESTAMP = bson.code.Code("""
    function () {
        emit('timestamp', { min : this.timestamp,
                            max : this.timestamp } )
    }
    """)

    REDUCE_MIN_MAX = bson.code.Code("""
    function (key, values) {
        var res = values[0];
        for ( var i=1; i<values.length; i++ ) {
            if ( values[i].min < res.min )
               res.min = values[i].min;
            if ( values[i].max > res.max )
               res.max = values[i].max;
        }
        return res;
    }
    """)

    MAP_STATS = bson.code.Code("""
    function () {
        emit('statistics', { min : this.counter_volume,
                             max : this.counter_volume,
                             sum : this.counter_volume,
                             count : NumberInt(1),
                             duration_start : this.timestamp,
                             duration_end : this.timestamp,
                             period_start : this.timestamp,
                             period_end : this.timestamp} )
    }
    """)

    # Template: the two %d slots are (period seconds, period origin seconds).
    MAP_STATS_PERIOD = bson.code.Code("""
    function () {
        var period = %d * 1000;
        var period_first = %d * 1000;
        var period_start = period_first
                           + (Math.floor(new Date(this.timestamp.getTime()
                                                  - period_first) / period)
                              * period);
        emit(period_start,
             { min : this.counter_volume,
               max : this.counter_volume,
               sum : this.counter_volume,
               count : NumberInt(1),
               duration_start : this.timestamp,
               duration_end : this.timestamp,
               period_start : new Date(period_start),
               period_end : new Date(period_start + period) } )
    }
    """)

    REDUCE_STATS = bson.code.Code("""
    function (key, values) {
        var res = values[0];
        for ( var i=1; i<values.length; i++ ) {
            if ( values[i].min < res.min )
               res.min = values[i].min;
            if ( values[i].max > res.max )
               res.max = values[i].max;
            res.count += values[i].count;
            res.sum += values[i].sum;
            if ( values[i].duration_start < res.duration_start )
               res.duration_start = values[i].duration_start;
            if ( values[i].duration_end > res.duration_end )
               res.duration_end = values[i].duration_end;
        }
        return res;
    }
    """)

    FINALIZE_STATS = bson.code.Code("""
    function (key, value) {
        value.avg = value.sum / value.count;
        value.duration = (value.duration_end - value.duration_start) / 1000;
        value.period = NumberInt((value.period_end - value.period_start)
                                 / 1000);
        return value;
    }""")

    def __init__(self, conf):
        """Connect to the configured MongoDB (or the in-memory MIM
        implementation when the test URL requests it) and make sure the
        indexes this driver relies on exist.

        :param conf: configuration object providing database_connection.
        """
        opts = self._parse_connection_url(conf.database_connection)
        LOG.info('connecting to MongoDB on %s:%s', opts['host'], opts['port'])

        if opts['host'] == '__test__':
            url = os.environ.get('CEILOMETER_TEST_MONGODB_URL')
            if url:
                # Tests may point at a real MongoDB via the environment.
                opts = self._parse_connection_url(url)
                self.conn = pymongo.Connection(opts['host'],
                                               opts['port'],
                                               safe=True)
            else:
                # MIM will die if we have too many connections, so use a
                # Singleton
                if Connection._mim_instance is None:
                    try:
                        from ming import mim
                    except ImportError:
                        import nose
                        raise nose.SkipTest("Ming not found")
                    LOG.debug('Creating a new MIM Connection object')
                    Connection._mim_instance = mim.Connection()
                self.conn = Connection._mim_instance
                LOG.debug('Using MIM for test connection')
        else:
            self.conn = pymongo.Connection(opts['host'],
                                           opts['port'],
                                           safe=True)

        self.db = getattr(self.conn, opts['dbname'])
        if 'username' in opts:
            self.db.authenticate(opts['username'], opts['password'])

        # Establish indexes
        #
        # We need variations for user_id vs. project_id because of the
        # way the indexes are stored in b-trees. The user_id and
        # project_id values are usually mutually exclusive in the
        # queries, so the database won't take advantage of an index
        # including both.
        for primary in ['user_id', 'project_id']:
            self.db.resource.ensure_index([
                (primary, pymongo.ASCENDING),
                ('source', pymongo.ASCENDING),
            ], name='resource_idx')
            self.db.meter.ensure_index([
                ('resource_id', pymongo.ASCENDING),
                (primary, pymongo.ASCENDING),
                ('counter_name', pymongo.ASCENDING),
                ('timestamp', pymongo.ASCENDING),
                ('source', pymongo.ASCENDING),
            ], name='meter_idx')

    def upgrade(self, version=None):
        """No-op: this driver does not manage schema versions."""
        pass

    def clear(self):
        """Remove all stored data (used between test runs)."""
        if self._mim_instance is not None:
            # Don't want to use drop_database() because
            # may end up running out of spidermonkey instances.
            # http://davisp.lighthouseapp.com/projects/26898/tickets/22
            self.db.clear()
        else:
            self.conn.drop_database(self.db)

    def _parse_connection_url(self, url):
        """Break a mongodb:// style URL into its parts.

        :return: dict with keys dbtype, dbname, host, port and, when
                 credentials are present, username/password.
        """
        opts = {}
        result = urlparse.urlparse(url)
        opts['dbtype'] = result.scheme
        opts['dbname'] = result.path.replace('/', '')
        # netloc looks like "user:password@host:port" with optional parts
        netloc_match = re.match(r'(?:(\w+:\w+)@)?(.*)', result.netloc)
        auth = netloc_match.group(1)
        netloc = netloc_match.group(2)
        if auth:
            opts['username'], opts['password'] = auth.split(':')
        if ':' in netloc:
            opts['host'], port = netloc.split(':')
        else:
            opts['host'] = netloc
            port = 27017
        # Empty or zero port strings fall back to the default port.
        opts['port'] = port and int(port) or 27017
        return opts

    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        # Make sure we know about the user and project
        self.db.user.update(
            {'_id': data['user_id']},
            {'$addToSet': {'source': data['source'],
                           },
             },
            upsert=True,
        )
        self.db.project.update(
            {'_id': data['project_id']},
            {'$addToSet': {'source': data['source'],
                           },
             },
            upsert=True,
        )

        # Record the updated resource metadata
        self.db.resource.update(
            {'_id': data['resource_id']},
            {'$set': {'project_id': data['project_id'],
                      'user_id': data['user_id'],
                      'metadata': data['resource_metadata'],
                      'source': data['source'],
                      },
             '$addToSet': {'meter': {'counter_name': data['counter_name'],
                                     'counter_type': data['counter_type'],
                                     'counter_unit': data['counter_unit'],
                                     },
                           },
             },
            upsert=True,
        )

        # Record the raw data for the event. Use a copy so we do not
        # modify a data structure owned by our caller (the driver adds
        # a new key '_id').
        record = copy.copy(data)
        self.db.meter.insert(record)
        return

    def get_users(self, source=None):
        """Return an iterable of user id strings.

        :param source: Optional source filter.
        """
        q = {}
        if source is not None:
            q['source'] = source
        return sorted(self.db.user.find(q).distinct('_id'))

    def get_projects(self, source=None):
        """Return an iterable of project id strings.

        :param source: Optional source filter.
        """
        q = {}
        if source is not None:
            q['source'] = source
        return sorted(self.db.project.find(q).distinct('_id'))

    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, end_timestamp=None,
                      metaquery=None, resource=None):
        """Return an iterable of models.Resource instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param end_timestamp: Optional modified timestamp end range.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        """
        # Default was a mutable {} literal; use None to avoid the shared
        # mutable-default pitfall (passing None remains backward-compatible).
        metaquery = metaquery or {}
        q = {}
        if user is not None:
            q['user_id'] = user
        if project is not None:
            q['project_id'] = project
        if source is not None:
            q['source'] = source
        if resource is not None:
            q['resource_id'] = resource
        # Add resource_ prefix so it matches the field in the db
        q.update(dict(('resource_' + k, v)
                      for (k, v) in metaquery.iteritems()))
        # FIXME(dhellmann): This may not perform very well,
        # but doing any better will require changing the database
        # schema and that will need more thought than I have time
        # to put into it today.
        if start_timestamp or end_timestamp:
            # Look for resources matching the above criteria and with
            # samples in the time range we care about, then change the
            # resource query to return just those resources by id.
            ts_range = make_timestamp_range(start_timestamp, end_timestamp)
            if ts_range:
                q['timestamp'] = ts_range

            # FIXME(jd): We should use self.db.meter.group() and not use the
            # resource collection, but that's not supported by MIM, so it's not
            # easily testable yet. Since it was bugged before anyway, it's still
            # better for now.
            resource_ids = self.db.meter.find(q).distinct('resource_id')
            q = {'_id': {'$in': resource_ids}}
        # NOTE: loop variable renamed so it no longer shadows the
        # ``resource`` parameter.
        for resource_doc in self.db.resource.find(q):
            yield models.Resource(
                resource_id=resource_doc['_id'],
                project_id=resource_doc['project_id'],
                user_id=resource_doc['user_id'],
                metadata=resource_doc['metadata'],
                meter=[
                    models.ResourceMeter(
                        counter_name=meter['counter_name'],
                        counter_type=meter['counter_type'],
                        counter_unit=meter['counter_unit'],
                    )
                    for meter in resource_doc['meter']
                ],
            )

    def get_meters(self, user=None, project=None, resource=None, source=None,
                   metaquery=None):
        """Return an iterable of models.Meter instances

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param metaquery: Optional dict with metadata to match on.
        """
        # See get_resources: avoid the mutable-default pitfall.
        metaquery = metaquery or {}
        q = {}
        if user is not None:
            q['user_id'] = user
        if project is not None:
            q['project_id'] = project
        if resource is not None:
            q['_id'] = resource
        if source is not None:
            q['source'] = source
        q.update(metaquery)

        for r in self.db.resource.find(q):
            for r_meter in r['meter']:
                yield models.Meter(
                    name=r_meter['counter_name'],
                    type=r_meter['counter_type'],
                    # Return empty string if 'counter_unit' is not valid for
                    # backward compaitiblity.
                    unit=r_meter.get('counter_unit', ''),
                    resource_id=r['_id'],
                    project_id=r['project_id'],
                    user_id=r['user_id'],
                )

    def get_samples(self, event_filter):
        """Return an iterable of samples as created by
        :func:`ceilometer.meter.meter_message_from_counter`.
        """
        q = make_query_from_filter(event_filter, require_meter=False)
        samples = self.db.meter.find(q)
        for s in samples:
            # Remove the ObjectId generated by the database when
            # the event was inserted. It is an implementation
            # detail that should not leak outside of the driver.
            del s['_id']
            yield models.Sample(**s)

    def get_meter_statistics(self, event_filter, period=None):
        """Return an iterable of models.Statistics instance containing meter
        statistics described by the query parameters.

        The filter must have a meter value set.
        """
        q = make_query_from_filter(event_filter)

        if period:
            # NOTE(review): strftime('%s') (epoch seconds) is a glibc
            # extension, not portable Python -- confirm target platforms.
            map_stats = self.MAP_STATS_PERIOD % \
                (period,
                 int(event_filter.start.strftime('%s'))
                 if event_filter.start else 0)
        else:
            map_stats = self.MAP_STATS

        results = self.db.meter.map_reduce(
            map_stats,
            self.REDUCE_STATS,
            {'inline': 1},
            finalize=self.FINALIZE_STATS,
            query=q,
        )

        return sorted((models.Statistics(**(r['value']))
                       for r in results['results']),
                      key=operator.attrgetter('period_start'))

    def _fix_interval_min_max(self, a_min, a_max):
        """Convert MIM's JavaScript date objects back into datetimes."""
        if hasattr(a_min, 'valueOf') and a_min.valueOf is not None:
            # NOTE (dhellmann): HACK ALERT
            #
            # The real MongoDB server can handle Date objects and
            # the driver converts them to datetime instances
            # correctly but the in-memory implementation in MIM
            # (used by the tests) returns a spidermonkey.Object
            # representing the "value" dictionary and there
            # doesn't seem to be a way to recursively introspect
            # that object safely to convert the min and max values
            # back to datetime objects. In this method, we know
            # what type the min and max values are expected to be,
            # so it is safe to do the conversion
            # here. JavaScript's time representation uses
            # different units than Python's, so we divide to
            # convert to the right units and then create the
            # datetime instances to return.
            #
            # The issue with MIM is documented at
            # https://sourceforge.net/p/merciless/bugs/3/
            #
            a_min = datetime.datetime.fromtimestamp(
                a_min.valueOf() // 1000)
            a_max = datetime.datetime.fromtimestamp(
                a_max.valueOf() // 1000)

        return (a_min, a_max)

    def get_event_interval(self, event_filter):
        """Return the min and max timestamps from samples,
        using the event_filter to limit the samples seen.

        ( datetime.datetime(), datetime.datetime() )
        """
        q = make_query_from_filter(event_filter)
        results = self.db.meter.map_reduce(self.MAP_TIMESTAMP,
                                           self.REDUCE_MIN_MAX,
                                           {'inline': 1},
                                           query=q,
                                           )
        if results['results']:
            answer = results['results'][0]['value']
            return self._fix_interval_min_max(answer['min'], answer['max'])
        return (None, None)
def require_map_reduce(conn):
    """Raise SkipTest if the connection is using mim.

    mim implements map-reduce via spidermonkey, so when spidermonkey is
    not importable, tests that need map-reduce must be skipped -- but
    only when the connection really is a mim one; a real MongoDB server
    can run map-reduce without spidermonkey.
    """
    # NOTE(dhellmann): mim requires spidermonkey to implement the
    # map-reduce functions, so if we can't import it then just
    # skip these tests unless we aren't using mim.
    try:
        import spidermonkey  # noqa
    except BaseException:
        # BUG FIX: the original referenced a module-level name ``mim``
        # that is never imported at module scope (it is only imported
        # inside Connection.__init__), so this branch always raised
        # NameError.  Import it locally instead.
        try:
            from ming import mim
        except ImportError:
            # Without ming there can be no mim connection, so there is
            # nothing to skip for.
            return
        if isinstance(conn.conn, mim.Connection):
            import nose
            raise nose.SkipTest('requires spidermonkey')
|
proxysh/Safejumper-for-Desktop | refs/heads/master | buildlinux/env32/lib/python2.7/site-packages/twisted/test/proto_helpers.py | 11 | # -*- test-case-name: twisted.test.test_stringtransport -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Assorted functionality which is commonly useful when writing unit tests.
"""
from __future__ import division, absolute_import
from socket import AF_INET, AF_INET6
from io import BytesIO
from zope.interface import implementer, implementedBy
from zope.interface.verify import verifyClass
from twisted.python import failure
from twisted.python.compat import unicode, intToBytes
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import (
ITransport, IConsumer, IPushProducer, IConnector,
IReactorCore, IReactorTCP, IReactorSSL, IReactorUNIX, IReactorSocket,
IListeningPort, IReactorFDSet,
)
from twisted.internet.abstract import isIPv6Address
from twisted.internet.error import UnsupportedAddressFamily
from twisted.protocols import basic
from twisted.internet import protocol, error, address, task
from twisted.internet.task import Clock
from twisted.internet.address import IPv4Address, UNIXAddress, IPv6Address
class AccumulatingProtocol(protocol.Protocol):
    """
    An L{IProtocol} implementation for tests: it accumulates every byte
    delivered to it and can fire Deferreds when the connection is made or
    lost.

    @ivar made: whether C{connectionMade} has been called (0 or 1).
    @ivar data: all bytes passed to C{dataReceived} so far.
    @ivar closed: whether C{connectionLost} has been called (0 or 1).
    @ivar closedReason: the I{reason} passed to C{connectionLost}.
    @ivar closedDeferred: optional L{Deferred} fired from
        C{connectionLost}.
    """
    made = closed = 0
    closedReason = None
    closedDeferred = None
    data = b""
    factory = None

    def connectionMade(self):
        self.made = 1
        factory = self.factory
        if factory is None:
            return
        pending = factory.protocolConnectionMade
        if pending is None:
            return
        # Fire the factory's connection Deferred exactly once.
        factory.protocolConnectionMade = None
        pending.callback(self)

    def dataReceived(self, data):
        self.data = self.data + data

    def connectionLost(self, reason):
        self.closed = 1
        self.closedReason = reason
        pending = self.closedDeferred
        if pending is not None:
            self.closedDeferred = None
            pending.callback(None)
class LineSendingProtocol(basic.LineReceiver):
    """
    A protocol which sends out a canned list of lines, either as soon as
    the connection is made or upon receipt of the first line, and which
    records the lines it receives.

    @ivar lostConn: C{True} once C{connectionLost} has been called.
    @ivar lines: the lines still waiting to be sent.
    @ivar response: lines recorded by C{lineReceived}.
    """
    lostConn = False

    def __init__(self, lines, start = True):
        """
        @param lines: the lines to send (copied, so the caller's list is
            not mutated).
        @param start: if true, send the lines as soon as the connection
            is made; otherwise wait until a line is received first.
        """
        self.lines = lines[:]
        self.response = []
        self.start = start

    def connectionMade(self):
        if self.start:
            for line in self.lines:
                self.sendLine(line)

    def lineReceived(self, line):
        if not self.start:
            # NOTE(review): this loop rebinds ``line``, so when C{start}
            # is false and C{lines} is non-empty, the value appended to
            # C{response} below is the *last line sent*, not the line
            # that was received.  Confirm that is intentional before
            # "fixing" it -- existing tests may rely on it.
            for line in self.lines:
                self.sendLine(line)
            self.lines = []
        self.response.append(line)

    def connectionLost(self, reason):
        self.lostConn = True
class FakeDatagramTransport:
    """
    A datagram transport that performs no I/O: every packet handed to
    L{write} is recorded, together with its destination address, in
    C{self.written}.

    @cvar noAddr: sentinel recorded as the address when L{write} is
        called without an explicit one.
    """
    noAddr = object()

    def __init__(self):
        self.written = []

    def write(self, packet, addr=noAddr):
        entry = (packet, addr)
        self.written.append(entry)
@implementer(ITransport, IConsumer, IPushProducer)
class StringTransport:
    """
    A transport which buffers all writes in memory and tracks its own
    state, without performing any real I/O.

    The attributes below are provided purely so tests can inspect the
    transport; implementation code must not rely on them, since real
    transports do not provide them.

    @ivar disconnecting: C{False} until L{loseConnection} is called,
        C{True} afterwards.
    @ivar producer: the currently registered producer, or L{None}.
    @ivar streaming: the second argument passed to C{registerProducer},
        or L{None} when no producer is registered.
    @ivar hostAddr: object returned from L{getHost}; when L{None} a
        placeholder IPv4 address is returned instead.
    @ivar peerAddr: object returned from L{getPeer}; when L{None} a
        placeholder IPv4 address is returned instead.
    @ivar producerState: this transport's state as an L{IPushProducer}:
        one of C{'producing'}, C{'paused'} or C{'stopped'}.
    @ivar io: the L{io.BytesIO} holding everything written since the
        last L{clear}; prefer L{value} over touching this directly.
    @ivar _lenient: when C{True}, calling L{resumeProducing} after the
        connection has been lost is permitted instead of raising.  Only
        set this if the test deliberately resumes after close and does
        not expect more data to arrive.
    @type _lenient: L{bool}
    """
    disconnecting = False

    producer = None
    streaming = None

    hostAddr = None
    peerAddr = None

    producerState = 'producing'

    def __init__(self, hostAddress=None, peerAddress=None, lenient=False):
        self.clear()
        if hostAddress is not None:
            self.hostAddr = hostAddress
        if peerAddress is not None:
            self.peerAddr = peerAddress
        self.connected = True
        self._lenient = lenient

    def clear(self):
        """
        Throw away everything written so far.  A test helper, not a
        transport method; implementation code must not call it.
        """
        self.io = BytesIO()

    def value(self):
        """
        Return every byte written since the last L{clear}.  A test
        helper, not a transport method.

        @rtype: C{bytes}
        """
        return self.io.getvalue()

    # ITransport

    def write(self, data):
        # Accepting text here would silently hide encoding bugs.
        if isinstance(data, unicode):
            raise TypeError("Data must not be unicode")
        self.io.write(data)

    def writeSequence(self, data):
        self.io.write(b''.join(data))

    def loseConnection(self):
        """
        Close the connection.  Only flips C{disconnecting} to C{True};
        nothing else happens.
        """
        self.disconnecting = True

    def abortConnection(self):
        """
        Abort the connection; identical to C{loseConnection} here.
        """
        self.loseConnection()

    def getPeer(self):
        if self.peerAddr is not None:
            return self.peerAddr
        return address.IPv4Address('TCP', '192.168.1.1', 54321)

    def getHost(self):
        if self.hostAddr is not None:
            return self.hostAddr
        return address.IPv4Address('TCP', '10.0.0.1', 12345)

    # IConsumer

    def registerProducer(self, producer, streaming):
        if self.producer is not None:
            raise RuntimeError("Cannot register two producers")
        self.producer = producer
        self.streaming = streaming

    def unregisterProducer(self):
        if self.producer is None:
            raise RuntimeError(
                "Cannot unregister a producer unless one is registered")
        self.producer = None
        self.streaming = None

    # IPushProducer

    def _checkState(self):
        """
        Raise unless this transport may legitimately resume producing.
        """
        if self.disconnecting and not self._lenient:
            raise RuntimeError(
                "Cannot resume producing after loseConnection")
        if self.producerState == 'stopped':
            raise RuntimeError("Cannot resume a stopped producer")

    def pauseProducing(self):
        self._checkState()
        self.producerState = 'paused'

    def stopProducing(self):
        self.producerState = 'stopped'

    def resumeProducing(self):
        self._checkState()
        self.producerState = 'producing'
class StringTransportWithDisconnection(StringTransport):
    """
    A L{StringTransport} that really disconnects: L{loseConnection}
    delivers C{connectionLost} (with a L{ConnectionDone} failure) to the
    attached C{protocol} the first time it is called.
    """
    def loseConnection(self):
        if not self.connected:
            return
        self.connected = False
        reason = failure.Failure(error.ConnectionDone("Bye."))
        self.protocol.connectionLost(reason)
class StringIOWithoutClosing(BytesIO):
    """
    A L{BytesIO} whose C{close} is a no-op, so the buffered contents
    remain readable after code under test "closes" it.
    """
    def close(self):
        """
        Ignore the request to close the buffer.
        """
@implementer(IListeningPort)
class _FakePort(object):
    """
    An in-memory L{IListeningPort}: starting and stopping are no-ops and
    L{getHost} reports a caller-supplied address.

    @ivar _hostAddress: the L{IAddress} reported by L{getHost}.
    """
    def __init__(self, hostAddress):
        """
        @param hostAddress: the L{IAddress} this port pretends to be
            listening on.
        """
        self._hostAddress = hostAddress

    def startListening(self):
        """
        No-op implementation of L{IListeningPort.startListening}.
        """

    def stopListening(self):
        """
        No-op implementation of L{IListeningPort.stopListening}.
        """

    def getHost(self):
        """
        Return the address supplied at construction time.
        """
        return self._hostAddress
@implementer(IConnector)
class _FakeConnector(object):
    """
    An L{IConnector} whose methods merely record that they were called,
    so tests can inspect what happened.

    @ivar stoppedConnecting: whether L{stopConnecting} has been invoked.
    @ivar _disconnected: whether L{disconnect} has been invoked.
    @ivar _address: the destination L{IAddress} returned by
        L{getDestination}.
    """
    _disconnected = False
    stoppedConnecting = False

    def __init__(self, address):
        """
        @param address: An L{IAddress} provider that represents this
            connector's destination.
        """
        self._address = address

    def stopConnecting(self):
        """
        Record that connecting was stopped by setting
        L{_FakeConnector.stoppedConnecting} to C{True}.
        """
        self.stoppedConnecting = True

    def disconnect(self):
        """
        Record that a disconnect was requested; otherwise a no-op.
        """
        self._disconnected = True

    def connect(self):
        """
        Implement L{IConnector.connect} as a no-op.
        """

    def getDestination(self):
        """
        Return the destination address handed to C{__init__}.
        """
        return self._address
@implementer(
IReactorCore,
IReactorTCP, IReactorSSL, IReactorUNIX, IReactorSocket, IReactorFDSet
)
class MemoryReactor(object):
"""
A fake reactor to be used in tests. This reactor doesn't actually do
much that's useful yet. It accepts TCP connection setup attempts, but
they will never succeed.
@ivar hasInstalled: Keeps track of whether this reactor has been installed.
@type hasInstalled: L{bool}
@ivar running: Keeps track of whether this reactor is running.
@type running: L{bool}
@ivar hasStopped: Keeps track of whether this reactor has been stopped.
@type hasStopped: L{bool}
@ivar hasCrashed: Keeps track of whether this reactor has crashed.
@type hasCrashed: L{bool}
@ivar whenRunningHooks: Keeps track of hooks registered with
C{callWhenRunning}.
@type whenRunningHooks: L{list}
@ivar triggers: Keeps track of hooks registered with
C{addSystemEventTrigger}.
@type triggers: L{dict}
@ivar tcpClients: Keeps track of connection attempts (ie, calls to
C{connectTCP}).
@type tcpClients: L{list}
@ivar tcpServers: Keeps track of server listen attempts (ie, calls to
C{listenTCP}).
@type tcpServers: L{list}
@ivar sslClients: Keeps track of connection attempts (ie, calls to
C{connectSSL}).
@type sslClients: L{list}
@ivar sslServers: Keeps track of server listen attempts (ie, calls to
C{listenSSL}).
@type sslServers: L{list}
@ivar unixClients: Keeps track of connection attempts (ie, calls to
C{connectUNIX}).
@type unixClients: L{list}
@ivar unixServers: Keeps track of server listen attempts (ie, calls to
C{listenUNIX}).
@type unixServers: L{list}
@ivar adoptedPorts: Keeps track of server listen attempts (ie, calls to
C{adoptStreamPort}).
@ivar adoptedStreamConnections: Keeps track of stream-oriented
connections added using C{adoptStreamConnection}.
"""
def __init__(self):
"""
Initialize the tracking lists.
"""
self.hasInstalled = False
self.running = False
self.hasRun = True
self.hasStopped = True
self.hasCrashed = True
self.whenRunningHooks = []
self.triggers = {}
self.tcpClients = []
self.tcpServers = []
self.sslClients = []
self.sslServers = []
self.unixClients = []
self.unixServers = []
self.adoptedPorts = []
self.adoptedStreamConnections = []
self.connectors = []
self.readers = set()
self.writers = set()
def install(self):
"""
Fake install callable to emulate reactor module installation.
"""
self.hasInstalled = True
def resolve(self, name, timeout=10):
"""
Not implemented; raises L{NotImplementedError}.
"""
raise NotImplementedError()
def run(self):
"""
Fake L{IReactorCore.run}.
Sets C{self.running} to L{True}, runs all of the hooks passed to
C{self.callWhenRunning}, then calls C{self.stop} to simulate a request
to stop the reactor.
Sets C{self.hasRun} to L{True}.
"""
assert self.running is False
self.running = True
self.hasRun = True
for f, args, kwargs in self.whenRunningHooks:
f(*args, **kwargs)
self.stop()
# That we stopped means we can return, phew.
def stop(self):
"""
Fake L{IReactorCore.run}.
Sets C{self.running} to L{False}.
Sets C{self.hasStopped} to L{True}.
"""
self.running = False
self.hasStopped = True
def crash(self):
"""
Fake L{IReactorCore.crash}.
Sets C{self.running} to L{None}, because that feels crashy.
Sets C{self.hasCrashed} to L{True}.
"""
self.running = None
self.hasCrashed = True
def iterate(self, delay=0):
"""
Not implemented; raises L{NotImplementedError}.
"""
raise NotImplementedError()
def fireSystemEvent(self, eventType):
"""
Not implemented; raises L{NotImplementedError}.
"""
raise NotImplementedError()
def addSystemEventTrigger(self, phase, eventType, callable, *args, **kw):
"""
Fake L{IReactorCore.run}.
Keep track of trigger by appending it to
self.triggers[phase][eventType].
"""
phaseTriggers = self.triggers.setdefault(phase, {})
eventTypeTriggers = phaseTriggers.setdefault(eventType, [])
eventTypeTriggers.append((callable, args, kw))
def removeSystemEventTrigger(self, triggerID):
"""
Not implemented; raises L{NotImplementedError}.
"""
raise NotImplementedError()
def callWhenRunning(self, callable, *args, **kw):
"""
Fake L{IReactorCore.callWhenRunning}.
Keeps a list of invocations to make in C{self.whenRunningHooks}.
"""
self.whenRunningHooks.append((callable, args, kw))
def adoptStreamPort(self, fileno, addressFamily, factory):
"""
Fake L{IReactorSocket.adoptStreamPort}, that logs the call and returns
an L{IListeningPort}.
"""
if addressFamily == AF_INET:
addr = IPv4Address('TCP', '0.0.0.0', 1234)
elif addressFamily == AF_INET6:
addr = IPv6Address('TCP', '::', 1234)
else:
raise UnsupportedAddressFamily()
self.adoptedPorts.append((fileno, addressFamily, factory))
return _FakePort(addr)
def adoptStreamConnection(self, fileDescriptor, addressFamily, factory):
"""
Record the given stream connection in C{adoptedStreamConnections}.
@see: L{twisted.internet.interfaces.IReactorSocket.adoptStreamConnection}
"""
self.adoptedStreamConnections.append((
fileDescriptor, addressFamily, factory))
def adoptDatagramPort(self, fileno, addressFamily, protocol,
maxPacketSize=8192):
"""
Fake L{IReactorSocket.adoptDatagramPort}, that logs the call and returns
a fake L{IListeningPort}.
@see: L{twisted.internet.interfaces.IReactorSocket.adoptDatagramPort}
"""
if addressFamily == AF_INET:
addr = IPv4Address('UDP', '0.0.0.0', 1234)
elif addressFamily == AF_INET6:
addr = IPv6Address('UDP', '::', 1234)
else:
raise UnsupportedAddressFamily()
self.adoptedPorts.append(
(fileno, addressFamily, protocol, maxPacketSize))
return _FakePort(addr)
def listenTCP(self, port, factory, backlog=50, interface=''):
"""
Fake L{IReactorTCP.listenTCP}, that logs the call and
returns an L{IListeningPort}.
"""
self.tcpServers.append((port, factory, backlog, interface))
if isIPv6Address(interface):
address = IPv6Address('TCP', interface, port)
else:
address = IPv4Address('TCP', '0.0.0.0', port)
return _FakePort(address)
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""
Fake L{IReactorTCP.connectTCP}, that logs the call and
returns an L{IConnector}.
"""
self.tcpClients.append((host, port, factory, timeout, bindAddress))
if isIPv6Address(host):
conn = _FakeConnector(IPv6Address('TCP', host, port))
else:
conn = _FakeConnector(IPv4Address('TCP', host, port))
factory.startedConnecting(conn)
self.connectors.append(conn)
return conn
def listenSSL(self, port, factory, contextFactory,
backlog=50, interface=''):
"""
Fake L{IReactorSSL.listenSSL}, that logs the call and
returns an L{IListeningPort}.
"""
self.sslServers.append((port, factory, contextFactory,
backlog, interface))
return _FakePort(IPv4Address('TCP', '0.0.0.0', port))
def connectSSL(self, host, port, factory, contextFactory,
timeout=30, bindAddress=None):
"""
Fake L{IReactorSSL.connectSSL}, that logs the call and returns an
L{IConnector}.
"""
self.sslClients.append((host, port, factory, contextFactory,
timeout, bindAddress))
conn = _FakeConnector(IPv4Address('TCP', host, port))
factory.startedConnecting(conn)
self.connectors.append(conn)
return conn
def listenUNIX(self, address, factory,
backlog=50, mode=0o666, wantPID=0):
"""
Fake L{IReactorUNIX.listenUNIX}, that logs the call and returns an
L{IListeningPort}.
"""
self.unixServers.append((address, factory, backlog, mode, wantPID))
return _FakePort(UNIXAddress(address))
def connectUNIX(self, address, factory, timeout=30, checkPID=0):
"""
Fake L{IReactorUNIX.connectUNIX}, that logs the call and returns an
L{IConnector}.
"""
self.unixClients.append((address, factory, timeout, checkPID))
conn = _FakeConnector(UNIXAddress(address))
factory.startedConnecting(conn)
self.connectors.append(conn)
return conn
    def addReader(self, reader):
        """
        Fake L{IReactorFDSet.addReader} which adds the reader to a local set.
        """
        # A set makes repeated adds idempotent, like a real reactor.
        self.readers.add(reader)
    def removeReader(self, reader):
        """
        Fake L{IReactorFDSet.removeReader} which removes the reader from a
        local set.
        """
        # discard() (not remove()) so removing an absent reader is a no-op.
        self.readers.discard(reader)
    def addWriter(self, writer):
        """
        Fake L{IReactorFDSet.addWriter} which adds the writer to a local set.
        """
        # A set makes repeated adds idempotent, like a real reactor.
        self.writers.add(writer)
    def removeWriter(self, writer):
        """
        Fake L{IReactorFDSet.removeWriter} which removes the writer from a
        local set.
        """
        # discard() (not remove()) so removing an absent writer is a no-op.
        self.writers.discard(writer)
    def getReaders(self):
        """
        Fake L{IReactorFDSet.getReaders} which returns a list of readers from
        the local set.
        """
        # Return a copy so callers cannot mutate the internal set.
        return list(self.readers)
    def getWriters(self):
        """
        Fake L{IReactorFDSet.getWriters} which returns a list of writers from
        the local set.
        """
        # Return a copy so callers cannot mutate the internal set.
        return list(self.writers)
    def removeAll(self):
        """
        Fake L{IReactorFDSet.removeAll} which removes all readers and writers
        from the local sets.
        """
        self.readers.clear()
        self.writers.clear()
# Import-time sanity check: MemoryReactor must actually provide every
# interface it declares via @implementer.
for iface in implementedBy(MemoryReactor):
    verifyClass(iface, MemoryReactor)
class MemoryReactorClock(MemoryReactor, Clock):
    """
    A L{MemoryReactor} that also mixes in L{Clock}, for tests that need both
    fake transports and deterministic time-keeping.
    """
    def __init__(self):
        # Initialize both bases explicitly; their state is independent.
        MemoryReactor.__init__(self)
        Clock.__init__(self)
@implementer(IReactorTCP, IReactorSSL, IReactorUNIX, IReactorSocket)
class RaisingMemoryReactor(object):
    """
    A fake reactor to be used in tests.  It accepts TCP connection setup
    attempts, but they will fail.

    @ivar _listenException: An instance of an L{Exception}
    @ivar _connectException: An instance of an L{Exception}
    """

    def __init__(self, listenException=None, connectException=None):
        """
        @param listenException: An instance of an L{Exception} to raise when any
            C{listen} method is called.

        @param connectException: An instance of an L{Exception} to raise when
            any C{connect} method is called.
        """
        self._listenException = listenException
        self._connectException = connectException

    def adoptStreamPort(self, fileno, addressFamily, factory):
        """
        Fake L{IReactorSocket.adoptStreamPort}, that raises
        L{_listenException}.
        """
        # Arguments are ignored; this reactor exists only to fail.
        raise self._listenException

    def listenTCP(self, port, factory, backlog=50, interface=''):
        """
        Fake L{IReactorTCP.listenTCP}, that raises L{_listenException}.
        """
        raise self._listenException

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        Fake L{IReactorTCP.connectTCP}, that raises L{_connectException}.
        """
        raise self._connectException

    def listenSSL(self, port, factory, contextFactory,
                  backlog=50, interface=''):
        """
        Fake L{IReactorSSL.listenSSL}, that raises L{_listenException}.
        """
        raise self._listenException

    def connectSSL(self, host, port, factory, contextFactory,
                   timeout=30, bindAddress=None):
        """
        Fake L{IReactorSSL.connectSSL}, that raises L{_connectException}.
        """
        raise self._connectException

    def listenUNIX(self, address, factory,
                   backlog=50, mode=0o666, wantPID=0):
        """
        Fake L{IReactorUNIX.listenUNIX}, that raises L{_listenException}.
        """
        raise self._listenException

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """
        Fake L{IReactorUNIX.connectUNIX}, that raises L{_connectException}.
        """
        raise self._connectException
class NonStreamingProducer(object):
    """
    A pull producer which writes 10 times only.
    """

    counter = 0
    stopped = False

    def __init__(self, consumer):
        self.consumer = consumer
        self.result = Deferred()

    def resumeProducing(self):
        """
        Write the counter value once.
        """
        # Guard clause: resuming after unregistration or exhaustion is a bug
        # in the consumer driving this producer.
        if self.consumer is None or self.counter >= 10:
            raise RuntimeError("BUG: resume after unregister/stop.")
        self.consumer.write(intToBytes(self.counter))
        self.counter += 1
        if self.counter == 10:
            self.consumer.unregisterProducer()
            self._done()

    def pauseProducing(self):
        """
        An implementation of C{IPushProducer.pauseProducing}. This should never
        be called on a pull producer, so this just raises an error.
        """
        raise RuntimeError("BUG: pause should never be called.")

    def _done(self):
        """
        Fire a L{Deferred} so that users can wait for this to complete.
        """
        self.consumer = None
        finished = self.result
        del self.result
        finished.callback(None)

    def stopProducing(self):
        """
        Stop all production.
        """
        self.stopped = True
        self._done()
def waitUntilAllDisconnected(reactor, protocols):
    """
    Take a list of disconnecting protocols, callback a L{Deferred} when they're
    all done.

    This is a hack to make some older tests less flaky, as
    L{ITransport.loseConnection} is not atomic on all reactors (for example,
    the CoreFoundation, which sometimes takes a reactor turn for CFSocket to
    realise). New tests should either not use real sockets in testing, or take
    the advice in
    I{https://jml.io/pages/how-to-disconnect-in-twisted-really.html} to heart.

    @param reactor: The reactor to schedule the checks on.
    @type reactor: L{IReactorTime}

    @param protocols: The protocols to wait for disconnecting.
    @type protocols: A L{list} of L{IProtocol}s.

    @return: A L{Deferred} that fires once no protocol is connected any more.
    """
    lc = None

    def _check():
        # Stop polling once every transport has dropped its connection.
        # any() short-circuits instead of materializing an intermediate list
        # as the previous `True in [...]` membership test did.
        if not any(p.transport.connected for p in protocols):
            lc.stop()

    # _check closes over lc; lc is bound below before start() can invoke it.
    lc = task.LoopingCall(_check)
    lc.clock = reactor
    return lc.start(0.01, now=True)
|
abdoosh00/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/textannotation_module.py | 15 | ''' Text annotation module '''
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class AnnotatableFields(object):
    """Fields for `TextModule` and `TextDescriptor`."""
    # XBlock field definitions shared (via mixin) by the student-facing
    # module and the Studio descriptor.  All help/display strings are wrapped
    # in _() so they can be scraped for translation.
    data = String(help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""\
        <annotatable>
            <instructions>
                <p>
                    Add the instructions to the assignment here.
                </p>
            </instructions>
            <p>
                Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
            </p>
        </annotatable>
        """))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Text Annotation'),
    )
    instructor_tags = String(
        display_name=_("Tags for Assignments"),
        help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
        scope=Scope.settings,
        default='imagery:red,parallelism:blue',
    )
    source = String(
        display_name=_("Source/Citation"),
        help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
        scope=Scope.settings,
        default='None',
    )
    diacritics = String(
        display_name=_("Diacritic Marks"),
        help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
        scope=Scope.settings,
        default='',
    )
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class TextAnnotationModule(AnnotatableFields, XModule):
    ''' Text Annotation Module '''
    js = {'coffee': [], 'js': []}
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'textannotation'

    def __init__(self, *args, **kwargs):
        super(TextAnnotationModule, self).__init__(*args, **kwargs)
        # Parse the stored XML once; instructions are split out from the
        # annotatable body text.
        tree = etree.fromstring(self.data)
        self.instructions = self._extract_instructions(tree)
        self.content = etree.tostring(tree, encoding='unicode')
        self.user_email = ""
        if self.runtime.get_real_user is not None:
            real_user = self.runtime.get_real_user(self.runtime.anonymous_student_id)
            self.user_email = real_user.email

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)

    def student_view(self, context):
        """ Renders parameters to template. """
        render_context = {
            'course_key': self.runtime.course_id,
            'display_name': self.display_name_with_default,
            'tag': self.instructor_tags,
            'source': self.source,
            'instructions_html': self.instructions,
            'content_html': self.content,
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'diacritic_marks': self.diacritics,
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
        }
        fragment = Fragment(self.system.render_template('textannotation.html', render_context))
        # TinyMCE is loaded client-side for the annotation editor.
        for url in ("/static/js/vendor/tinymce/js/tinymce/tinymce.full.min.js",
                    "/static/js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js"):
            fragment.add_javascript_url(url)
        return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
    ''' Text Annotation Descriptor '''
    module_class = TextAnnotationModule
    mako_template = "widgets/raw-edit.html"

    @property
    def non_editable_metadata_fields(self):
        """Fields hidden from the Studio metadata editor."""
        fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
        fields.extend([
            TextAnnotationDescriptor.annotation_storage_url,
            TextAnnotationDescriptor.annotation_token_secret,
        ])
        return fields
|
vipulroxx/sympy | refs/heads/master | sympy/printing/ccode.py | 44 | """
C code printer
The CCodePrinter converts single sympy expressions into single C expressions,
using the functions defined in math.h where possible.
A complete code generator, which uses ccode extensively, can be found in
sympy.utilities.codegen. The codegen module can be used to generate complete
source code files that are compilable without further modifications.
"""
from __future__ import print_function, division
from sympy.core import S
from sympy.core.compatibility import string_types, range
from sympy.printing.codeprinter import CodePrinter, Assignment
from sympy.printing.precedence import precedence
# dictionary mapping sympy function to (argument_conditions, C_function).
# Used in CCodePrinter._print_Function(self)
known_functions = {
    # A list value holds (predicate, c_name) pairs; the C name is used only
    # when the predicate accepts the argument (here: fabs only for
    # non-integer arguments).
    "Abs": [(lambda x: not x.is_integer, "fabs")],
    "gamma": "tgamma",
    "sin": "sin",
    "cos": "cos",
    "tan": "tan",
    "asin": "asin",
    "acos": "acos",
    "atan": "atan",
    "atan2": "atan2",
    "exp": "exp",
    "log": "log",
    "erf": "erf",
    "sinh": "sinh",
    "cosh": "cosh",
    "tanh": "tanh",
    "asinh": "asinh",
    "acosh": "acosh",
    "atanh": "atanh",
    "floor": "floor",
    "ceiling": "ceil",
}
# These are the core reserved words in the C language. Taken from:
# http://crasseux.com/books/ctutorial/Reserved-words-in-C.html
reserved_words = [
    'auto', 'if', 'break', 'int', 'case', 'long', 'char', 'register',
    'continue', 'return', 'default', 'short', 'do', 'sizeof', 'double',
    'static', 'else', 'struct', 'entry', 'switch', 'extern', 'typedef',
    'float', 'union', 'for', 'unsigned', 'goto', 'while', 'enum', 'void',
    'const', 'signed', 'volatile',
]
class CCodePrinter(CodePrinter):
    """A printer to convert python expressions to strings of c code"""
    printmethod = "_ccode"
    language = "C"

    _default_settings = {
        'order': None,
        'full_prec': 'auto',
        'precision': 15,
        'user_functions': {},
        'human': True,
        'contract': True,
        'dereference': set(),
        'error_on_reserved': False,
        'reserved_word_suffix': '_',
    }

    def __init__(self, settings={}):
        # NOTE(review): mutable default argument; it is only read here
        # (never mutated), so the shared-default pitfall is not triggered.
        CodePrinter.__init__(self, settings)
        self.known_functions = dict(known_functions)
        userfuncs = settings.get('user_functions', {})
        self.known_functions.update(userfuncs)
        self._dereference = set(settings.get('dereference', []))
        self.reserved_words = set(reserved_words)

    def _rate_index_position(self, p):
        """Weight used by the base printer when ordering loop indices."""
        return p*5

    def _get_statement(self, codestring):
        """Terminate a code string as a C statement."""
        return "%s;" % codestring

    def _get_comment(self, text):
        """Format *text* as a C++-style line comment."""
        return "// {0}".format(text)

    def _declare_number_const(self, name, value):
        """Declare a numeric constant as a C `double const`."""
        return "double const {0} = {1};".format(name, value)

    def _format_code(self, lines):
        return self.indent_code(lines)

    def _traverse_matrix_indices(self, mat):
        """Yield (row, col) pairs in row-major order."""
        rows, cols = mat.shape
        return ((i, j) for i in range(rows) for j in range(cols))

    def _get_loop_opening_ending(self, indices):
        """Build C `for` loop headers and closing braces for *indices*."""
        open_lines = []
        close_lines = []
        loopstart = "for (int %(var)s=%(start)s; %(var)s<%(end)s; %(var)s++){"
        for i in indices:
            # C arrays start at 0 and end at dimension-1
            open_lines.append(loopstart % {
                'var': self._print(i.label),
                'start': self._print(i.lower),
                'end': self._print(i.upper + 1)})
            close_lines.append("}")
        return open_lines, close_lines

    def _print_Pow(self, expr):
        if "Pow" in self.known_functions:
            return self._print_Function(expr)
        PREC = precedence(expr)
        # Special-case reciprocal and square root for readable C output;
        # everything else goes through pow().
        if expr.exp == -1:
            return '1.0/%s' % (self.parenthesize(expr.base, PREC))
        elif expr.exp == 0.5:
            return 'sqrt(%s)' % self._print(expr.base)
        else:
            return 'pow(%s, %s)' % (self._print(expr.base),
                                    self._print(expr.exp))

    def _print_Rational(self, expr):
        # Emit as a long-double division so the quotient is not truncated
        # by C integer division.
        p, q = int(expr.p), int(expr.q)
        return '%d.0L/%d.0L' % (p, q)

    def _print_Indexed(self, expr):
        # calculate index for 1d array
        dims = expr.shape
        elem = S.Zero
        offset = S.One
        for i in reversed(range(expr.rank)):
            elem += expr.indices[i]*offset
            offset *= dims[i]
        return "%s[%s]" % (self._print(expr.base.label), self._print(elem))

    def _print_Idx(self, expr):
        return self._print(expr.label)

    def _print_Exp1(self, expr):
        return "M_E"

    def _print_Pi(self, expr):
        return 'M_PI'

    def _print_Infinity(self, expr):
        return 'HUGE_VAL'

    def _print_NegativeInfinity(self, expr):
        return '-HUGE_VAL'

    def _print_Piecewise(self, expr):
        if expr.args[-1].cond != True:
            # We need the last conditional to be a True, otherwise the resulting
            # function may not return a result.
            raise ValueError("All Piecewise expressions must contain an "
                             "(expr, True) statement to be used as a default "
                             "condition. Without one, the generated "
                             "expression may not evaluate to anything under "
                             "some condition.")
        lines = []
        if expr.has(Assignment):
            # Statement context: emit a chain of if / else if / else blocks.
            for i, (e, c) in enumerate(expr.args):
                if i == 0:
                    lines.append("if (%s) {" % self._print(c))
                elif i == len(expr.args) - 1 and c == True:
                    lines.append("else {")
                else:
                    lines.append("else if (%s) {" % self._print(c))
                code0 = self._print(e)
                lines.append(code0)
                lines.append("}")
            return "\n".join(lines)
        else:
            # The piecewise was used in an expression, need to do inline
            # operators. This has the downside that inline operators will
            # not work for statements that span multiple lines (Matrix or
            # Indexed expressions).
            ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
                       for e, c in expr.args[:-1]]
            last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
            return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])

    def _print_MatrixElement(self, expr):
        # Matrices are printed as flat row-major 1-d arrays.
        return "{0}[{1}]".format(expr.parent, expr.j +
                expr.i*expr.parent.shape[1])

    def _print_Symbol(self, expr):
        name = super(CCodePrinter, self)._print_Symbol(expr)
        # Symbols passed by pointer (see the `dereference` setting) are
        # printed as (*name).
        if expr in self._dereference:
            return '(*{0})'.format(name)
        else:
            return name

    def _print_sign(self, func):
        # Branchless C sign: (x > 0) - (x < 0).
        return '((({0}) > 0) - (({0}) < 0))'.format(self._print(func.args[0]))

    def indent_code(self, code):
        """Accepts a string of code or a list of code lines"""
        if isinstance(code, string_types):
            code_lines = self.indent_code(code.splitlines(True))
            return ''.join(code_lines)
        tab = "   "
        inc_token = ('{', '(', '{\n', '(\n')
        dec_token = ('}', ')')
        code = [ line.lstrip(' \t') for line in code ]
        increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
        decrease = [ int(any(map(line.startswith, dec_token)))
                     for line in code ]
        pretty = []
        level = 0
        for n, line in enumerate(code):
            if line == '' or line == '\n':
                pretty.append(line)
                continue
            level -= decrease[n]
            pretty.append("%s%s" % (tab*level, line))
            level += increase[n]
        return pretty
def ccode(expr, assign_to=None, **settings):
    """Converts an expr to a string of c code

    Parameters
    ==========

    expr : Expr
        A sympy expression to be converted.
    assign_to : optional
        When given, the argument is used as the name of the variable to which
        the expression is assigned. Can be a string, ``Symbol``,
        ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
        line-wrapping, or for expressions that generate multi-line statements.
    precision : integer, optional
        The precision for numbers such as pi [default=15].
    user_functions : dict, optional
        A dictionary where the keys are string representations of either
        ``FunctionClass`` or ``UndefinedFunction`` instances and the values
        are their desired C string representations. Alternatively, the
        dictionary value can be a list of tuples i.e. [(argument_test,
        cfunction_string)]. See below for examples.
    dereference : iterable, optional
        An iterable of symbols that should be dereferenced in the printed code
        expression. These would be values passed by address to the function.
        For example, if ``dereference=[a]``, the resulting code would print
        ``(*a)`` instead of ``a``.
    human : bool, optional
        If True, the result is a single string that may contain some constant
        declarations for the number symbols. If False, the same information is
        returned in a tuple of (symbols_to_declare, not_supported_functions,
        code_text). [default=True].
    contract: bool, optional
        If True, ``Indexed`` instances are assumed to obey tensor contraction
        rules and the corresponding nested loops over indices are generated.
        Setting contract=False will not generate loops, instead the user is
        responsible to provide values for the indices in the code.
        [default=True].

    Examples
    ========

    >>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs, Function
    >>> x, tau = symbols("x, tau")
    >>> ccode((2*tau)**Rational(7, 2))
    '8*sqrt(2)*pow(tau, 7.0L/2.0L)'
    >>> ccode(sin(x), assign_to="s")
    's = sin(x);'

    Simple custom printing can be defined for certain types by passing a
    dictionary of {"type" : "function"} to the ``user_functions`` kwarg.
    Alternatively, the dictionary value can be a list of tuples i.e.
    [(argument_test, cfunction_string)].

    >>> custom_functions = {
    ...   "ceiling": "CEIL",
    ...   "Abs": [(lambda x: not x.is_integer, "fabs"),
    ...           (lambda x: x.is_integer, "ABS")],
    ...   "func": "f"
    ... }
    >>> func = Function('func')
    >>> ccode(func(Abs(x) + ceiling(x)), user_functions=custom_functions)
    'f(fabs(x) + CEIL(x))'

    ``Piecewise`` expressions are converted into conditionals. If an
    ``assign_to`` variable is provided an if statement is created, otherwise
    the ternary operator is used. Note that if the ``Piecewise`` lacks a
    default term, represented by ``(expr, True)`` then an error will be thrown.
    This is to prevent generating an expression that may not evaluate to
    anything.

    >>> from sympy import Piecewise
    >>> expr = Piecewise((x + 1, x > 0), (x, True))
    >>> print(ccode(expr, tau))
    if (x > 0) {
    tau = x + 1;
    }
    else {
    tau = x;
    }

    Support for loops is provided through ``Indexed`` types. With
    ``contract=True`` these expressions will be turned into loops, whereas
    ``contract=False`` will just print the assignment expression that should be
    looped over:

    >>> from sympy import Eq, IndexedBase, Idx
    >>> len_y = 5
    >>> y = IndexedBase('y', shape=(len_y,))
    >>> t = IndexedBase('t', shape=(len_y,))
    >>> Dy = IndexedBase('Dy', shape=(len_y-1,))
    >>> i = Idx('i', len_y-1)
    >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
    >>> ccode(e.rhs, assign_to=e.lhs, contract=False)
    'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'

    Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
    must be provided to ``assign_to``. Note that any expression that can be
    generated normally can also exist inside a Matrix:

    >>> from sympy import Matrix, MatrixSymbol
    >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
    >>> A = MatrixSymbol('A', 3, 1)
    >>> print(ccode(mat, A))
    A[0] = pow(x, 2);
    if (x > 0) {
    A[1] = x + 1;
    }
    else {
    A[1] = x;
    }
    A[2] = sin(x);
    """
    # Instantiate a printer with the caller's settings, then delegate.
    printer = CCodePrinter(settings)
    return printer.doprint(expr, assign_to)
def print_ccode(expr, **settings):
    """Prints C representation of the given expression."""
    generated = ccode(expr, **settings)
    print(generated)
|
msmolens/VTK | refs/heads/slicer-v6.3.0-2015-07-21-426987d | ThirdParty/AutobahnPython/autobahn/wamp1/protocol.py | 13 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import sys
PY3 = sys.version_info >= (3,)
__all__ = ("WampProtocol",
"WampFactory",
"WampServerProtocol",
"WampServerFactory",
"WampClientProtocol",
"WampClientFactory",
"WampCraProtocol",
"WampCraClientProtocol",
"WampCraServerProtocol",
"json_lib",
"json_loads",
"json_dumps",)
import inspect, types
import traceback
if PY3:
from io import StringIO
else:
import StringIO
import hashlib, hmac, binascii, random
from twisted.python import log
from twisted.internet.defer import Deferred, \
maybeDeferred
from autobahn import __version__
from autobahn.websocket.protocol import WebSocketProtocol
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory, \
WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.wamp1.pbkdf2 import pbkdf2_bin
from autobahn.wamp1.prefixmap import PrefixMap
from autobahn.util import utcnow, newid, Tracker
def exportRpc(arg = None):
    """
    Decorator for RPC'ed callables.

    Used bare (``@exportRpc``) the function's own name becomes the RPC id;
    used with an argument (``@exportRpc("name")``) that argument is the id.
    """
    ## bare decorator: @exportRpc applied directly to a function
    if type(arg) is types.FunctionType:
        arg._autobahn_rpc_id = arg.__name__
        return arg

    ## parameterized decorator: @exportRpc("custom-id")
    def inner(f):
        f._autobahn_rpc_id = arg
        return f
    return inner
def exportSub(arg, prefixMatch = False):
    """
    Decorator for subscription handlers.

    :param arg: Topic URI (or URI prefix) the handler is registered for.
    :param prefixMatch: If True, register for prefix matching.
    """
    def decorate(f):
        f._autobahn_sub_id = arg
        f._autobahn_sub_prefix_match = prefixMatch
        return f
    return decorate
def exportPub(arg, prefixMatch = False):
    """
    Decorator for publication handlers.

    :param arg: Topic URI (or URI prefix) the handler is registered for.
    :param prefixMatch: If True, register for prefix matching.
    """
    def decorate(f):
        f._autobahn_pub_id = arg
        f._autobahn_pub_prefix_match = prefixMatch
        return f
    return decorate
class WampProtocol:
"""
WAMP protocol base class. Mixin for WampServerProtocol and WampClientProtocol.
"""
URI_WAMP_BASE = "http://api.wamp.ws/"
"""
WAMP base URI for WAMP predefined things.
"""
URI_WAMP_ERROR = URI_WAMP_BASE + "error#"
"""
Prefix for WAMP errors.
"""
URI_WAMP_PROCEDURE = URI_WAMP_BASE + "procedure#"
"""
Prefix for WAMP predefined RPC endpoints.
"""
URI_WAMP_TOPIC = URI_WAMP_BASE + "topic#"
"""
Prefix for WAMP predefined PubSub topics.
"""
URI_WAMP_ERROR_GENERIC = URI_WAMP_ERROR + "generic"
"""
WAMP error URI for generic errors.
"""
DESC_WAMP_ERROR_GENERIC = "generic error"
"""
Description for WAMP generic errors.
"""
URI_WAMP_ERROR_INTERNAL = URI_WAMP_ERROR + "internal"
"""
WAMP error URI for internal errors.
"""
DESC_WAMP_ERROR_INTERNAL = "internal error"
"""
Description for WAMP internal errors.
"""
URI_WAMP_ERROR_NO_SUCH_RPC_ENDPOINT = URI_WAMP_ERROR + "NoSuchRPCEndpoint"
"""
WAMP error URI for RPC endpoint not found.
"""
WAMP_PROTOCOL_VERSION = 1
"""
WAMP version this server speaks. Versions are numbered consecutively
(integers, no gaps).
"""
MESSAGE_TYPEID_WELCOME = 0
"""
Server-to-client welcome message containing session ID.
"""
MESSAGE_TYPEID_PREFIX = 1
"""
Client-to-server message establishing a URI prefix to be used in CURIEs.
"""
MESSAGE_TYPEID_CALL = 2
"""
Client-to-server message initiating an RPC.
"""
MESSAGE_TYPEID_CALL_RESULT = 3
"""
Server-to-client message returning the result of a successful RPC.
"""
MESSAGE_TYPEID_CALL_ERROR = 4
"""
Server-to-client message returning the error of a failed RPC.
"""
MESSAGE_TYPEID_SUBSCRIBE = 5
"""
Client-to-server message subscribing to a topic.
"""
MESSAGE_TYPEID_UNSUBSCRIBE = 6
"""
Client-to-server message unsubscribing from a topic.
"""
MESSAGE_TYPEID_PUBLISH = 7
"""
Client-to-server message publishing an event to a topic.
"""
MESSAGE_TYPEID_EVENT = 8
"""
Server-to-client message providing the event of a (subscribed) topic.
"""
def connectionMade(self):
self.debugWamp = self.factory.debugWamp
self.debugApp = self.factory.debugApp
self.prefixes = PrefixMap()
self.calls = {}
self.procs = {}
def connectionLost(self, reason):
pass
def _protocolError(self, reason):
if self.debugWamp:
log.msg("Closing Wamp session on protocol violation : %s" % reason)
## FIXME: subprotocols are probably not supposed to close with CLOSE_STATUS_CODE_PROTOCOL_ERROR
##
self.protocolViolation("Wamp RPC/PubSub protocol violation ('%s')" % reason)
def shrink(self, uri, passthrough = False):
"""
Shrink given URI to CURIE according to current prefix mapping.
If no appropriate prefix mapping is available, return original URI.
:param uri: URI to shrink.
:type uri: str
:returns str -- CURIE or original URI.
"""
return self.prefixes.shrink(uri)
def resolve(self, curieOrUri, passthrough = False):
"""
Resolve given CURIE/URI according to current prefix mapping or return
None if cannot be resolved.
:param curieOrUri: CURIE or URI.
:type curieOrUri: str
:returns: str -- Full URI for CURIE or None.
"""
return self.prefixes.resolve(curieOrUri)
def resolveOrPass(self, curieOrUri):
"""
Resolve given CURIE/URI according to current prefix mapping or return
string verbatim if cannot be resolved.
:param curieOrUri: CURIE or URI.
:type curieOrUri: str
:returns: str -- Full URI for CURIE or original string.
"""
return self.prefixes.resolveOrPass(curieOrUri)
def serializeMessage(self, msg):
"""
Delegate message serialization to the factory.
:param msg: The message to be serialized.
:type msg: str
:return: The serialized message.
"""
return self.factory._serialize(msg)
def registerForRpc(self, obj, baseUri = "", methods = None):
"""
Register an service object for RPC. A service object has methods
which are decorated using @exportRpc.
:param obj: The object to be registered (in this WebSockets session) for RPC.
:type obj: Object with methods decorated using @exportRpc.
:param baseUri: Optional base URI which is prepended to method names for export.
:type baseUri: String.
:param methods: If not None, a list of unbound class methods corresponding to obj
which should be registered. This can be used to register only a subset
of the methods decorated with @exportRpc.
:type methods: List of unbound class methods.
"""
for k in inspect.getmembers(obj.__class__, inspect.ismethod):
if k[1].__dict__.has_key("_autobahn_rpc_id"):
if methods is None or k[1] in methods:
uri = baseUri + k[1].__dict__["_autobahn_rpc_id"]
proc = k[1]
self.registerMethodForRpc(uri, obj, proc)
def registerMethodForRpc(self, uri, obj, proc):
"""
Register a method of an object for RPC.
:param uri: URI to register RPC method under.
:type uri: str
:param obj: The object on which to register a method for RPC.
:type obj: object
:param proc: Unbound object method to register RPC for.
:type proc: unbound method
"""
self.procs[uri] = (obj, proc, False)
if self.debugWamp:
log.msg("registered remote method on %s" % uri)
def registerProcedureForRpc(self, uri, proc):
"""
Register a (free standing) function/procedure for RPC.
:param uri: URI to register RPC function/procedure under.
:type uri: str
:param proc: Free-standing function/procedure.
:type proc: callable
"""
self.procs[uri] = (None, proc, False)
if self.debugWamp:
log.msg("registered remote procedure on %s" % uri)
def registerHandlerMethodForRpc(self, uri, obj, handler, extra = None):
"""
Register a handler on an object for RPC.
:param uri: URI to register RPC method under.
:type uri: str
:param obj: The object on which to register the RPC handler
:type obj: object
:param proc: Unbound object method to register RPC for.
:type proc: unbound method
:param extra: Optional extra data that will be given to the handler at call time.
:type extra: object
"""
self.procs[uri] = (obj, handler, True, extra)
if self.debugWamp:
log.msg("registered remote handler method on %s" % uri)
def registerHandlerProcedureForRpc(self, uri, handler, extra = None):
"""
Register a (free standing) handler for RPC.
:param uri: URI to register RPC handler under.
:type uri: str
:param proc: Free-standing handler
:type proc: callable
:param extra: Optional extra data that will be given to the handler at call time.
:type extra: object
"""
self.procs[uri] = (None, handler, True, extra)
if self.debugWamp:
log.msg("registered remote handler procedure on %s" % uri)
def procForUri(self, uri):
"""
Returns the procedure specification for `uri` or None, if it does not exist.
:param uri: URI to be checked.
:type uri: str
:returns: The procedure specification for `uri`, if it exists,
`None` otherwise.
"""
return self.procs[uri] if uri in self.procs else None
def onBeforeCall(self, callid, uri, args, isRegistered):
"""
Callback fired before executing incoming RPC. This can be used for
logging, statistics tracking or redirecting RPCs or argument mangling i.e.
The default implementation just returns the incoming URI/args.
:param uri: RPC endpoint URI (fully-qualified).
:type uri: str
:param args: RPC arguments array.
:type args: list
:param isRegistered: True, iff RPC endpoint URI is registered in this session.
:type isRegistered: bool
:returns pair -- Must return URI/Args pair.
"""
return uri, args
def onAfterCallSuccess(self, result, call):
"""
Callback fired after executing incoming RPC with success, but before
sending the RPC success message.
The default implementation will just return `result` to the client.
:param result: Result returned for executing the incoming RPC.
:type result: Anything returned by the user code for the endpoint.
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
:returns obj -- Result send back to client.
"""
return result
def onAfterCallError(self, error, call):
"""
Callback fired after executing incoming RPC with failure, but before
sending the RPC error message.
The default implementation will just return `error` to the client.
:param error: Error that occurred during incomnig RPC call execution.
:type error: Instance of twisted.python.failure.Failure
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
:returns twisted.python.failure.Failure -- Error sent back to client.
"""
return error
def onAfterSendCallSuccess(self, msg, call):
"""
Callback fired after sending RPC success message.
:param msg: Serialized WAMP message.
:type msg: str
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
"""
pass
def onAfterSendCallError(self, msg, call):
"""
Callback fired after sending RPC error message.
:param msg: Serialized WAMP message.
:type msg: str
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
"""
pass
def call(self, *args):
    """
    Perform a remote-procedure call (RPC). The first argument is the procedure
    URI (mandatory). Subsequent positional arguments can be provided (must be
    JSON serializable).

    :returns twisted.internet.defer.Deferred -- Fires with the RPC result,
       or errbacks on RPC error.
    :raises Exception: When no procedure URI is given, the URI is not a
       string, or the arguments are not JSON serializable.
    """
    if len(args) < 1:
        raise Exception("missing procedure URI")

    if type(args[0]) not in [unicode, str]:
        raise Exception("invalid type for procedure URI")

    procuri = args[0]

    ## pick a call ID not currently in use within this session
    callid = None
    while True:
        callid = newid()
        if callid not in self.calls:
            break

    msg = [WampProtocol.MESSAGE_TYPEID_CALL, callid, procuri]
    msg.extend(args[1:])

    ## serialize BEFORE registering the pending call: if serialization
    ## fails we must not leave a dangling entry in self.calls
    try:
        o = self.factory._serialize(msg)
    except Exception:
        raise Exception("call argument(s) not JSON serializable")

    d = Deferred()
    self.calls[callid] = d
    self.sendMessage(o)
    return d
## Use Ultrajson (https://github.com/esnme/ultrajson) for JSON processing
## if available, falling back to the standard library json module.
##
try:
    import ujson
    json_lib = ujson
    json_loads = ujson.loads
    ## ujson escapes non-ASCII by default; disable for parity with stdlib json
    json_dumps = lambda x: ujson.dumps(x, ensure_ascii = False)
except ImportError:
    ## only catch a missing ujson; a bare except here would mask real
    ## errors (e.g. a broken ujson install) and even KeyboardInterrupt
    import json
    json_lib = json
    json_loads = json.loads
    json_dumps = json.dumps
class WampFactory:
    """
    WAMP factory base class. Mixin for WampServerFactory and WampClientFactory.

    Provides the wire (de)serialization used by both client and server
    factories; subclasses must set `debugWamp` before calling __init__.
    """

    def __init__(self):
        if self.debugWamp:
            log.msg("Using JSON processor '%s'" % json_lib.__name__)

    def _serialize(self, obj):
        """
        Serialize a WAMP message object to its JSON wire representation.
        """
        return json_dumps(obj)

    def _unserialize(self, bytes):
        """
        Deserialize a JSON wire representation back to a WAMP message object.
        """
        return json_loads(bytes)
class WampServerProtocol(WebSocketServerProtocol, WampProtocol):
    """
    Server protocol for WAMP RPC/PubSub.
    """

    ## bit flags for registerForPubSub: which direction(s) to allow on a topic
    SUBSCRIBE = 1
    PUBLISH = 2

    def onSessionOpen(self):
        """
        Callback fired when WAMP session was fully established.
        """
        pass

    def onOpen(self):
        """
        Default implementation for WAMP connection opened sends
        Welcome message containing session ID.
        """
        self.session_id = newid()

        ## include traceback as error detail for RPC errors with
        ## no error URI - that is errors returned with URI_WAMP_ERROR_GENERIC
        self.includeTraceback = False

        msg = [WampProtocol.MESSAGE_TYPEID_WELCOME,
               self.session_id,
               WampProtocol.WAMP_PROTOCOL_VERSION,
               "Autobahn/%s" % __version__]
        o = self.factory._serialize(msg)
        self.sendMessage(o)
        self.factory._addSession(self, self.session_id)
        self.onSessionOpen()

    def onConnect(self, connectionRequest):
        """
        Default implementation for WAMP connection acceptance:
        check if client announced WAMP subprotocol, and only accept connection
        if client did so.
        """
        for p in connectionRequest.protocols:
            if p in self.factory.protocols:
                return (p, {}) # return (protocol, headers)
        raise http.HttpException(http.BAD_REQUEST[0], "this server only speaks WAMP")

    def connectionMade(self):
        """
        Set up per-session state (RPC endpoints, PubSub handlers, message
        handler dispatch table) on top of the WebSocket connection.
        """
        WebSocketServerProtocol.connectionMade(self)
        WampProtocol.connectionMade(self)

        ## RPCs registered in this session (a URI map of (object, procedure)
        ## pairs for object methods or (None, procedure) for free standing procedures)
        self.procs = {}

        ## Publication handlers registered in this session (a URI map of
        ## (object, pubHandler) pairs for object methods (handlers) or
        ## (None, None) for topic without handler)
        self.pubHandlers = {}

        ## Subscription handlers registered in this session (a URI map of
        ## (object, subHandler) pairs for object methods (handlers) or
        ## (None, None) for topic without handler)
        self.subHandlers = {}

        ## dispatch table: WAMP message type ID -> handler instance
        self.handlerMapping = {
            self.MESSAGE_TYPEID_CALL: CallHandler(self, self.prefixes),
            self.MESSAGE_TYPEID_CALL_RESULT: CallResultHandler(self, self.prefixes),
            self.MESSAGE_TYPEID_CALL_ERROR: CallErrorHandler(self, self.prefixes)}

    def connectionLost(self, reason):
        """
        Tear down session state: remove all topic subscriptions and the
        session mapping held by the factory.
        """
        self.factory._unsubscribeClient(self)
        self.factory._removeSession(self)

        WampProtocol.connectionLost(self, reason)
        WebSocketServerProtocol.connectionLost(self, reason)

    def sendMessage(self,
                    payload,
                    binary = False,
                    payload_frag_size = None,
                    sync = False,
                    doNotCompress = False):
        """
        Send a WebSocket message, logging the outgoing WAMP payload when
        debugWamp is enabled.
        """
        if self.debugWamp:
            log.msg("TX WAMP: %s" % str(payload))
        WebSocketServerProtocol.sendMessage(self,
                                            payload,
                                            binary,
                                            payload_frag_size,
                                            sync,
                                            doNotCompress)

    def _getPubHandler(self, topicUri):
        ## Longest matching prefix based resolution of (full) topic URI to
        ## publication handler.
        ## Returns a 5-tuple (consumedUriPart, unconsumedUriPart, handlerObj, handlerProc, prefixMatch)
        ## or None when no handler is registered for any prefix of topicUri.
        ##
        for i in xrange(len(topicUri), -1, -1):
            tt = topicUri[:i]
            if self.pubHandlers.has_key(tt):
                h = self.pubHandlers[tt]
                return (tt, topicUri[i:], h[0], h[1], h[2])
        return None

    def _getSubHandler(self, topicUri):
        ## Longest matching prefix based resolution of (full) topic URI to
        ## subscription handler.
        ## Returns a 5-tuple (consumedUriPart, unconsumedUriPart, handlerObj, handlerProc, prefixMatch)
        ## or None when no handler is registered for any prefix of topicUri.
        ##
        for i in xrange(len(topicUri), -1, -1):
            tt = topicUri[:i]
            if self.subHandlers.has_key(tt):
                h = self.subHandlers[tt]
                return (tt, topicUri[i:], h[0], h[1], h[2])
        return None

    def registerForPubSub(self, topicUri, prefixMatch = False, pubsub = PUBLISH | SUBSCRIBE):
        """
        Register a topic URI as publish/subscribe channel in this session.

        :param topicUri: Topic URI to be established as publish/subscribe channel.
        :type topicUri: str
        :param prefixMatch: Allow to match this topic URI by prefix.
        :type prefixMatch: bool
        :param pubsub: Allow publication and/or subscription.
        :type pubsub: WampServerProtocol.PUB, WampServerProtocol.SUB, WampServerProtocol.PUB | WampServerProtocol.SUB
        """
        if pubsub & WampServerProtocol.PUBLISH:
            self.pubHandlers[topicUri] = (None, None, prefixMatch)
            if self.debugWamp:
                log.msg("registered topic %s for publication (match by prefix = %s)" % (topicUri, prefixMatch))
        if pubsub & WampServerProtocol.SUBSCRIBE:
            self.subHandlers[topicUri] = (None, None, prefixMatch)
            if self.debugWamp:
                log.msg("registered topic %s for subscription (match by prefix = %s)" % (topicUri, prefixMatch))

    def registerHandlerForPubSub(self, obj, baseUri = ""):
        """
        Register a handler object for PubSub. A handler object has methods
        which are decorated using @exportPub and @exportSub.

        :param obj: The object to be registered (in this WebSockets session) for PubSub.
        :type obj: Object with methods decorated using @exportPub and @exportSub.
        :param baseUri: Optional base URI which is prepended to topic names for export.
        :type baseUri: String.
        """
        for k in inspect.getmembers(obj.__class__, inspect.ismethod):
            ## the export decorators stash their metadata in the method's __dict__
            if k[1].__dict__.has_key("_autobahn_pub_id"):
                uri = baseUri + k[1].__dict__["_autobahn_pub_id"]
                prefixMatch = k[1].__dict__["_autobahn_pub_prefix_match"]
                proc = k[1]
                self.registerHandlerForPub(uri, obj, proc, prefixMatch)
            elif k[1].__dict__.has_key("_autobahn_sub_id"):
                uri = baseUri + k[1].__dict__["_autobahn_sub_id"]
                prefixMatch = k[1].__dict__["_autobahn_sub_prefix_match"]
                proc = k[1]
                self.registerHandlerForSub(uri, obj, proc, prefixMatch)

    def registerHandlerForSub(self, uri, obj, proc, prefixMatch = False):
        """
        Register a method of an object as subscription handler.

        :param uri: Topic URI to register subscription handler for.
        :type uri: str
        :param obj: The object on which to register a method as subscription handler.
        :type obj: object
        :param proc: Unbound object method to register as subscription handler.
        :type proc: unbound method
        :param prefixMatch: Allow to match this topic URI by prefix.
        :type prefixMatch: bool
        """
        self.subHandlers[uri] = (obj, proc, prefixMatch)
        ## ensure the topic also exists on the publication side (as a plain
        ## topic) so clients can publish to it as well
        if not self.pubHandlers.has_key(uri):
            self.pubHandlers[uri] = (None, None, False)
        if self.debugWamp:
            log.msg("registered subscription handler for topic %s" % uri)

    def registerHandlerForPub(self, uri, obj, proc, prefixMatch = False):
        """
        Register a method of an object as publication handler.

        :param uri: Topic URI to register publication handler for.
        :type uri: str
        :param obj: The object on which to register a method as publication handler.
        :type obj: object
        :param proc: Unbound object method to register as publication handler.
        :type proc: unbound method
        :param prefixMatch: Allow to match this topic URI by prefix.
        :type prefixMatch: bool
        """
        self.pubHandlers[uri] = (obj, proc, prefixMatch)
        ## ensure the topic also exists on the subscription side (as a plain
        ## topic) so clients can subscribe to it as well
        if not self.subHandlers.has_key(uri):
            self.subHandlers[uri] = (None, None, False)
        if self.debugWamp:
            log.msg("registered publication handler for topic %s" % uri)

    # noinspection PyDefaultArgument
    def dispatch(self, topicUri, event, exclude = [], eligible = None):
        """
        Dispatch an event for a topic to all clients subscribed to
        and authorized for that topic.

        Optionally, exclude list of clients and/or only consider clients
        from explicit eligibles. In other words, the event is delivered
        to the set

           (subscribers - excluded) & eligible

        :param topicUri: URI of topic to publish event to.
        :type topicUri: str
        :param event: Event to dispatch.
        :type event: obj
        :param exclude: Optional list of clients (WampServerProtocol instances) to exclude.
        :type exclude: list of obj
        :param eligible: Optional list of clients (WampServerProtocol instances) eligible at all (or None for all).
        :type eligible: list of obj

        :returns twisted.internet.defer.Deferred -- Will be fired when event was
        dispatched to all subscribers. The return value provided to the deferred
        is a pair (delivered, requested), where delivered = number of actual
        receivers, and requested = number of (subscribers - excluded) & eligible.
        """
        ## actual dispatching is centralized in the factory, which holds the
        ## subscription map shared across all sessions
        return self.factory.dispatch(topicUri, event, exclude, eligible)

    def onMessage(self, msg, binary):
        """
        Handle WAMP messages received from WAMP client.
        """
        if self.debugWamp:
            log.msg("RX WAMP: %s" % str(msg))

        if not binary:
            try:
                obj = self.factory._unserialize(msg)
                if type(obj) == list:

                    msgtype = obj[0]

                    ### XXX Replace check by try...except when all handlers
                    ### XXX are in place. Exception handling should create
                    ### XXX a protocolError message about unsupported
                    ### XXX message type
                    if msgtype in [WampProtocol.MESSAGE_TYPEID_CALL,
                                   WampProtocol.MESSAGE_TYPEID_CALL_RESULT,
                                   WampProtocol.MESSAGE_TYPEID_CALL_ERROR]:
                        self.handlerMapping[msgtype].handleMessage(obj)

                    ### XXX Move remaining code to appropriate handlers

                    ## Subscribe Message
                    ##
                    elif msgtype == WampProtocol.MESSAGE_TYPEID_SUBSCRIBE:
                        topicUri = self.prefixes.resolveOrPass(obj[1]) ### PFX - remove
                        h = self._getSubHandler(topicUri)
                        if h:
                            ## either exact match or prefix match allowed
                            if h[1] == "" or h[4]:

                                ## direct topic
                                if h[2] is None and h[3] is None:
                                    self.factory._subscribeClient(self, topicUri)

                                ## topic handled by subscription handler
                                else:
                                    ## handler is object method
                                    if h[2]:
                                        a = maybeDeferred(h[3], h[2], str(h[0]), str(h[1]))
                                    ## handler is free standing procedure
                                    else:
                                        a = maybeDeferred(h[3], str(h[0]), str(h[1]))

                                    def fail(failure):
                                        if self.debugWamp:
                                            log.msg("exception during custom subscription handler: %s" % failure)

                                    def done(result):
                                        ## only subscribe client if handler did return True
                                        if result:
                                            self.factory._subscribeClient(self, topicUri)

                                    a.addCallback(done).addErrback(fail)
                            else:
                                if self.debugWamp:
                                    log.msg("topic %s matches only by prefix and prefix match disallowed" % topicUri)
                        else:
                            if self.debugWamp:
                                log.msg("no topic / subscription handler registered for %s" % topicUri)

                    ## Unsubscribe Message
                    ##
                    elif msgtype == WampProtocol.MESSAGE_TYPEID_UNSUBSCRIBE:
                        topicUri = self.prefixes.resolveOrPass(obj[1]) ### PFX - remove
                        self.factory._unsubscribeClient(self, topicUri)

                    ## Publish Message
                    ##
                    elif msgtype == WampProtocol.MESSAGE_TYPEID_PUBLISH:
                        topicUri = self.prefixes.resolveOrPass(obj[1]) ### PFX - remove
                        h = self._getPubHandler(topicUri)
                        if h:
                            ## either exact match or prefix match allowed
                            if h[1] == "" or h[4]:

                                ## Event
                                ##
                                event = obj[2]

                                ## Exclude Sessions List
                                ##
                                exclude = [self] # exclude publisher by default
                                if len(obj) >= 4:
                                    if type(obj[3]) == bool:
                                        ## excludeMe flag: False means deliver to publisher too
                                        if not obj[3]:
                                            exclude = []
                                    elif type(obj[3]) == list:
                                        ## map session IDs to protos
                                        exclude = self.factory.sessionIdsToProtos(obj[3])
                                    else:
                                        ## FIXME: invalid type
                                        pass

                                ## Eligible Sessions List
                                ##
                                eligible = None # all sessions are eligible by default
                                if len(obj) >= 5:
                                    if type(obj[4]) == list:
                                        ## map session IDs to protos
                                        eligible = self.factory.sessionIdsToProtos(obj[4])
                                    else:
                                        ## FIXME: invalid type
                                        pass

                                ## direct topic
                                if h[2] is None and h[3] is None:
                                    self.factory.dispatch(topicUri, event, exclude, eligible)

                                ## topic handled by publication handler
                                else:
                                    ## handler is object method
                                    if h[2]:
                                        e = maybeDeferred(h[3], h[2], str(h[0]), str(h[1]), event)
                                    ## handler is free standing procedure
                                    else:
                                        e = maybeDeferred(h[3], str(h[0]), str(h[1]), event)

                                    def fail(failure):
                                        if self.debugWamp:
                                            log.msg("exception during custom publication handler: %s" % failure)

                                    def done(result):
                                        ## only dispatch event if handler did return event
                                        if result:
                                            self.factory.dispatch(topicUri, result, exclude, eligible)

                                    e.addCallback(done).addErrback(fail)
                            else:
                                if self.debugWamp:
                                    log.msg("topic %s matches only by prefix and prefix match disallowed" % topicUri)
                        else:
                            if self.debugWamp:
                                log.msg("no topic / publication handler registered for %s" % topicUri)

                    ## Define prefix to be used in CURIEs
                    ##
                    elif msgtype == WampProtocol.MESSAGE_TYPEID_PREFIX:
                        prefix = obj[1]
                        uri = obj[2]
                        self.prefixes.set(prefix, uri) ### PFX - remove whole block (this msg type won't survive)

                    else:
                        log.msg("unknown message type")
                else:
                    log.msg("msg not a list")
            except Exception:
                traceback.print_exc()
        else:
            log.msg("binary message")
class WampServerFactory(WebSocketServerFactory, WampFactory):
    """
    Server factory for Wamp RPC/PubSub.
    """

    protocol = WampServerProtocol
    """
    Twisted protocol used by default for WAMP servers.
    """

    def __init__(self,
                 url,
                 debug = False,
                 debugCodePaths = False,
                 debugWamp = False,
                 debugApp = False,
                 externalPort = None,
                 reactor = None):
        """
        :param url: WebSocket URL this server factory will listen on.
        :type url: str
        """
        self.debugWamp = debugWamp
        self.debugApp = debugApp
        WebSocketServerFactory.__init__(self,
                                        url,
                                        protocols = ["wamp"],
                                        debug = debug,
                                        debugCodePaths = debugCodePaths,
                                        externalPort = externalPort,
                                        reactor = reactor)
        WampFactory.__init__(self)

    def onClientSubscribed(self, proto, topicUri):
        """
        Callback fired when peer was (successfully) subscribed on some topic.

        :param proto: Peer protocol instance subscribed.
        :type proto: Instance of WampServerProtocol.
        :param topicUri: Fully qualified, resolved URI of topic subscribed.
        :type topicUri: str
        """
        pass

    def _subscribeClient(self, proto, topicUri):
        """
        Called from proto to subscribe client for topic.
        """
        if topicUri not in self.subscriptions:
            self.subscriptions[topicUri] = set()
            if self.debugWamp:
                log.msg("subscriptions map created for topic %s" % topicUri)
        if proto not in self.subscriptions[topicUri]:
            self.subscriptions[topicUri].add(proto)
            if self.debugWamp:
                log.msg("subscribed peer %s on topic %s" % (proto.peer, topicUri))
            self.onClientSubscribed(proto, topicUri)
        else:
            if self.debugWamp:
                log.msg("peer %s already subscribed on topic %s" % (proto.peer, topicUri))

    def onClientUnsubscribed(self, proto, topicUri):
        """
        Callback fired when peer was (successfully) unsubscribed from some topic.

        :param proto: Peer protocol instance unsubscribed.
        :type proto: Instance of WampServerProtocol.
        :param topicUri: Fully qualified, resolved URI of topic unsubscribed.
        :type topicUri: str
        """
        pass

    def _unsubscribeClient(self, proto, topicUri = None):
        """
        Called from proto to unsubscribe client from topic, or - when
        topicUri is None (i.e. on connection loss) - from all topics.
        """
        if topicUri:
            if topicUri in self.subscriptions and proto in self.subscriptions[topicUri]:
                self.subscriptions[topicUri].discard(proto)
                if self.debugWamp:
                    log.msg("unsubscribed peer %s from topic %s" % (proto.peer, topicUri))
                if len(self.subscriptions[topicUri]) == 0:
                    del self.subscriptions[topicUri]
                    if self.debugWamp:
                        log.msg("topic %s removed from subscriptions map - no one subscribed anymore" % topicUri)
                self.onClientUnsubscribed(proto, topicUri)
            else:
                if self.debugWamp:
                    log.msg("peer %s not subscribed on topic %s" % (proto.peer, topicUri))
        else:
            ## iterate over a snapshot, since we delete map entries while iterating
            for topicUri, subscribers in list(self.subscriptions.items()):
                if proto in subscribers:
                    subscribers.discard(proto)
                    if self.debugWamp:
                        log.msg("unsubscribed peer %s from topic %s" % (proto.peer, topicUri))
                    if len(subscribers) == 0:
                        del self.subscriptions[topicUri]
                        if self.debugWamp:
                            log.msg("topic %s removed from subscriptions map - no one subscribed anymore" % topicUri)
                    self.onClientUnsubscribed(proto, topicUri)
            if self.debugWamp:
                log.msg("unsubscribed peer %s from all topics" % (proto.peer))

    def dispatch(self, topicUri, event, exclude = None, eligible = None):
        """
        Dispatch an event to all peers subscribed to the event topic.

        :param topicUri: Topic to publish event to.
        :type topicUri: str
        :param event: Event to publish (must be JSON serializable).
        :type event: obj
        :param exclude: List of WampServerProtocol instances to exclude from receivers (or None for no exclusions).
        :type exclude: List of obj
        :param eligible: List of WampServerProtocol instances eligible as receivers (or None for all).
        :type eligible: List of obj

        :returns twisted.internet.defer.Deferred -- Will be fired when event was
        dispatched to all subscribers. The return value provided to the deferred
        is a pair (delivered, requested), where delivered = number of actual
        receivers, and requested = number of (subscribers - excluded) & eligible.
        """
        if self.debugWamp:
            log.msg("publish event %s for topicUri %s" % (str(event), topicUri))

        ## normalize: None means "no exclusions" (avoids a shared mutable
        ## default argument; passing [] keeps working as before)
        if exclude is None:
            exclude = []

        d = Deferred()

        if topicUri in self.subscriptions and len(self.subscriptions[topicUri]) > 0:

            ## FIXME: this might break ordering of event delivery from a
            ## receiver perspective. We might need to have send queues
            ## per receiver OR do recvs = deque(sorted(..))
            ## However, see http://twistedmatrix.com/trac/ticket/1396
            if eligible is not None:
                subscrbs = set(eligible) & self.subscriptions[topicUri]
            else:
                subscrbs = self.subscriptions[topicUri]

            if len(exclude) > 0:
                recvs = subscrbs - set(exclude)
            else:
                recvs = subscrbs

            l = len(recvs)
            if l > 0:

                ## ok, at least 1 subscriber not excluded and eligible
                ## => prepare message for mass sending
                ##
                o = [WampProtocol.MESSAGE_TYPEID_EVENT, topicUri, event]
                try:
                    msg = self._serialize(o)
                    if self.debugWamp:
                        log.msg("serialized event msg: " + str(msg))
                except Exception as e:
                    raise Exception("invalid type for event - serialization failed [%s]" % e)

                preparedMsg = self.prepareMessage(msg)

                ## chunked sending of prepared message
                ##
                self._sendEvents(preparedMsg, recvs.copy(), 0, l, d)
            else:
                ## receivers list empty after considering exclude and eligible sessions
                ##
                d.callback((0, 0))
        else:
            ## no one subscribed on topic
            ##
            d.callback((0, 0))

        return d

    def _sendEvents(self, preparedMsg, recvs, delivered, requested, d):
        """
        Delivers events to receivers in chunks and reenters the reactor
        in-between, so that other stuff can run.
        """
        ## deliver a batch of events
        done = False
        for i in xrange(0, 256):
            try:
                proto = recvs.pop()
                if proto.state == WebSocketProtocol.STATE_OPEN:
                    try:
                        proto.sendPreparedMessage(preparedMsg)
                    except Exception:
                        ## best-effort delivery: a failing peer must not
                        ## prevent delivery to the remaining receivers
                        pass
                    else:
                        if self.debugWamp:
                            log.msg("delivered event to peer %s" % proto.peer)
                        delivered += 1
            except KeyError:
                # all receivers done
                done = True
                break

        if not done:
            ## if there are receivers left, redo
            self.reactor.callLater(0, self._sendEvents, preparedMsg, recvs, delivered, requested, d)
        else:
            ## else fire final result
            d.callback((delivered, requested))

    def _addSession(self, proto, session_id):
        """
        Add proto for session ID.
        """
        if proto not in self.protoToSessions:
            self.protoToSessions[proto] = session_id
        else:
            raise Exception("logic error - duplicate _addSession for protoToSessions")
        if session_id not in self.sessionsToProto:
            self.sessionsToProto[session_id] = proto
        else:
            raise Exception("logic error - duplicate _addSession for sessionsToProto")

    def _removeSession(self, proto):
        """
        Remove session by proto.
        """
        if proto in self.protoToSessions:
            session_id = self.protoToSessions[proto]
            del self.protoToSessions[proto]
            if session_id in self.sessionsToProto:
                del self.sessionsToProto[session_id]

    def sessionIdToProto(self, sessionId):
        """
        Map WAMP session ID to connected protocol instance (object of type WampServerProtocol).

        :param sessionId: WAMP session ID to be mapped.
        :type sessionId: str

        :returns obj -- WampServerProtocol instance or None.
        """
        return self.sessionsToProto.get(sessionId, None)

    def sessionIdsToProtos(self, sessionIds):
        """
        Map WAMP session IDs to connected protocol instances (objects of type WampServerProtocol).

        :param sessionIds: List of session IDs to be mapped.
        :type sessionIds: list of str

        :returns list -- List of WampServerProtocol instances corresponding to the WAMP session IDs.
        """
        protos = []
        for s in sessionIds:
            if s in self.sessionsToProto:
                protos.append(self.sessionsToProto[s])
        return protos

    def protoToSessionId(self, proto):
        """
        Map connected protocol instance (object of type WampServerProtocol) to WAMP session ID.

        :param proto: Instance of WampServerProtocol to be mapped.
        :type proto: obj of WampServerProtocol

        :returns str -- WAMP session ID or None.
        """
        return self.protoToSessions.get(proto, None)

    def protosToSessionIds(self, protos):
        """
        Map connected protocol instances (objects of type WampServerProtocol) to WAMP session IDs.

        :param protos: List of instances of WampServerProtocol to be mapped.
        :type protos: list of WampServerProtocol

        :returns list -- List of WAMP session IDs corresponding to the protos.
        """
        sessionIds = []
        for p in protos:
            if p in self.protoToSessions:
                sessionIds.append(self.protoToSessions[p])
        return sessionIds

    def startFactory(self):
        """
        Called by Twisted when the factory starts up. When overriding, make
        sure to call the base method.
        """
        if self.debugWamp:
            log.msg("WampServerFactory starting")
        self.subscriptions = {}
        self.protoToSessions = {}
        self.sessionsToProto = {}

    def stopFactory(self):
        """
        Called by Twisted when the factory shuts down. When overriding, make
        sure to call the base method.
        """
        if self.debugWamp:
            log.msg("WampServerFactory stopped")
class WampClientProtocol(WebSocketClientProtocol, WampProtocol):
    """
    Twisted client protocol for WAMP.
    """

    def onSessionOpen(self):
        """
        Callback fired when WAMP session was fully established. Override
        in derived class.
        """
        pass

    def onOpen(self):
        ## do nothing here .. onSessionOpen is only fired when welcome
        ## message was received (and thus session ID set)
        pass

    def onConnect(self, connectionResponse):
        """
        Accept the WebSocket connection only if the server negotiated the
        WAMP subprotocol.
        """
        if connectionResponse.protocol not in self.factory.protocols:
            raise Exception("server does not speak WAMP")

    def connectionMade(self):
        """
        Set up per-session state (topic subscriptions, message handler
        dispatch table) on top of the WebSocket connection.
        """
        WebSocketClientProtocol.connectionMade(self)
        WampProtocol.connectionMade(self)

        ## topic URI -> event handler callable for this session
        self.subscriptions = {}

        ## dispatch table: WAMP message type ID -> handler instance
        self.handlerMapping = {
            self.MESSAGE_TYPEID_CALL: CallHandler(self, self.prefixes),
            self.MESSAGE_TYPEID_CALL_RESULT: CallResultHandler(self, self.prefixes),
            self.MESSAGE_TYPEID_CALL_ERROR: CallErrorHandler(self, self.prefixes)}

    def connectionLost(self, reason):
        WampProtocol.connectionLost(self, reason)
        WebSocketClientProtocol.connectionLost(self, reason)

    def sendMessage(self, payload):
        """
        Send a WebSocket message, logging the outgoing WAMP payload when
        debugWamp is enabled.
        """
        if self.debugWamp:
            log.msg("TX WAMP: %s" % str(payload))
        WebSocketClientProtocol.sendMessage(self, payload)

    def onMessage(self, msg, binary):
        """Internal method to handle WAMP messages received from WAMP server."""

        ## WAMP is text message only
        ##
        if binary:
            self._protocolError("binary WebSocket message received")
            return

        if self.debugWamp:
            log.msg("RX WAMP: %s" % str(msg))

        ## WAMP is proper JSON payload
        ##
        try:
            obj = self.factory._unserialize(msg)
        except Exception as e:
            self._protocolError("WAMP message payload could not be unserialized [%s]" % e)
            return

        ## Every WAMP message is a list
        ##
        if type(obj) != list:
            self._protocolError("WAMP message payload not a list")
            return

        ## Every WAMP message starts with an integer for message type
        ##
        if len(obj) < 1:
            self._protocolError("WAMP message without message type")
            return
        if type(obj[0]) != int:
            self._protocolError("WAMP message type not an integer")
            return

        ## WAMP message type
        ##
        msgtype = obj[0]

        ## Valid WAMP message types received by WAMP clients
        ##
        if msgtype not in [WampProtocol.MESSAGE_TYPEID_WELCOME,
                           WampProtocol.MESSAGE_TYPEID_CALL,
                           WampProtocol.MESSAGE_TYPEID_CALL_RESULT,
                           WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
                           WampProtocol.MESSAGE_TYPEID_EVENT]:
            self._protocolError("invalid WAMP message type %d" % msgtype)
            return

        if msgtype in [WampProtocol.MESSAGE_TYPEID_CALL,
                       WampProtocol.MESSAGE_TYPEID_CALL_RESULT,
                       WampProtocol.MESSAGE_TYPEID_CALL_ERROR]:
            self.handlerMapping[msgtype].handleMessage(obj)

        ## WAMP EVENT
        ##
        elif msgtype == WampProtocol.MESSAGE_TYPEID_EVENT:
            ## Topic
            ##
            if len(obj) != 3:
                self._protocolError("WAMP EVENT message invalid length %d" % len(obj))
                return
            if type(obj[1]) not in [unicode, str]:
                self._protocolError("invalid type for <topic> in WAMP EVENT message")
                return
            unresolvedTopicUri = str(obj[1])
            topicUri = self.prefixes.resolveOrPass(unresolvedTopicUri) ### PFX - remove

            ## Fire PubSub Handler
            ##
            if self.subscriptions.has_key(topicUri):
                event = obj[2]
                # noinspection PyCallingNonCallable
                self.subscriptions[topicUri](topicUri, event)
            else:
                ## event received for non-subscribed topic (could be because we
                ## just unsubscribed, and server already sent out event for
                ## previous subscription)
                pass

        ## WAMP WELCOME
        ##
        elif msgtype == WampProtocol.MESSAGE_TYPEID_WELCOME:
            ## Session ID
            ##
            if len(obj) < 2:
                self._protocolError("WAMP WELCOME message invalid length %d" % len(obj))
                return
            if type(obj[1]) not in [unicode, str]:
                self._protocolError("invalid type for <sessionid> in WAMP WELCOME message")
                return
            self.session_id = str(obj[1])

            ## WAMP Protocol Version
            ##
            if len(obj) > 2:
                if type(obj[2]) not in [int]:
                    self._protocolError("invalid type for <version> in WAMP WELCOME message")
                    return
                else:
                    self.session_protocol_version = obj[2]
            else:
                self.session_protocol_version = None

            ## Server Ident
            ##
            if len(obj) > 3:
                if type(obj[3]) not in [unicode, str]:
                    self._protocolError("invalid type for <server> in WAMP WELCOME message")
                    return
                else:
                    self.session_server = obj[3]
            else:
                self.session_server = None

            ## session is fully established only now
            self.onSessionOpen()

        else:
            ## unreachable: msgtype was validated against the full list above
            raise Exception("logic error")

    def prefix(self, prefix, uri):
        """
        Establishes a prefix to be used in `CURIEs <http://en.wikipedia.org/wiki/CURIE>`_
        instead of URIs having that prefix for both client-to-server and
        server-to-client messages.

        :param prefix: Prefix to be used in CURIEs.
        :type prefix: str
        :param uri: URI that this prefix will resolve to.
        :type uri: str
        """
        if type(prefix) != str:
            raise Exception("invalid type for prefix")
        if type(uri) not in [unicode, str]:
            raise Exception("invalid type for URI")

        if self.prefixes.get(prefix): ### PFX - keep
            raise Exception("prefix already defined")
        self.prefixes.set(prefix, uri) ### PFX - keep

        msg = [WampProtocol.MESSAGE_TYPEID_PREFIX, prefix, uri]
        self.sendMessage(self.factory._serialize(msg))

    def publish(self, topicUri, event, excludeMe = None, exclude = None, eligible = None):
        """
        Publish an event under a topic URI. The latter may be abbreviated using a
        CURIE which has been previously defined using prefix(). The event must
        be JSON serializable.

        :param topicUri: The topic URI or CURIE.
        :type topicUri: str
        :param event: Event to be published (must be JSON serializable) or None.
        :type event: value
        :param excludeMe: When True, don't deliver the published event to myself (when I'm subscribed).
        :type excludeMe: bool
        :param exclude: Optional list of session IDs to exclude from receivers.
        :type exclude: list of str
        :param eligible: Optional list of session IDs to that are eligible as receivers.
        :type eligible: list of str
        """
        if type(topicUri) not in [unicode, str]:
            raise Exception("invalid type for parameter 'topicUri' - must be string (was %s)" % type(topicUri))

        if excludeMe is not None:
            if type(excludeMe) != bool:
                raise Exception("invalid type for parameter 'excludeMe' - must be bool (was %s)" % type(excludeMe))

        if exclude is not None:
            if type(exclude) != list:
                raise Exception("invalid type for parameter 'exclude' - must be list (was %s)" % type(exclude))

        if eligible is not None:
            if type(eligible) != list:
                raise Exception("invalid type for parameter 'eligible' - must be list (was %s)" % type(eligible))

        if exclude is not None or eligible is not None:
            ## explicit exclude/eligible lists: always send the long form;
            ## when no exclude list was given, derive it from excludeMe
            ## (default: exclude the publisher itself)
            if exclude is None:
                if excludeMe is not None:
                    if excludeMe:
                        exclude = [self.session_id]
                    else:
                        exclude = []
                else:
                    exclude = [self.session_id]
            if eligible is not None:
                msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event, exclude, eligible]
            else:
                msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event, exclude]
        else:
            ## NOTE(review): when excludeMe is None (the default), this branch
            ## sends a 4-element message with None as <excludeMe> rather than
            ## the 3-element short form; the server treats the invalid type as
            ## "exclude publisher" so the net effect matches excludeMe=True -
            ## TODO confirm this is intended rather than `if excludeMe is not False:`
            if excludeMe:
                msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event]
            else:
                msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event, excludeMe]

        try:
            o = self.factory._serialize(msg)
        except:
            raise Exception("invalid type for parameter 'event' - not JSON serializable")

        self.sendMessage(o)

    def subscribe(self, topicUri, handler):
        """
        Subscribe to topic. When already subscribed, will overwrite the handler.

        :param topicUri: URI or CURIE of topic to subscribe to.
        :type topicUri: str
        :param handler: Event handler to be invoked upon receiving events for topic.
        :type handler: Python callable, will be called as in <callable>(eventUri, event).
        """
        if type(topicUri) not in [unicode, str]:
            raise Exception("invalid type for parameter 'topicUri' - must be string (was %s)" % type(topicUri))
        if not hasattr(handler, '__call__'):
            raise Exception("invalid type for parameter 'handler' - must be a callable (was %s)" % type(handler))

        turi = self.prefixes.resolveOrPass(topicUri) ### PFX - keep
        ## only send SUBSCRIBE to the server on first subscription; a repeated
        ## subscribe merely replaces the local handler
        if not self.subscriptions.has_key(turi):
            msg = [WampProtocol.MESSAGE_TYPEID_SUBSCRIBE, topicUri]
            o = self.factory._serialize(msg)
            self.sendMessage(o)
        self.subscriptions[turi] = handler

    def unsubscribe(self, topicUri):
        """
        Unsubscribe from topic. Will do nothing when currently not subscribed to the topic.

        :param topicUri: URI or CURIE of topic to unsubscribe from.
        :type topicUri: str
        """
        if type(topicUri) not in [unicode, str]:
            raise Exception("invalid type for parameter 'topicUri' - must be string (was %s)" % type(topicUri))

        turi = self.prefixes.resolveOrPass(topicUri) ### PFX - keep
        if self.subscriptions.has_key(turi):
            msg = [WampProtocol.MESSAGE_TYPEID_UNSUBSCRIBE, topicUri]
            o = self.factory._serialize(msg)
            self.sendMessage(o)
            del self.subscriptions[turi]
class WampClientFactory(WebSocketClientFactory, WampFactory):
    """
    Twisted client factory for WAMP.
    """

    protocol = WampClientProtocol
    """
    Twisted protocol used by default for WAMP clients.
    """

    def __init__(self,
                 url,
                 debug = False,
                 debugCodePaths = False,
                 debugWamp = False,
                 debugApp = False,
                 reactor = None):
        """
        :param url: WebSocket URL of the WAMP server to connect to.
        :type url: str
        """
        self.debugWamp = debugWamp
        self.debugApp = debugApp
        WebSocketClientFactory.__init__(self,
                                        url,
                                        protocols = ["wamp"],
                                        debug = debug,
                                        debugCodePaths = debugCodePaths,
                                        reactor = reactor)
        WampFactory.__init__(self)

    def startFactory(self):
        """
        Called by Twisted when the factory starts up. When overriding, make
        sure to call the base method.
        """
        if self.debugWamp:
            ## log this factory's own name (previously misreported itself
            ## as "WebSocketClientFactory", inconsistent with WampServerFactory)
            log.msg("WampClientFactory starting")

    def stopFactory(self):
        """
        Called by Twisted when the factory shuts down. When overriding, make
        sure to call the base method.
        """
        if self.debugWamp:
            log.msg("WampClientFactory stopped")
class WampCraProtocol(WampProtocol):
    """
    Base class for WAMP Challenge-Response Authentication protocols (client and server).

    WAMP-CRA is a cryptographically strong challenge response authentication
    protocol based on HMAC-SHA256.

    The protocol performs in-band authentication of WAMP clients to WAMP servers.

    WAMP-CRA does not introduce any new WAMP protocol level message types, but
    implements the authentication handshake via standard WAMP RPCs with well-known
    procedure URIs and signatures.
    """

    @staticmethod
    def deriveKey(secret, extra = None):
        """
        Computes a derived cryptographic key from a password according to PBKDF2
        http://en.wikipedia.org/wiki/PBKDF2.

        A key is only derived when at least 'salt' is present in the 'extra'
        dictionary; otherwise the plain secret is returned unchanged. The
        complete set of attributes that can be set in 'extra':

           salt: The salt value to be used.
           iterations: Number of iterations of derivation algorithm to run (default 10000).
           keylen: Key length to derive (default 32).

        :returns str -- The derived key or the original secret.
        """
        if type(extra) == dict and 'salt' in extra:
            salt = str(extra['salt'])
            iterations = int(extra.get('iterations', 10000))
            keylen = int(extra.get('keylen', 32))
            raw = pbkdf2_bin(secret, salt, iterations, keylen, hashlib.sha256)
            return binascii.b2a_base64(raw).strip()
        return secret

    def authSignature(self, authChallenge, authSecret = None, authExtra = None):
        """
        Compute the authentication signature from an authentication challenge and a secret.

        :param authChallenge: The authentication challenge.
        :type authChallenge: str
        :param authSecret: The authentication secret.
        :type authSecret: str
        :authExtra: Extra authentication information for salting the secret. (salt, keylen,
                    iterations)
        :type authExtra: dict

        :returns str -- The authentication signature.
        """
        secret = authSecret if authSecret is not None else ""
        if isinstance(secret, unicode):
            secret = secret.encode('utf8')
        key = WampCraProtocol.deriveKey(secret, authExtra)
        digest = hmac.new(key, authChallenge, hashlib.sha256).digest()
        return binascii.b2a_base64(digest).strip()
class WampCraClientProtocol(WampClientProtocol, WampCraProtocol):
    """
    Simple, authenticated WAMP client protocol.

    The client performs WAMP-Challenge-Response-Authentication ("WAMP-CRA")
    against a server which, of course, must implement WAMP-CRA as well.
    """

    def authenticate(self, authKey = None, authExtra = None, authSecret = None):
        """
        Authenticate the WAMP session to server.

        :param authKey: The key of the authentication credentials, something like a user or application name.
        :type authKey: str
        :param authExtra: Any extra authentication information.
        :type authExtra: dict
        :param authSecret: The secret of the authentication credentials, something like the user password or application secret key.
        :type authSecret: str

        :returns Deferred -- Deferred that fires upon authentication success (with permissions) or failure.
        """

        def _onChallenge(challenge):
            ## anonymous sessions answer the challenge with a null signature
            if authKey is None:
                sig = None
            else:
                ## the server may return extra signing info (e.g. a PBKDF2
                ## salt) inside the serialized challenge
                challengeInfo = self.factory._unserialize(challenge)
                if 'authextra' in challengeInfo:
                    sig = self.authSignature(challenge, authSecret, challengeInfo['authextra'])
                else:
                    sig = self.authSignature(challenge, authSecret)
            return self.call(WampProtocol.URI_WAMP_PROCEDURE + "auth", sig)

        d = self.call(WampProtocol.URI_WAMP_PROCEDURE + "authreq", authKey, authExtra)
        d.addCallback(_onChallenge)
        return d
class WampCraServerProtocol(WampServerProtocol, WampCraProtocol):
    """
    Simple, authenticating WAMP server protocol.

    The server lets clients perform WAMP-Challenge-Response-Authentication
    ("WAMP-CRA") to authenticate. The clients need to implement WAMP-CRA
    also of course.

    To implement an authenticating server, override:

       * getAuthSecret
       * getAuthPermissions
       * onAuthenticated

    in your class deriving from this class.
    """

    clientAuthTimeout = 0
    """
    Client authentication timeout in seconds or 0 for infinite. A client
    must perform authentication after the initial WebSocket handshake within
    this timeout or the connection is failed.
    """

    clientAuthAllowAnonymous = True
    """
    Allow anonymous client authentication. When this is set to True, a client
    may "authenticate" as anonymous.
    """

    def getAuthPermissions(self, authKey, authExtra):
        """
        Get the permissions the session is granted when the authentication succeeds
        for the given key / extra information.

        Override in derived class to implement your authentication.

        A permissions object is structured like this::

           {'permissions': {'rpc': [
                               {'uri':  / RPC Endpoint URI - String /,
                                'call': / Allow to call? - Boolean /}
                            ],
                            'pubsub': [
                               {'uri':    / PubSub Topic URI / URI prefix - String /,
                                'prefix': / URI matched by prefix? - Boolean /,
                                'pub':    / Allow to publish? - Boolean /,
                                'sub':    / Allow to subscribe? - Boolean /}
                            ]
                           }
           }

        You can add custom information to this object. The object will be provided again
        when the client authentication succeeded in :meth:`onAuthenticated`.

        :param authKey: The authentication key.
        :type authKey: str
        :param authExtra: Authentication extra information.
        :type authExtra: dict

        :returns obj or Deferred -- Return a permissions object or None when no permissions granted.
        """
        return None

    def getAuthSecret(self, authKey):
        """
        Get the authentication secret for an authentication key, i.e. the
        user password for the user name. Return None when the authentication
        key does not exist.

        Override in derived class to implement your authentication.

        :param authKey: The authentication key.
        :type authKey: str

        :returns str or Deferred -- The authentication secret for the key or None when the key does not exist.
        """
        return None

    def onAuthTimeout(self):
        """
        Fired when the client does not authenticate itself in time. The default implementation
        will simply fail the connection.

        May be overridden in derived class.
        """
        if not self._clientAuthenticated:
            log.msg("failing connection upon client authentication timeout [%s secs]" % self.clientAuthTimeout)
            self.failConnection()

    def onAuthenticated(self, authKey, permissions):
        """
        Fired when client authentication was successful.

        Override in derived class and register PubSub topics and/or RPC endpoints.

        :param authKey: The authentication key the session was authenticated for.
        :type authKey: str
        :param permissions: The permissions object returned from :meth:`getAuthPermissions`.
        :type permissions: obj
        """
        pass

    def registerForPubSubFromPermissions(self, permissions):
        """
        Register topics for PubSub from auth permissions.

        :param permissions: The permissions granted to the now authenticated client.
        :type permissions: dict
        """
        for p in permissions['pubsub']:
            ## register topics for the clients
            ##
            pubsub = (WampServerProtocol.PUBLISH if p['pub'] else 0) | \
                     (WampServerProtocol.SUBSCRIBE if p['sub'] else 0)
            topic = p['uri']
            ## 'in' replaces the deprecated dict.has_key()
            if topic in self.pubHandlers or topic in self.subHandlers:
                ## FIXME: handle dups!
                log.msg("DUPLICATE TOPIC PERMISSION !!! " + topic)
            self.registerForPubSub(topic, p['prefix'], pubsub)

    def onSessionOpen(self):
        """
        Called when WAMP session has been established, but not yet authenticated. The default
        implementation will prepare the session allowing the client to authenticate itself.
        """
        ## register RPC endpoints for WAMP-CRA authentication
        ##
        self.registerForRpc(self, WampProtocol.URI_WAMP_PROCEDURE, [WampCraServerProtocol.authRequest,
                                                                    WampCraServerProtocol.auth])

        ## reset authentication state
        ##
        self._clientAuthenticated = False
        self._clientPendingAuth = None
        self._clientAuthTimeoutCall = None

        ## client authentication timeout
        ##
        if self.clientAuthTimeout > 0:
            self._clientAuthTimeoutCall = self.factory.reactor.callLater(self.clientAuthTimeout, self.onAuthTimeout)

    @exportRpc("authreq")
    def authRequest(self, authKey = None, extra = None):
        """
        RPC endpoint for clients to initiate the authentication handshake.

        :param authKey: Authentication key, such as user name or application name.
        :type authKey: str
        :param extra: Authentication extra information.
        :type extra: dict

        :returns str -- Authentication challenge. The client will need to create an authentication signature from this.
        """

        ## check authentication state
        ##
        if self._clientAuthenticated:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "already-authenticated"), "already authenticated")
        if self._clientPendingAuth is not None:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "authentication-already-requested"), "authentication request already issues - authentication pending")

        ## check extra
        ##
        if extra:
            if type(extra) != dict:
                raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "extra not a dictionary (was %s)." % str(type(extra)))
        else:
            extra = {}
        #for k in extra:
        #   if type(extra[k]) not in [str, unicode, int, long, float, bool, types.NoneType]:
        #      raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "attribute '%s' in extra not a primitive type (was %s)" % (k, str(type(extra[k]))))

        ## check authKey
        ##
        if authKey is None and not self.clientAuthAllowAnonymous:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "anonymous-auth-forbidden"), "authentication as anonymous forbidden")

        if type(authKey) not in [str, unicode, types.NoneType]:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "authentication key must be a string (was %s)" % str(type(authKey)))

        d = maybeDeferred(self.getAuthSecret, authKey)

        def onGetAuthSecretOk(authSecret, authKey, extra):
            if authKey is not None and authSecret is None:
                raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "no-such-authkey"), "authentication key '%s' does not exist." % authKey)

            ## each authentication request gets a unique authid, which can only be used (later) once!
            ##
            authid = newid()

            ## create authentication challenge
            ##
            info = {'authid': authid, 'authkey': authKey, 'timestamp': utcnow(), 'sessionid': self.session_id,
                    'extra': extra}

            pp = maybeDeferred(self.getAuthPermissions, authKey, extra)

            def onAuthPermissionsOk(res):
                if res is None:
                    res = {'permissions': {'pubsub': [], 'rpc': []}}
                info['permissions'] = res['permissions']
                if 'authextra' in res:
                    info['authextra'] = res['authextra']
                if authKey:
                    ## authenticated session: the serialized challenge is
                    ## signed server-side; the client must produce the same
                    ## signature to authenticate
                    ##
                    infoser = self.factory._serialize(info)
                    sig = self.authSignature(infoser, authSecret)
                    self._clientPendingAuth = (info, sig, res)
                    return infoser
                else:
                    ## anonymous session
                    ##
                    self._clientPendingAuth = (info, None, res)
                    return None

            def onAuthPermissionsError(e):
                raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "auth-permissions-error"), str(e))

            pp.addCallbacks(onAuthPermissionsOk, onAuthPermissionsError)
            return pp

        d.addCallback(onGetAuthSecretOk, authKey, extra)
        return d

    @exportRpc("auth")
    def auth(self, signature = None):
        """
        RPC endpoint for clients to actually authenticate after requesting authentication and computing
        a signature from the authentication challenge.

        :param signature: Authentication signature computed by the client.
        :type signature: str

        :returns list -- A list of permissions the client is granted when authentication was successful.
        """

        ## check authentication state
        ##
        if self._clientAuthenticated:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "already-authenticated"), "already authenticated")
        if self._clientPendingAuth is None:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "no-authentication-requested"), "no authentication previously requested")

        ## check signature
        ##
        if type(signature) not in [str, unicode, types.NoneType]:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "signature must be a string or None (was %s)" % str(type(signature)))
        ## NOTE(review): this comparison is not constant-time; consider
        ## hmac.compare_digest (Python 2.7.7+) — confirm minimum supported
        ## Python before changing.
        if self._clientPendingAuth[1] != signature:
            ## delete pending authentication, so that no retries are possible. authid is only valid for 1 try!!
            ##
            self._clientPendingAuth = None

            ## notify the client of failed authentication, but only after a random,
            ## exponentially distributed delay. this (further) protects against
            ## timing attacks
            ##
            d = Deferred()

            def fail():
                ## FIXME: (optionally) drop the connection instead of returning RPC error?
                ##
                d.errback(Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-signature"), "signature for authentication request is invalid"))

            failDelaySecs = random.expovariate(1.0 / 0.8) # mean = 0.8 secs
            self.factory.reactor.callLater(failDelaySecs, fail)
            return d

        ## at this point, the client has successfully authenticated!

        ## get the permissions we determined earlier
        ##
        perms = self._clientPendingAuth[2]

        ## delete auth request and mark client as authenticated
        ##
        authKey = self._clientPendingAuth[0]['authkey']
        self._clientAuthenticated = True
        self._clientPendingAuth = None
        if self._clientAuthTimeoutCall is not None:
            self._clientAuthTimeoutCall.cancel()
            self._clientAuthTimeoutCall = None

        ## fire authentication callback
        ##
        self.onAuthenticated(authKey, perms)

        ## return permissions to client
        ##
        return perms['permissions']
class Call:
    """
    Lightweight container describing one incoming RPC, handed to call
    handlers registered via

      - registerHandlerMethodForRpc
      - registerHandlerProcedureForRpc
    """

    def __init__(self, proto, callid, uri, args, extra = None):
        self.proto = proto
        self.callid = callid
        self.uri = uri
        self.args = args
        self.extra = extra
        ## attach a timing tracker only when the protocol requests timings
        self.timings = Tracker(tracker = None, tracked = None) if self.proto.trackTimings else None

    def track(self, key):
        """Record a timing checkpoint named *key* (no-op without a tracker)."""
        if self.timings:
            self.timings.track(key)
class Handler(object):
    """
    Base class for handlers of one particular class of WAMP messages.
    """

    ## message type id this handler accepts; set by subclasses
    typeid = None

    tracker = None

    def __init__(self, proto, prefixes):
        """
        Keep references to the protocol instance and its prefix map.
        """
        self.proto = proto
        self.prefixes = prefixes

    def handleMessage(self, msg_parts):
        """
        Template method for handling a message.

        Verifies the message type matches this handler's typeid, then
        validates, parses and finally dispatches the message via the
        subclass hooks below.
        """
        msgtype = msg_parts[0]
        ## a subclass must define typeid; a mismatch means the wrong
        ## handler was invoked
        assert self.typeid, \
            "No typeid defined for %s" % self.__class__.__name__
        assert msgtype == self.typeid, \
            "Message type %s does not match type id %s" % (msgtype,
                                                           self.typeid)

        if not self._messageIsValid(msg_parts):
            return
        self._parseMessageParts(msg_parts)
        self._handleMessage()

    def _parseMessageParts(self, msg_parts):
        """
        Assign the message parts to instance variables.
        Must be overridden in subclasses.
        """
        raise NotImplementedError

    def _messageIsValid(self, msg_parts):
        """
        Check the message parts for expected properties (type, etc.).
        Must be overridden in subclasses.
        """
        raise NotImplementedError

    def _handleMessage(self):
        """
        Handle a specific kind of message.
        Must be overridden in subclasses.
        """
        raise NotImplementedError
class CallHandler(Handler):
    """
    A handler for incoming RPC calls (WAMP CALL messages).
    """

    typeid = WampProtocol.MESSAGE_TYPEID_CALL

    def _messageIsValid(self, msg_parts):
        """
        Validate that <callid> and <uri> of a CALL message are strings.
        """
        callid, uri = msg_parts[1:3]
        if not isinstance(callid, (str, unicode)):
            self.proto._protocolError(
                ("WAMP CALL message with invalid type %s for "
                 "<callid>") % type(callid))
            return False

        if not isinstance(uri, (str, unicode)):
            self.proto._protocolError(
                ("WAMP CALL message with invalid type %s for "
                 "<uri>") % type(uri))
            return False

        return True

    def _parseMessageParts(self, msg_parts):
        """
        Parse message and create call object.
        """
        self.callid = msg_parts[1]
        self.uri = self.prefixes.resolveOrPass(msg_parts[2]) ### PFX - remove
        self.args = msg_parts[3:]

    def _handleMessage(self):
        """
        Perform the RPC call and attach callbacks to its deferred object.
        """
        call = self._onBeforeCall()
        ## execute incoming RPC
        d = maybeDeferred(self._callProcedure, call)
        ## register callback and errback with extra argument call
        d.addCallbacks(self._onAfterCallSuccess,
                       self._onAfterCallError,
                       callbackArgs = (call,),
                       errbackArgs = (call,))

    def _onBeforeCall(self):
        """
        Create call object to move around call data; fires the
        onBeforeCall user hook, which may rewrite uri/args.
        """
        uri, args = self.proto.onBeforeCall(self.callid, self.uri, self.args, bool(self.proto.procForUri(self.uri)))

        call = Call(self.proto, self.callid, uri, args)
        call.track("onBeforeCall")
        return call

    def _callProcedure(self, call):
        """
        Actually performs the call of a procedure invoked via RPC.
        """
        m = self.proto.procForUri(call.uri)
        if m is None:
            raise Exception(WampProtocol.URI_WAMP_ERROR_NO_SUCH_RPC_ENDPOINT, "No RPC endpoint registered for %s." % call.uri)

        obj, method_or_proc, is_handler = m[:3]
        if not is_handler:
            return self._performProcedureCall(call, obj, method_or_proc)
        else:
            ## handler-style endpoints receive the whole Call object and
            ## the extra data registered alongside them
            call.extra = m[3]
            return self._delegateToRpcHandler(call, obj, method_or_proc)

    def _performProcedureCall(self, call, obj, method_or_proc):
        """
        Perform a RPC method / procedure call.
        """
        cargs = tuple(call.args) if call.args else ()
        if obj:
            ## call object method
            return method_or_proc(obj, *cargs)
        else:
            ## call free-standing function/procedure
            return method_or_proc(*cargs)

    def _delegateToRpcHandler(self, call, obj, method_or_proc):
        """
        Delegate call to RPC handler.
        """
        if obj:
            ## call RPC handler on object
            return method_or_proc(obj, call)
        else:
            ## call free-standing RPC handler
            return method_or_proc(call)

    def _onAfterCallSuccess(self, result, call):
        """
        Execute custom success handler and send call result.
        """
        ## track timing and fire user callback
        call.track("onAfterCallSuccess")
        call.result = self.proto.onAfterCallSuccess(result, call)

        ## send out WAMP message
        self._sendCallResult(call)

    def _onAfterCallError(self, error, call):
        """
        Execute custom error handler and send call error.
        """
        ## track timing and fire user callback
        call.track("onAfterCallError")
        call.error = self.proto.onAfterCallError(error, call)

        ## send out WAMP message
        self._sendCallError(call)

    def _sendCallResult(self, call):
        """
        Marshal and send a RPC success result.
        """
        msg = [WampProtocol.MESSAGE_TYPEID_CALL_RESULT, call.callid, call.result]
        try:
            rmsg = self.proto.serializeMessage(msg)
        except Exception:
            ## FIX: was a bare "except:", which also swallowed
            ## KeyboardInterrupt/SystemExit; only serialization failures
            ## should map to this error
            raise Exception("call result not JSON serializable")
        else:
            ## now actually send WAMP message
            self.proto.sendMessage(rmsg)

            ## track timing and fire user callback
            call.track("onAfterSendCallSuccess")
            self.proto.onAfterSendCallSuccess(rmsg, call)

    def _sendCallError(self, call):
        """
        Marshal and send a RPC error result.
        """
        killsession = False
        rmsg = None
        try:
            error_info, killsession = self._extractErrorInfo(call)
            rmsg = self._assembleErrorMessage(call, *error_info)
        except Exception as e:
            ## extraction/serialization of the application error itself
            ## failed: fall back to an internal-error message
            rmsg = self._handleProcessingError(call, e)
        finally:
            if rmsg:
                ## now actually send WAMP message
                self.proto.sendMessage(rmsg)

                ## track timing and fire user callback
                call.track("onAfterSendCallError")
                self.proto.onAfterSendCallError(rmsg, call)

                if killsession:
                    self.proto.sendClose(3000, u"killing WAMP session upon request by application exception")
            else:
                raise Exception("fatal: internal error in CallHandler._sendCallError")

    def _extractErrorInfo(self, call):
        """
        Extract error information from the call.

        :returns ((erroruri, errordesc, errordetails), killsession)
        """
        ## get error args and len
        ##
        eargs = call.error.value.args
        nargs = len(eargs)

        if nargs > 4:
            raise Exception("invalid args length %d for exception" % nargs)

        ## erroruri & errordesc
        ##
        if nargs == 0:
            erroruri = WampProtocol.URI_WAMP_ERROR_GENERIC
            errordesc = WampProtocol.DESC_WAMP_ERROR_GENERIC
        elif nargs == 1:
            erroruri = WampProtocol.URI_WAMP_ERROR_GENERIC
            errordesc = eargs[0]
        else:
            erroruri = eargs[0]
            errordesc = eargs[1]

        ## errordetails
        ##
        errordetails = None
        if nargs >= 3:
            errordetails = eargs[2]
        elif self.proto.includeTraceback:
            try:
                ## we'd like to do ..
                #tb = call.error.getTraceback()
                ## .. but the implementation in Twisted
                ## http://twistedmatrix.com/trac/browser/tags/releases/twisted-13.1.0/twisted/python/failure.py#L529
                ## uses cStringIO which cannot handle Unicode string in tracebacks. Hence we do our own:
                io = StringIO.StringIO()
                call.error.printTraceback(file = io)
                tb = io.getvalue()
            except Exception as ie:
                print("INTERNAL ERROR [_extractErrorInfo / getTraceback()]: %s" % ie)
                traceback.print_stack()
            else:
                errordetails = tb.splitlines()

        ## killsession
        ##
        killsession = False
        if nargs >= 4:
            killsession = eargs[3]

        ## recheck all error component types
        ##
        if type(erroruri) not in [str, unicode]:
            raise Exception("invalid type %s for errorUri" % type(erroruri))

        if type(errordesc) not in [str, unicode]:
            raise Exception("invalid type %s for errorDesc" % type(errordesc))

        ## errordetails must be JSON serializable. If not, we get exception later in sendMessage.
        ## We don't check here, since the only way would be to serialize to JSON and
        ## then we'd serialize twice (here and in sendMessage)

        if type(killsession) not in [bool, types.NoneType]:
            raise Exception("invalid type %s for killSession" % type(killsession))

        return (erroruri, errordesc, errordetails), killsession

    def _assembleErrorMessage(self, call, erroruri, errordesc, errordetails):
        """
        Assemble a WAMP RPC error message.
        """
        if errordetails is not None:
            msg = [WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
                   call.callid,
                   self.prefixes.shrink(erroruri), ### PFX - remove
                   errordesc,
                   errordetails]
        else:
            msg = [WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
                   call.callid,
                   self.prefixes.shrink(erroruri), ### PFX - remove
                   errordesc]

        ## serialize message. this can fail if errorDetails is not
        ## serializable
        try:
            rmsg = self.proto.serializeMessage(msg)
        except Exception as e:
            raise Exception(
                "invalid object for errorDetails - not serializable (%s)" %
                str(e))

        return rmsg

    def _handleProcessingError(self, call, e):
        """
        Create a message describing what went wrong during processing an
        exception.
        """
        msg = [WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
               call.callid,
               ### PFX - remove
               self.prefixes.shrink(WampProtocol.URI_WAMP_ERROR_INTERNAL),
               str(e)]

        if self.proto.includeTraceback:
            try:
                tb = call.error.getTraceback()
            except Exception as ie:
                ## FIXME: find out why this can fail with
                ## "'unicode' does not have the buffer interface"
                print("INTERNAL ERROR (getTraceback): %s" % ie)
            else:
                msg.append(tb.splitlines())

        result = self.proto.serializeMessage(msg)
        return result
class CallResultHandler(Handler):
    """
    Handler for WAMP CALL_RESULT messages (successful RPCs).
    """

    typeid = WampProtocol.MESSAGE_TYPEID_CALL_RESULT

    def _messageIsValid(self, msg_parts):
        """
        A CALL_RESULT message must have exactly 3 parts and a string <callid>.
        """
        part_count = len(msg_parts)
        if part_count < 2:
            self.proto._protocolError(
                "WAMP CALL_RESULT message without <callid>")
            return False
        if part_count != 3:
            self.proto._protocolError(
                "WAMP CALL_RESULT message with invalid length %d" % part_count)
            return False

        if type(msg_parts[1]) not in [unicode, str]:
            self.proto._protocolError(
                ("WAMP CALL_RESULT message with invalid type %s for "
                 "<callid>") % type(msg_parts[1]))
            return False

        return True

    def _parseMessageParts(self, msg_parts):
        """
        Extract call result from message parts.
        """
        self.callid = str(msg_parts[1])
        self.result = msg_parts[2]

    def _handleMessage(self):
        """
        Fire the pending Call Deferred with the received result.
        """
        ## Pop and process Call Deferred
        ##
        pending = self.proto.calls.pop(self.callid, None)
        if pending:
            ## WAMP CALL_RESULT
            ##
            pending.callback(self.result)
        else:
            if self.proto.debugWamp:
                log.msg("callid not found for received call result message")
class CallErrorHandler(Handler):
    """
    Handler for WAMP CALL_ERROR messages (failed RPCs).
    """

    typeid = WampProtocol.MESSAGE_TYPEID_CALL_ERROR

    def _messageIsValid(self, msg_parts):
        """
        A CALL_ERROR message must have 4 or 5 parts with string
        <erroruri> and <errordesc>.
        """
        part_count = len(msg_parts)
        if part_count not in [4, 5]:
            self.proto._protocolError(
                "call error message invalid length %d" % part_count)
            return False

        ## Error URI
        ##
        if type(msg_parts[2]) not in [unicode, str]:
            self.proto._protocolError(
                "invalid type %s for errorUri in call error message" %
                str(type(msg_parts[2])))
            return False

        ## Error Description
        ##
        if type(msg_parts[3]) not in [unicode, str]:
            self.proto._protocolError(
                "invalid type %s for errorDesc in call error message" %
                str(type(msg_parts[3])))
            return False

        return True

    def _parseMessageParts(self, msg_parts):
        """
        Extract error information from message parts.
        """
        self.callid = str(msg_parts[1])
        self.erroruri = str(msg_parts[2])
        self.errordesc = str(msg_parts[3])

        ## Error Details (optional fifth part)
        ##
        self.errordetails = msg_parts[4] if len(msg_parts) > 4 else None

    def _handleMessage(self):
        """
        Fire Call Error Deferred.
        """
        ## Pop and process Call Deferred
        ##
        pending = self.proto.calls.pop(self.callid, None)
        if pending:
            pending.errback(Exception(self.erroruri, self.errordesc, self.errordetails))
        else:
            if self.proto.debugWamp:
                log.msg("callid not found for received call error message")
|
bradrf/garbagetruck | refs/heads/master | tests/test_garbagetruck.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_garbagetruck
----------------------------------
Tests for `garbagetruck` module.
"""
import pytest
from contextlib import contextmanager
from click.testing import CliRunner
from garbagetruck import garbagetruck
from garbagetruck import cli
class TestGarbagetruck(object):
@classmethod
def setup_class(cls):
pass
def test_something(self):
pass
def test_command_line_interface(self):
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'Commands' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert 'Show this message and exit.' in help_result.output
@classmethod
def teardown_class(cls):
pass
|
thiagomg/experiments | refs/heads/master | math/thinaso.py | 4 | def get_single_number(nums, pos):
if pos >= 0 and pos < len(nums):
return nums[pos]
return 0
def get_number(nums, pos):
    """
    Find the first non-zero entry scanning outward from pos (trying the
    next slot to the right first, then the next to the left), zero it
    out in nums, and return it.

    NOTE(review): loops forever if every entry of nums is zero — callers
    must guarantee at least one non-zero value remains.
    """
    right = pos + 1
    left = pos - 1
    while get_single_number(nums, pos) == 0:
        ## step right first ...
        pos = right
        right += 1
        ## ... and fall back one step to the left if that slot is empty
        if get_single_number(nums, pos) == 0:
            pos = left
            left -= 1
    value = nums[pos]
    if value != 0:
        ## consume the entry so it cannot be drawn again
        nums[pos] = 0
    return value
def shuffle_numbers(nums):
    """
    Return the values of nums in random order.

    Destructive: entries of nums are zeroed as they are drawn (via
    get_number).
    """
    import random
    random.seed()
    count = len(nums)
    drawn = []
    for _ in range(count):
        target = random.randint(0, count - 1)
        drawn.append(get_number(nums, target))
    return drawn
## function-call syntax: with a single argument "print(x)" behaves the
## same under Python 2 and keeps the script forward-compatible
print(shuffle_numbers(range(1, 76)))
#print(shuffle_numbers([2, 4, 6, 8, 10, 12]))
#print(shuffle_numbers(range(1, 5) + range(6, 10)))
ptphp/PtServer | refs/heads/master | library/core/ptnetworkaccessmanager.py | 1 | from PySide.QtNetwork import QNetworkAccessManager
from PySide.QtNetwork import QNetworkProxy
from PySide.QtCore import SIGNAL, QUrl
import urlparse
class PtNetworkAccessManager(QNetworkAccessManager):
    """
    QNetworkAccessManager subclass adding URL filtering and proxy
    configuration from a URL-style string.
    """

    ## URLs to block; NOTE(review): class attribute, so the filter list is
    ## shared across all instances — confirm this is intended.
    _url_filter = []

    def __init__(self, parent):
        QNetworkAccessManager.__init__(self, parent)
        self.finished.connect(self._request_ended)

    def _request_ended(self, reply):
        ## hook fired for every finished reply; currently a no-op
        pass

    def createRequest(self, operation, request, outgoingData):
        """
        Intercept request creation: any URL found in _url_filter is
        redirected to about:blank; all other requests pass through.
        """
        url = request.url().toString()
        for h in request.rawHeaderList():
            pass
            #self._debug(DEBUG, " %s: %s" % (h, request.rawHeader(h)))
        if self._url_filter:
            if url in self._url_filter:
                #self._debug(INFO, "URL filtered: %s" % url)
                request.setUrl(QUrl("about:blank"))
            else:
                pass
                #self._debug(DEBUG, "URL not filtered: %s" % url)
        #print url
        #if url == "http://v5.ele.me/":
            #request.setRawHeader("Accept-Encoding","")
        reply = QNetworkAccessManager.createRequest(self, operation, request, outgoingData)
        #self.emit(SIGNAL('networkRequestCreated(QNetworkReply*)'), reply)
        #if html[:6]=='\x1f\x8b\x08\x00\x00\x00':
        #    html=gzip.GzipFile(fileobj=StringIO(html)).read()
        return reply

    def get_proxy(self):
        """Return the QNetworkProxy object currently in use."""
        ## FIX: the docstring previously claimed a string was returned;
        ## self.proxy() yields a QNetworkProxy
        return self.proxy()

    def set_proxy(self, string_proxy=None):
        """Set proxy:

        url can be in the form:
        - hostname                                   (http proxy)
        - hostname:port                              (http proxy)
        - username:password@hostname:port            (http proxy)
        - http://username:password@hostname:port
        - socks5://username:password@hostname:port
        - https://username:password@hostname:port
        - httpcaching://username:password@hostname:port
        - ftpcaching://username:password@hostname:port

        Passing None or '' resets to no proxy.
        """
        if not string_proxy:
            string_proxy = ''
        ## FIX: parse unconditionally — previously urlinfo was never
        ## assigned when string_proxy was empty, raising NameError below
        urlinfo = urlparse.urlparse(string_proxy)
        if string_proxy and not urlinfo.scheme:
            # default to http proxy if we have a string
            string_proxy = "http://%s" % string_proxy
            urlinfo = urlparse.urlparse(string_proxy)
        self.proxy_url = string_proxy
        proxy = QNetworkProxy()
        if urlinfo.scheme == 'socks5':
            proxy.setType(QNetworkProxy.Socks5Proxy)
        elif urlinfo.scheme in ['https', 'http']:
            proxy.setType(QNetworkProxy.HttpProxy)
        elif urlinfo.scheme == 'httpcaching':
            proxy.setType(QNetworkProxy.HttpCachingProxy)
        elif urlinfo.scheme == 'ftpcaching':
            proxy.setType(QNetworkProxy.FtpCachingProxy)
        else:
            proxy.setType(QNetworkProxy.NoProxy)
        ## PEP 8: identity comparison with None (was "!= None")
        if urlinfo.hostname is not None:
            proxy.setHostName(urlinfo.hostname)
        if urlinfo.port is not None:
            proxy.setPort(urlinfo.port)
        if urlinfo.username is not None:
            proxy.setUser(urlinfo.username)
        else:
            proxy.setUser('')
        if urlinfo.password is not None:
            proxy.setPassword(urlinfo.password)
        else:
            proxy.setPassword('')
        self.setProxy(proxy)
        return self.proxy()
liggitt/openshift-ansible | refs/heads/master | roles/lib_openshift/library/oc_serviceaccount.py | 18 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount
short_description: Module to manage openshift service accounts
description:
- Manage openshift service accounts programmatically.
options:
state:
description:
- If present, the service account will be created if it doesn't exist or updated if different. If absent, the service account will be removed if present. If list, information about the service account will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
name:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account.
required: true
default: default
aliases: []
secrets:
description:
- A list of secrets that are associated with the service account.
required: false
default: None
aliases: []
image_pull_secrets:
description:
- A list of the image pull secrets that are associated with the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create registry serviceaccount
oc_serviceaccount:
name: registry
namespace: default
secrets:
- docker-registry-config
- registry-secret
register: sa_out
'''
# -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    """Exception raised for Yedit-specific failures."""
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup_ext=None,
                 backup=False):
        """Initialize a Yedit editor.

        :param filename: path of the file to load/save (None for in-memory use)
        :param content: pre-parsed content; becomes the initial yaml_dict
        :param content_type: format tag passed through to load() ('yaml' default)
        :param separator: key-path separator character (default '.')
        :param backup_ext: extension for backup copies; defaults to a timestamp
        :param backup: whether to write a backup before saving
        """
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        if backup_ext is None:
            # default backup extension is a timestamp, e.g. ".20240101T120000"
            self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
        else:
            self.backup_ext = backup_ext
        # load() (defined later in this class) parses filename/content
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            # guarantee a dict so later edits never operate on None
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' The key-path separator character (read accessor). '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' Replace the key-path separator character. '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' The parsed document currently being edited (read accessor). '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' Replace the parsed document wholesale. '''
        self.__yaml_dict = value
    @staticmethod
    def parse_key(key, sep='.'):
        '''Split a key path into (array-index, dict-key) match tuples.

        Embeds every common separator except the active one into the
        key-character class of re_key, so those characters are treated as
        ordinary key characters; findall() then yields one
        (index, key) pair per path component.
        '''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
    @staticmethod
    def valid_key(key, sep='.'):
        '''Return True when key matches the allowed key-path syntax.

        NOTE(review): re_valid_key contains a "%s" placeholder but is
        expanded here with str.format(), which leaves "%s" as literal
        characters in the class instead of substituting the separators —
        confirm intended behavior against upstream.
        '''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False

        return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
backup_ext=params['backup_ext'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we enapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Exception type raised when an openshift CLI operation fails.'''
# Extra directories searched for `oc` beyond $PATH.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    binary = 'oc'

    # Prefer shutil.which (Python 3); on interpreters without it the
    # AttributeError triggers a naive directory scan instead.
    try:
        located = shutil.which(binary, path=os.pathsep.join(search_dirs))
    except AttributeError:
        for directory in search_dirs:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                binary = candidate
                break
    else:
        if located is not None:
            binary = located

    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

    Builds `oc` / `oc adm` argument lists and executes them via
    subprocess, returning result dictionaries with at least
    'returncode' and 'cmd' keys.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # Work from a private copy of the kubeconfig so concurrent runs
        # cannot interfere with the original.
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the resource, applies `content` (key/value puts) or
        `edits` (Yedit edit list) to a temp file, and runs `oc replace`
        only when something actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')

        yed = Yedit(fname, res['results'][0], separator=sep)

        updated = False

        if content is not None:
            changes = []
            for key, value in content.items():
                changes.append(yed.put(key, value))

            if any([change[0] for change in changes]):
                updated = True

        elif edits is not None:
            results = Yedit.process_edits(edits, yed)

            if results['changed']:
                updated = True

        if updated:
            yed.write()
            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # '-f -' makes oc read the template from stdin (input_data).
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-p')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None, field_selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]

        if selector is not None:
            cmd.append('--selector={}'.format(selector))

        if field_selector is not None:
            cmd.append('--field-selector={}'.format(field_selector))

        # Name cannot be used with selector or field_selector.
        if selector is None and field_selector is None and name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']

        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier.

        Returns (returncode, stdout, stderr) with the streams decoded
        as UTF-8; KUBECONFIG is injected into the child environment.
        '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Returns a dict with 'returncode', 'cmd' and (depending on
        output_type) 'results'; 'stdout'/'stderr' are included on error.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' looks like a typo for 'empty' but is
        # preserved as-is; changing it would alter namespace handling.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    # NOTE(review): this message check only matches the
                    # Python 2 json error text; on other versions the
                    # decode failure is silently ignored here.
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules

    Temp-file helpers, version-string parsing, and a recursive
    definition-comparison helper used for idempotency checks.
    '''
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''

        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':

            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''

        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            # Matches on metadata.name of each result entry.
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':

            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)

        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output

        Maps each known tool name to the last whitespace-separated
        token of its line in `oc version` output.
        '''

        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings

        For each tool, derives '<tool>_numeric' (version without 'v'
        prefix or '+build' suffix) and '<tool>_short' (first two dotted
        components only).
        '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
                versions_dict[tech + '_numeric'] = version.split('+')[0]
                # "3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm

        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares result_def against user_def, ignoring keys
        in `skip` (plus caller-provided skip_keys). Returns True when
        equivalent.
        '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # NOTE(review): this print is NOT guarded by
                            # `debug`, unlike every other diagnostic here.
                            print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config

    Holds resource identity plus a dict of option descriptors of the
    form {name: {'value': ..., 'include': bool}} and renders them as
    `--name=value` CLI parameters.
    '''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for option_name in sorted(self.config_options.keys()):
            descriptor = self.config_options[option_name]
            # Skip excluded options and ones with no usable value
            # (None is allowed through only when it is an int-typed 0 case).
            usable = descriptor['value'] is not None or isinstance(descriptor['value'], int)
            if not (descriptor['include'] and usable):
                continue
            if option_name == ascommalist:
                rendered = ','.join('{}={}'.format(item_key, item_val)
                                    for item_key, item_val in sorted(descriptor['value'].items()))
            else:
                rendered = descriptor['value']
            params.append('--{}={}'.format(option_name.replace('_', '-'), rendered))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
    """Service account config class.

    Stores the options and builds a default ServiceAccount definition dict.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        """Populate ``self.data`` with a properly structured ServiceAccount."""
        self.data.update({
            'apiVersion': 'v1',
            'kind': 'ServiceAccount',
            'metadata': {
                'name': self.name,
                'namespace': self.namespace,
            },
            # Secrets are represented as lists of {"name": ...} references.
            'secrets': [{'name': sec} for sec in self.secrets],
            'imagePullSecrets': [{'name': sec} for sec in self.image_pull_secrets],
        })
class ServiceAccount(Yedit):
    ''' Class to wrap the oc command line tools (ServiceAccount resource) '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor

        :param content: parsed resource definition handed to Yedit
        '''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' lazily fetched list of imagePullSecrets entries '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' lazily fetched list of secrets entries

        NOTE(review): uses truthiness (``not self._secrets``) rather than an
        ``is None`` check, so an empty list is re-fetched on every access;
        kept as-is to preserve existing behavior.
        '''
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove a secret by name; return True if something was removed '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # BUGFIX: previously `if remove_idx:` — index 0 is falsy, so the
        # first secret in the list could never be deleted.
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True

        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove an image_pull_secret by name; return True if removed '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # BUGFIX: index 0 is a valid match; compare against None explicitly.
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True

        return False

    def find_secret(self, inc_secret):
        '''return the secret entry matching inc_secret, or None'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def find_image_pull_secret(self, inc_secret):
        '''return the imagePullSecrets entry matching inc_secret, or None'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def add_secret(self, inc_secret):
        '''add a secret reference by name'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            # No secrets list yet: create the path in the underlying document.
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''add an image_pull_secret reference by name'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCServiceAccount(OpenShiftCLI):
    ''' Class to wrap the oc command line tools for ServiceAccount resources '''
    kind = 'sa'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCServiceAccount

        :param config: a ServiceAccountConfig carrying name/namespace/kubeconfig
        :param verbose: pass-through verbosity flag for OpenShiftCLI
        '''
        super(OCServiceAccount, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
        self.config = config
        # populated by get(); None until a successful fetch
        self.service_account = None

    def exists(self):
        ''' return whether the service account exists; only meaningful after get() '''
        if self.service_account:
            return True

        return False

    def get(self):
        '''return service account information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.service_account = ServiceAccount(content=result['results'][0])
        elif '\"%s\" not found' % self.config.name in result['stderr']:
            # "not found" is not an error for our purposes: report success
            # with an empty result so callers can decide to create it.
            result['returncode'] = 0
            result['results'] = [{}]
        elif 'namespaces \"%s\" not found' % self.config.namespace in result['stderr']:
            # missing namespace is treated the same as a missing resource
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''delete the service account'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the service account from the config definition'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object

        Merges any secrets / imagePullSecrets from the config into the live
        object, then replaces it server-side.
        '''
        # need to update the tls information and the service name
        for secret in self.config.secrets:
            result = self.service_account.find_secret(secret)
            if not result:
                self.service_account.add_secret(secret)

        for secret in self.config.image_pull_secrets:
            result = self.service_account.find_image_pull_secret(secret)
            if not result:
                self.service_account.add_image_pull_secret(secret)

        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed '''
        # since creating an service account generates secrets and imagepullsecrets
        # check_def_equal will not work
        # Instead, verify all secrets passed are in the list
        for secret in self.config.secrets:
            result = self.service_account.find_secret(secret)
            if not result:
                return True

        for secret in self.config.image_pull_secrets:
            result = self.service_account.find_image_pull_secret(secret)
            if not result:
                return True

        return False

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        '''run the oc_serviceaccount module

        :param params: dict of Ansible module parameters
        :param check_mode: when True, report what would change without acting
        :return: result dict suitable for exit_json/fail_json
        '''
        rconfig = ServiceAccountConfig(params['name'],
                                       params['namespace'],
                                       params['kubeconfig'],
                                       params['secrets'],
                                       params['image_pull_secrets'],
                                      )

        oc_sa = OCServiceAccount(rconfig,
                                 verbose=params['debug'])

        state = params['state']

        api_rval = oc_sa.get()

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': 'list'}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_sa.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a delete.'}

                api_rval = oc_sa.delete()

                return {'changed': True, 'results': api_rval, 'state': 'absent'}

            return {'changed': False, 'state': 'absent'}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_sa.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a create.'}

                # Create it here
                api_rval = oc_sa.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_sa.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': 'present'}

            ########
            # Update
            ########
            if oc_sa.needs_update():
                api_rval = oc_sa.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_sa.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': 'present'}

            # present and already up to date: nothing to do
            return {'changed': False, 'results': api_rval, 'state': 'present'}

        return {'failed': True,
                'changed': False,
                'msg': 'Unknown state passed. %s' % state,
                'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
def main():
    '''
    ansible oc module for service accounts

    Declares the module argument spec, delegates all work to
    OCServiceAccount.run_ansible, and translates the result dict into
    exit_json/fail_json.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, required=True, type='str'),
            namespace=dict(default=None, required=True, type='str'),
            secrets=dict(default=None, type='list'),
            image_pull_secrets=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )

    rval = OCServiceAccount.run_ansible(module.params, module.check_mode)
    # run_ansible signals failure via a 'failed' key rather than raising
    if 'failed' in rval:
        module.fail_json(**rval)

    module.exit_json(**rval)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
|
dwagon/pymoo | refs/heads/master | moo/building/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
ClearCorp/server-tools | refs/heads/9.0 | password_security/exceptions.py | 11 | # -*- coding: utf-8 -*-
# Copyright 2015 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from openerp.exceptions import Warning as UserError
class PassError(UserError):
    """Raised when a password fails the security policy.

    Example: when you try to create an insecure password.
    """
    def __init__(self, msg):
        # keep the message on the instance as well, matching the historic
        # openerp exception API (``exc.message``)
        self.message = msg
        super(PassError, self).__init__(msg)
|
gautam1858/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/distributions/bijector_test.py | 13 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseBijectorTest(test.TestCase):
  """Tests properties of the Bijector base-class."""

  def testIsAbstract(self):
    # Bijector declares __init__ abstract, so direct instantiation must fail.
    with self.assertRaisesRegexp(TypeError,
                                 ("Can't instantiate abstract class Bijector "
                                  "with abstract methods __init__")):
      bijector.Bijector()  # pylint: disable=abstract-class-instantiated

  def testDefaults(self):
    class _BareBonesBijector(bijector.Bijector):
      """Minimal specification of a `Bijector`."""

      def __init__(self):
        super(_BareBonesBijector, self).__init__(forward_min_event_ndims=0)

    bij = _BareBonesBijector()
    self.assertEqual([], bij.graph_parents)
    self.assertEqual(False, bij.is_constant_jacobian)
    self.assertEqual(False, bij.validate_args)
    self.assertEqual(None, bij.dtype)
    self.assertEqual("bare_bones_bijector", bij.name)

    # With no overrides, event-shape transforms default to identity, so the
    # forward/inverse cross-assignment below is harmless here.
    # NOTE(review): forward_event_shape_ is taken from
    # inverse_event_shape_tensor (and vice versa); the names look swapped --
    # confirm intent.
    for shape in [[], [1, 2], [1, 2, 3]]:
      forward_event_shape_ = self.evaluate(
          bij.inverse_event_shape_tensor(shape))
      inverse_event_shape_ = self.evaluate(
          bij.forward_event_shape_tensor(shape))
      self.assertAllEqual(shape, forward_event_shape_)
      self.assertAllEqual(shape, bij.forward_event_shape(shape))
      self.assertAllEqual(shape, inverse_event_shape_)
      self.assertAllEqual(shape, bij.inverse_event_shape(shape))

    # The un-overridden transform methods must raise NotImplementedError.
    with self.assertRaisesRegexp(
        NotImplementedError, "inverse not implemented"):
      bij.inverse(0)

    with self.assertRaisesRegexp(
        NotImplementedError, "forward not implemented"):
      bij.forward(0)

    with self.assertRaisesRegexp(
        NotImplementedError, "inverse_log_det_jacobian not implemented"):
      bij.inverse_log_det_jacobian(0, event_ndims=0)

    with self.assertRaisesRegexp(
        NotImplementedError, "forward_log_det_jacobian not implemented"):
      bij.forward_log_det_jacobian(0, event_ndims=0)
class IntentionallyMissingError(Exception):
  """Raised by test bijectors to mark a code path that must not be reached."""
  pass
class BrokenBijector(bijector.Bijector):
  """Bijector whose forward/inverse selectively raise IntentionallyMissingError.

  Used by the caching tests below to prove that cached values are reused
  instead of being recomputed via the "missing" direction.
  """

  def __init__(
      self, forward_missing=False, inverse_missing=False, validate_args=False):
    super(BrokenBijector, self).__init__(
        validate_args=validate_args, forward_min_event_ndims=0, name="broken")
    self._forward_missing = forward_missing
    self._inverse_missing = inverse_missing

  def _forward(self, x):
    if self._forward_missing:
      raise IntentionallyMissingError
    return 2 * x

  def _inverse(self, y):
    if self._inverse_missing:
      raise IntentionallyMissingError
    return y / 2.

  def _inverse_log_det_jacobian(self, y):  # pylint:disable=unused-argument
    if self._inverse_missing:
      raise IntentionallyMissingError
    return -math_ops.log(2.)

  def _forward_log_det_jacobian(self, x):  # pylint:disable=unused-argument
    if self._forward_missing:
      raise IntentionallyMissingError
    return math_ops.log(2.)
class BijectorTestEventNdims(test.TestCase):
  """Checks that event_ndims arguments are validated as scalar integers."""

  def testBijectorNonIntegerEventNdims(self):
    bij = BrokenBijector()
    with self.assertRaisesRegexp(ValueError, "Expected integer"):
      bij.forward_log_det_jacobian(1., event_ndims=1.5)
    with self.assertRaisesRegexp(ValueError, "Expected integer"):
      bij.inverse_log_det_jacobian(1., event_ndims=1.5)

  def testBijectorArrayEventNdims(self):
    bij = BrokenBijector()
    with self.assertRaisesRegexp(ValueError, "Expected scalar"):
      bij.forward_log_det_jacobian(1., event_ndims=(1, 2))
    with self.assertRaisesRegexp(ValueError, "Expected scalar"):
      bij.inverse_log_det_jacobian(1., event_ndims=(1, 2))

  @test_util.run_deprecated_v1
  def testBijectorDynamicEventNdims(self):
    # event_ndims fed at run time: validation happens via a runtime op check.
    bij = BrokenBijector(validate_args=True)
    event_ndims = array_ops.placeholder(dtype=np.int32, shape=None)
    with self.cached_session():
      with self.assertRaisesOpError("Expected scalar"):
        bij.forward_log_det_jacobian(1., event_ndims=event_ndims).eval({
            event_ndims: (1, 2)})
      with self.assertRaisesOpError("Expected scalar"):
        bij.inverse_log_det_jacobian(1., event_ndims=event_ndims).eval({
            event_ndims: (1, 2)})
@six.add_metaclass(abc.ABCMeta)
class BijectorCachingTestBase(object):
  """Shared caching tests; subclasses supply the BrokenBijector-like class."""

  @abc.abstractproperty
  def broken_bijector_cls(self):
    # return a BrokenBijector type Bijector, since this will test the caching.
    raise IntentionallyMissingError("Not implemented")

  def testCachingOfForwardResults(self):
    # inverse_missing=True: any real inverse computation would raise, so a
    # successful inverse() proves the cached forward mapping was reused.
    broken_bijector = self.broken_bijector_cls(inverse_missing=True)
    x = constant_op.constant(1.1)

    # Call forward and forward_log_det_jacobian one-by-one (not together).
    y = broken_bijector.forward(x)
    _ = broken_bijector.forward_log_det_jacobian(x, event_ndims=0)

    # Now, everything should be cached if the argument is y.
    broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
    try:
      broken_bijector.inverse(y)
      broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
    except IntentionallyMissingError:
      raise AssertionError("Tests failed! Cached values not used.")

    # Different event_ndims should not be cached.
    with self.assertRaises(IntentionallyMissingError):
      broken_bijector.inverse_log_det_jacobian(y, event_ndims=1)

  def testCachingOfInverseResults(self):
    # forward_missing=True: symmetric check for the inverse -> forward cache.
    broken_bijector = self.broken_bijector_cls(forward_missing=True)
    y = constant_op.constant(1.1)

    # Call inverse and inverse_log_det_jacobian one-by-one (not together).
    x = broken_bijector.inverse(y)
    _ = broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)

    # Now, everything should be cached if the argument is x.
    try:
      broken_bijector.forward(x)
      broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
    except IntentionallyMissingError:
      raise AssertionError("Tests failed! Cached values not used.")

    # Different event_ndims should not be cached.
    with self.assertRaises(IntentionallyMissingError):
      broken_bijector.forward_log_det_jacobian(x, event_ndims=1)
class BijectorCachingTest(BijectorCachingTestBase, test.TestCase):
  """Test caching with BrokenBijector."""

  @property
  def broken_bijector_cls(self):
    # Concrete class for the abstract property on the base.
    return BrokenBijector
class ExpOnlyJacobian(bijector.Bijector):
  """Only used for jacobian calculations.

  Implements only the log-det-jacobian pair of exp; forward/inverse are
  intentionally left unimplemented.
  """

  def __init__(self, forward_min_event_ndims=0):
    super(ExpOnlyJacobian, self).__init__(
        validate_args=False,
        is_constant_jacobian=False,
        forward_min_event_ndims=forward_min_event_ndims,
        name="exp")

  def _inverse_log_det_jacobian(self, y):
    return -math_ops.log(y)

  def _forward_log_det_jacobian(self, x):
    return math_ops.log(x)
class ConstantJacobian(bijector.Bijector):
  """Only used for jacobian calculations.

  is_constant_jacobian=True plus constant +/-2 log-det values lets tests
  verify that event_ndims reduction multiplies by the event size.
  """

  def __init__(self, forward_min_event_ndims=0):
    super(ConstantJacobian, self).__init__(
        validate_args=False,
        is_constant_jacobian=True,
        forward_min_event_ndims=forward_min_event_ndims,
        name="c")

  def _inverse_log_det_jacobian(self, y):
    return constant_op.constant(2., y.dtype)

  def _forward_log_det_jacobian(self, x):
    return constant_op.constant(-2., x.dtype)
class BijectorReduceEventDimsTest(test.TestCase):
  """Tests reducing log-det-jacobians over event_ndims.

  (Docstring corrected: this class tests event-dim reduction, not caching.)
  """

  def testReduceEventNdimsForward(self):
    x = [[[1., 2.], [3., 4.]]]
    bij = ExpOnlyJacobian()
    # event_ndims=0: elementwise; 1: summed over last axis; 2: last two axes.
    self.assertAllClose(
        np.log(x),
        self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
    self.assertAllClose(
        np.sum(np.log(x), axis=-1),
        self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        np.sum(np.log(x), axis=(-1, -2)),
        self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))

  def testReduceEventNdimsForwardRaiseError(self):
    x = [[[1., 2.], [3., 4.]]]
    # event_ndims below forward_min_event_ndims must be rejected.
    bij = ExpOnlyJacobian(forward_min_event_ndims=1)
    with self.assertRaisesRegexp(ValueError, "must be larger than"):
      bij.forward_log_det_jacobian(x, event_ndims=0)

  def testReduceEventNdimsInverse(self):
    x = [[[1., 2.], [3., 4.]]]
    bij = ExpOnlyJacobian()
    self.assertAllClose(
        -np.log(x),
        self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
    self.assertAllClose(
        np.sum(-np.log(x), axis=-1),
        self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        np.sum(-np.log(x), axis=(-1, -2)),
        self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))

  def testReduceEventNdimsInverseRaiseError(self):
    x = [[[1., 2.], [3., 4.]]]
    bij = ExpOnlyJacobian(forward_min_event_ndims=1)
    with self.assertRaisesRegexp(ValueError, "must be larger than"):
      bij.inverse_log_det_jacobian(x, event_ndims=0)

  def testReduceEventNdimsForwardConstJacobian(self):
    x = [[[1., 2.], [3., 4.]]]
    bij = ConstantJacobian()
    # Constant jacobian: reduction multiplies by event size (1, 2, 4 elements).
    self.assertAllClose(
        -2.,
        self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
    self.assertAllClose(
        -4.,
        self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        -8.,
        self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))

  def testReduceEventNdimsInverseConstJacobian(self):
    x = [[[1., 2.], [3., 4.]]]
    bij = ConstantJacobian()
    self.assertAllClose(
        2.,
        self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
    self.assertAllClose(
        4.,
        self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        8.,
        self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))

  @test_util.run_deprecated_v1
  def testHandlesNonStaticEventNdims(self):
    # event_ndims known only at run time must still reduce correctly.
    x_ = [[[1., 2.], [3., 4.]]]
    x = array_ops.placeholder_with_default(x_, shape=None)
    event_ndims = array_ops.placeholder(dtype=np.int32, shape=[])
    bij = ExpOnlyJacobian(forward_min_event_ndims=1)
    bij.inverse_log_det_jacobian(x, event_ndims=event_ndims)
    with self.cached_session() as sess:
      ildj = sess.run(bij.inverse_log_det_jacobian(x, event_ndims=event_ndims),
                      feed_dict={event_ndims: 1})
    self.assertAllClose(-np.log(x_), ildj)
if __name__ == "__main__":
test.main()
|
bufferapp/buffer-django-nonrel | refs/heads/master | tests/regressiontests/generic_views/views.py | 49 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm
class CustomTemplateView(generic.TemplateView):
    template_name = 'generic_views/about.html'

    def get_context_data(self, **kwargs):
        # Intentionally replaces (rather than extends) the default context;
        # 'params' mirrors the captured URL kwargs.
        return {
            'params': kwargs,
            'key': 'value'
        }
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
    """A ListView that doesn't use a model."""
    # BUGFIX: the second entry previously repeated the key 'last'
    # ({'last': 'Yoko', 'last': 'Ono'}), so 'Yoko' was silently dropped by
    # the duplicate dict key; both entries now carry 'first' and 'last'.
    queryset = [
        {'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
    ]
    template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
    # Accepts the standard Paginator signature but pins orphans=2, discarding
    # the caller-supplied value.
    # NOTE(review): presumably deliberate so tests can detect that this
    # paginator class was actually used -- confirm against the test suite.
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
    # Stray trailing semicolon removed (non-idiomatic; no behavior change).
    paginate_by = 5

    def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        # Pins orphans=2 regardless of the caller-supplied value, mirroring
        # CustomPaginator above so tests can detect this override.
        return super(AuthorListCustomPaginator, self).get_paginator(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
def get_queryset(self):
return Author.objects.all()
|
photoninger/ansible | refs/heads/devel | test/runner/lib/cloud/azure.py | 18 | """Azure plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.util import (
ApplicationError,
display,
is_shippable,
)
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.http import (
HttpClient,
urlparse,
urlunparse,
parse_qs,
)
from lib.core_ci import (
AnsibleCoreCI,
)
class AzureCloudProvider(CloudProvider):
    """Azure cloud provider plugin. Sets up cloud resources before delegation."""
    # Local file holding a Sherlock provisioning URI (with API key).
    SHERLOCK_CONFIG_PATH = os.path.expanduser('~/.ansible-sherlock-ci.cfg')

    def __init__(self, args):
        """
        :type args: TestConfig
        """
        super(AzureCloudProvider, self).__init__(args)

        # AnsibleCoreCI handle; set only when credentials come from core-ci.
        self.aci = None

    def filter(self, targets, exclude):
        """Filter out the cloud tests when the necessary config and resources are not available.
        :type targets: tuple[TestTarget]
        :type exclude: list[str]
        """
        # Any of these sources of credentials means the tests can run.
        if os.path.isfile(self.config_static_path):
            return

        aci = self._create_ansible_core_ci()

        if os.path.isfile(aci.ci_key):
            return

        if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
            return

        if is_shippable():
            return

        # No credentials available: let the base class exclude the targets.
        super(AzureCloudProvider, self).filter(targets, exclude)

    def setup(self):
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super(AzureCloudProvider, self).setup()

        if not self._use_static_config():
            self._setup_dynamic()

        get_config(self.config_path)  # check required variables

    def cleanup(self):
        """Clean up the cloud resource and any temporary configuration files after tests complete."""
        if self.aci:
            self.aci.stop()

        super(AzureCloudProvider, self).cleanup()

    def _setup_dynamic(self):
        """Request Azure credentials through Sherlock."""
        display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)

        config = self._read_config_template()
        response = {}

        if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
            # Sherlock path: call the provisioning endpoint from the local URI.
            with open(self.SHERLOCK_CONFIG_PATH, 'r') as sherlock_fd:
                sherlock_uri = sherlock_fd.readline().strip() + '&rgcount=2'

            parts = urlparse(sherlock_uri)
            query_string = parse_qs(parts.query)
            base_uri = urlunparse(parts[:4] + ('', ''))

            if 'code' not in query_string:
                example_uri = 'https://example.azurewebsites.net/api/sandbox-provisioning'
                raise ApplicationError('The Sherlock URI must include the API key in the query string. Example: %s?code=xxx' % example_uri)

            display.info('Initializing azure/sherlock from: %s' % base_uri, verbosity=1)

            http = HttpClient(self.args)
            result = http.get(sherlock_uri)

            display.info('Started azure/sherlock from: %s' % base_uri, verbosity=1)

            if not self.args.explain:
                response = result.json()
        else:
            # Fallback: provision via Ansible Core CI.
            aci = self._create_ansible_core_ci()

            aci_result = aci.start()

            if not self.args.explain:
                response = aci_result['azure']
                self.aci = aci

        if not self.args.explain:
            values = dict(
                AZURE_CLIENT_ID=response['clientId'],
                AZURE_SECRET=response['clientSecret'],
                AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
                AZURE_TENANT=response['tenantId'],
                RESOURCE_GROUP=response['resourceGroupNames'][0],
                RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
            )

            # Render as "KEY: value" lines, the format get_config() parses.
            config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))

        self._write_config(config)

    def _create_ansible_core_ci(self):
        """
        :rtype: AnsibleCoreCI
        """
        return AnsibleCoreCI(self.args, 'azure', 'azure', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AzureCloudEnvironment(CloudEnvironment):
    """Azure cloud environment plugin. Updates integration test environment after delegation."""
    def configure_environment(self, env, cmd):
        """
        :type env: dict[str, str]
        :type cmd: list[str]
        """
        config = get_config(self.config_path)

        # Expose resource groups as extra vars and all config as env vars.
        cmd.append('-e')
        cmd.append('resource_prefix=%s' % self.resource_prefix)
        cmd.append('-e')
        cmd.append('resource_group=%s' % config['RESOURCE_GROUP'])
        cmd.append('-e')
        cmd.append('resource_group_secondary=%s' % config['RESOURCE_GROUP_SECONDARY'])

        for key in config:
            env[key] = config[key]

    def on_failure(self, target, tries):
        """
        :type target: TestTarget
        :type tries: int
        """
        # Only hint once we are out of retries and running managed resources.
        if not tries and self.managed:
            display.notice('If %s failed due to permissions, the test policy may need to be updated. '
                           'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)

    @property
    def inventory_hosts(self):
        """
        :rtype: str | None
        """
        return 'azure'
def get_config(config_path):
    """Parse a ``key: value`` config file into a dict and validate it.

    Blank lines, comment lines and lines without a colon are ignored.

    :param config_path: str, path to the config file
    :return: dict[str, str]
    :raises ApplicationError: when required variables are missing
    """
    config = {}

    with open(config_path, 'r') as config_fd:
        for raw_line in config_fd.read().splitlines():
            stripped = raw_line.strip()
            if not stripped or stripped.startswith('#') or ':' not in raw_line:
                continue
            key, _, value = raw_line.partition(':')
            config[key.strip()] = value.strip()

    rg_vars = (
        'RESOURCE_GROUP',
        'RESOURCE_GROUP_SECONDARY',
    )

    sp_vars = (
        'AZURE_CLIENT_ID',
        'AZURE_SECRET',
        'AZURE_SUBSCRIPTION_ID',
        'AZURE_TENANT',
    )

    ad_vars = (
        'AZURE_AD_USER',
        'AZURE_PASSWORD',
        'AZURE_SUBSCRIPTION_ID',
    )

    def _have_all(names):
        """True when every name in *names* is present in the config."""
        return all(name in config for name in names)

    if not _have_all(rg_vars):
        raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))

    # Either service-principal or active-directory credentials must be complete.
    if not _have_all(sp_vars) and not _have_all(ad_vars):
        raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
            ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))

    return config
|
openmb/openblackhole-enigma2 | refs/heads/master | lib/python/Components/Converter/RdsInfo.py | 163 | from enigma import iRdsDecoder, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
class RdsInfo(Converter, object):
    """Converter exposing RDS decoder state (radio text / RTP text / Rass)."""
    # Converter type ids (values taken by self.type).
    RASS_INTERACTIVE_AVAILABLE = 0
    RTP_TEXT_CHANGED = 1
    RADIO_TEXT_CHANGED = 2

    def __init__(self, type):
        Converter.__init__(self, type)
        # Map the skin-supplied type string to (type id, service events to watch).
        self.type, self.interesting_events = {
            "RadioText": (self.RADIO_TEXT_CHANGED, (iPlayableService.evUpdatedRadioText,)),
            "RtpText": (self.RTP_TEXT_CHANGED, (iPlayableService.evUpdatedRtpText,)),
            "RasInteractiveAvailable": (self.RASS_INTERACTIVE_AVAILABLE, (iPlayableService.evUpdatedRassInteractivePicMask,))
        }[type]

    @cached
    def getText(self):
        """Return the current radio/RTP text, or '' if no decoder is available."""
        decoder = self.source.decoder
        text = ""
        if decoder:
            if self.type == self.RADIO_TEXT_CHANGED:
                text = decoder.getText(iRdsDecoder.RadioText)
            elif self.type == self.RTP_TEXT_CHANGED:
                text = decoder.getText(iRdsDecoder.RtpText)
            else:
                print "unknown RdsInfo Converter type", self.type
        return text

    text = property(getText)

    @cached
    def getBoolean(self):
        """Return availability of the requested RDS feature as a boolean."""
        decoder = self.source.decoder
        if self.type == self.RASS_INTERACTIVE_AVAILABLE:
            # bit 0 of the first mask byte flags Rass interactive availability
            mask = decoder and decoder.getRassInteractiveMask()
            return (mask and mask[0] & 1 and True) or False
        elif self.type == self.RADIO_TEXT_CHANGED:
            return (len(decoder.getText(iRdsDecoder.RadioText)) and True) or False
        elif self.type == self.RTP_TEXT_CHANGED:
            return (len(decoder.getText(iRdsDecoder.RtpText)) and True) or False

    boolean = property(getBoolean)

    def changed(self, what):
        # Forward only the events this converter registered interest in.
        if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
            Converter.changed(self, what)
|
joshdavies89/e-commerce | refs/heads/master | src/products/admin.py | 1 | from django.contrib import admin
from .models import Product
admin.site.register(Product)
|
boundarydevices/android_external_chromium_org | refs/heads/cm-12.0 | tools/telemetry/telemetry/timeline/async_slice.py | 10 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event as event
class AsyncSlice(event.TimelineEvent):
  ''' A AsyncSlice represents an interval of time during which an
  asynchronous operation is in progress. An AsyncSlice consumes no CPU time
  itself and so is only associated with Threads at its start and end point.
  '''
  def __init__(self, category, name, timestamp, args=None,
               duration=0, start_thread=None, end_thread=None,
               thread_start=None, thread_duration=None):
    super(AsyncSlice, self).__init__(
        category, name, timestamp, duration, thread_start, thread_duration,
        args)
    # Parent/child links are wired up externally via AddSubSlice.
    self.parent_slice = None
    self.start_thread = start_thread
    self.end_thread = end_thread
    self.sub_slices = []
    self.id = None

  def AddSubSlice(self, sub_slice):
    # Caller must set sub_slice.parent_slice to self before adding.
    assert sub_slice.parent_slice == self
    self.sub_slices.append(sub_slice)

  def IterEventsInThisContainerRecrusively(self):
    # NOTE(review): name typo ("Recrusively") is part of the public
    # interface used by callers, so it cannot be renamed here. Also, this
    # only yields direct sub-slices (one level), not a full recursion --
    # confirm whether nested sub-slices should be yielded too.
    for sub_slice in self.sub_slices:
      yield sub_slice
HopeFOAM/HopeFOAM | refs/heads/master | ThirdParty-0.1/ParaView-5.0.1/Examples/Catalyst/PythonFullExample/coprocessor.py | 1 | coProcessor = None
usecp = True
def initialize():
global coProcessor, usecp
if usecp:
import paraview
import vtkParallelCorePython
import vtk
from mpi4py import MPI
import os, sys
paraview.options.batch = True
paraview.options.symmetric = True
import vtkPVClientServerCoreCorePython as CorePython
try:
import vtkPVServerManagerApplicationPython as ApplicationPython
except:
paraview.print_error("Error: Cannot import vtkPVServerManagerApplicationPython")
if not CorePython.vtkProcessModule.GetProcessModule():
pvoptions = None
if paraview.options.batch:
pvoptions = CorePython.vtkPVOptions();
pvoptions.SetProcessType(CorePython.vtkPVOptions.PVBATCH)
if paraview.options.symmetric:
pvoptions.SetSymmetricMPIMode(True)
ApplicationPython.vtkInitializationHelper.Initialize(sys.executable, CorePython.vtkProcessModule.PROCESS_BATCH, pvoptions)
import paraview.servermanager as pvsm
# we need ParaView 4.2 since ParaView 4.1 doesn't properly wrap
# vtkPVPythonCatalystPython
if pvsm.vtkSMProxyManager.GetVersionMajor() != 4 or \
pvsm.vtkSMProxyManager.GetVersionMinor() < 2:
print 'Must use ParaView v4.2 or greater'
sys.exit(0)
import numpy
import vtkPVCatalystPython as catalyst
import vtkPVPythonCatalystPython as pythoncatalyst
import paraview.simple
import paraview.vtk as vtk
from paraview import numpy_support
paraview.options.batch = True
paraview.options.symmetric = True
coProcessor = catalyst.vtkCPProcessor()
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
from mpi4py import MPI
def finalize():
global coProcessor, usecp
if usecp:
coProcessor.Finalize()
import vtkPVServerManagerApplicationPython as ApplicationPython
ApplicationPython.vtkInitializationHelper.Finalize()
def addscript(name):
global coProcessor
import vtkPVPythonCatalystPython as pythoncatalyst
pipeline = pythoncatalyst.vtkCPPythonScriptPipeline()
pipeline.Initialize(name)
coProcessor.AddPipeline(pipeline)
def coprocess(time, timeStep, grid, attributes):
global coProcessor
import vtk
import vtkPVCatalystPython as catalyst
import paraview
from paraview import numpy_support
dataDescription = catalyst.vtkCPDataDescription()
dataDescription.SetTimeData(time, timeStep)
dataDescription.AddInput("input")
if coProcessor.RequestDataDescription(dataDescription):
import fedatastructures
imageData = vtk.vtkImageData()
imageData.SetExtent(grid.XStartPoint, grid.XEndPoint, 0, grid.NumberOfYPoints-1, 0, grid.NumberOfZPoints-1)
imageData.SetSpacing(grid.Spacing)
velocity = paraview.numpy_support.numpy_to_vtk(attributes.Velocity)
velocity.SetName("velocity")
imageData.GetPointData().AddArray(velocity)
pressure = numpy_support.numpy_to_vtk(attributes.Pressure)
pressure.SetName("pressure")
imageData.GetCellData().AddArray(pressure)
dataDescription.GetInputDescriptionByName("input").SetGrid(imageData)
dataDescription.GetInputDescriptionByName("input").SetWholeExtent(0, grid.NumberOfGlobalXPoints-1, 0, grid.NumberOfYPoints-1, 0, grid.NumberOfZPoints-1)
coProcessor.CoProcess(dataDescription)
|
fiji-flo/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/third_party/py/testing/code/test_excinfo.py | 55 | # -*- coding: utf-8 -*-
import py
import pytest
import sys
from test_source import astonly
from py._code.code import FormattedExcinfo, ReprExceptionInfo
queue = py.builtin._tryimport('queue', 'Queue')
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
try:
import importlib
except ImportError:
invalidate_import_caches = None
else:
invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
broken_on_modern_pytest = pytest.mark.xfail(
pytest_version_info[0] != 2,
reason="this test hasn't been fixed after moving py.code into pytest",
run=False
)
class TWMock:
def __init__(self):
self.lines = []
def sep(self, sep, line=None):
self.lines.append((sep, line))
def line(self, line, **kw):
self.lines.append(line)
def markup(self, text, **kw):
return text
fullwidth = 80
def test_excinfo_simple():
try:
raise ValueError
except ValueError:
info = py.code.ExceptionInfo()
assert info.type == ValueError
def test_excinfo_getstatement():
def g():
raise ValueError
def f():
g()
try:
f()
except ValueError:
excinfo = py.code.ExceptionInfo()
linenumbers = [
py.code.getrawcode(f).co_firstlineno-1+3,
py.code.getrawcode(f).co_firstlineno-1+1,
py.code.getrawcode(g).co_firstlineno-1+1,
]
l = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in l]
assert foundlinenumbers == linenumbers
#for x in info:
# print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
#xxx
# testchain for getentries test below
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = True
f()
#
def h():
#
g()
#
class TestTraceback_f_g_h:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = py.code.ExceptionInfo()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ['f', 'g', 'h']
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
s = str(tb[-1].getsource())
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
@astonly
@failsonjython
def test_traceback_entry_getsource_in_construct(self):
source = py.code.Source("""\
def xyz():
try:
raise ValueError
except somenoname:
pass
xyz()
""")
try:
exec (source.compile())
except NameError:
tb = py.code.ExceptionInfo().traceback
print (tb[-1].getsource())
s = str(tb[-1].getsource())
assert s.startswith("def xyz():\n try:")
assert s.strip().endswith("except somenoname:")
def test_traceback_cut(self):
co = py.code.Code(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, testdir):
p = testdir.makepyfile("def f(): raise ValueError")
excinfo = py.test.raises(ValueError, "p.pyimport().f()")
basedir = py.path.local(py.test.__file__).dirpath()
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
if hasattr(x, 'path'):
assert not py.path.local(x.path).relto(basedir)
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter()
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n-1)
excinfo = pytest.raises(RuntimeError, f, 100)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self):
def do_stuff():
raise RuntimeError
def reraise_me():
import sys
exc, val, tb = sys.exc_info()
py.builtin._reraise(exc, val, tb)
def f(n):
try:
do_stuff()
except:
reraise_me()
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
# XXX: simplified locally testable version
decorator = py.test.importorskip('decorator').decorator
def log(f, *k, **kw):
print('%s %s' % (k, kw))
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError('')
fail = log(log(fail))
excinfo = py.test.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_traceback_getcrashentry(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == 'h'
def test_traceback_getcrashentry_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == 'g'
def hello(x):
x + 5
def test_tbentry_reinterpret():
try:
hello("hello")
except TypeError:
excinfo = py.code.ExceptionInfo()
tbentry = excinfo.traceback[-1]
msg = tbentry.reinterpret()
assert msg.startswith("TypeError: ('hello' + 5)")
def test_excinfo_exconly():
excinfo = py.test.raises(ValueError, h)
assert excinfo.exconly().startswith('ValueError')
excinfo = py.test.raises(ValueError,
"raise ValueError('hello\\nworld')")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith('ValueError')
assert msg.endswith("world")
def test_excinfo_repr():
excinfo = py.test.raises(ValueError, h)
s = repr(excinfo)
assert s == "<ExceptionInfo ValueError tblen=4>"
def test_excinfo_str():
excinfo = py.test.raises(ValueError, h)
s = str(excinfo)
assert s.startswith(__file__[:-9]) # pyc file and $py.class
assert s.endswith("ValueError")
assert len(s.split(":")) >= 3 # on windows it's 4
def test_excinfo_errisinstance():
excinfo = py.test.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec ("raise ValueError()")
except ValueError:
excinfo = py.code.ExceptionInfo()
s = str(excinfo.traceback[-1])
assert s == " File '<string>':1 in <module>\n ???\n"
def test_excinfo_no_python_sourcecode(tmpdir):
#XXX: simplified locally testable version
tmpdir.join('test.txt').write("{{ h()}}:")
jinja2 = py.test.importorskip('jinja2')
loader = jinja2.FileSystemLoader(str(tmpdir))
env = jinja2.Environment(loader=loader)
template = env.get_template('test.txt')
excinfo = py.test.raises(ValueError,
template.render, h=h)
for item in excinfo.traceback:
print(item) # XXX: for some reason jinja.Template.render is printed in full
item.source # shouldnt fail
if item.path.basename == 'test.txt':
assert str(item.source) == '{{ h()}}:'
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, py.path.local)
assert path.basename.lower() == "queue.py"
assert path.check()
class TestFormattedExcinfo:
def pytest_funcarg__importasmod(self, request):
def importasmod(source):
source = py.code.Source(source)
tmpdir = request.getfuncargvalue("tmpdir")
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
if invalidate_import_caches is not None:
invalidate_import_caches()
return modpath.pyimport()
return importasmod
def excinfo_from_exec(self, source):
source = py.code.Source(source).strip()
try:
exec (source.compile())
except KeyboardInterrupt:
raise
except:
return py.code.ExceptionInfo()
assert 0, "did not raise"
def test_repr_source(self):
pr = FormattedExcinfo()
source = py.code.Source("""
def f(x):
pass
""").strip()
pr.flow_marker = "|"
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
@broken_on_modern_pytest
def test_repr_source_excinfo(self):
""" check if indentation is right """
pr = FormattedExcinfo()
excinfo = self.excinfo_from_exec("""
def f():
assert 0
f()
""")
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
lines = pr.get_source(source, 1, excinfo)
assert lines == [
' def f():',
'> assert 0',
'E assert 0'
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("""
a = 1
raise ValueError()
""", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self):
pr = FormattedExcinfo()
class FakeCode(object):
class raw:
co_filename = '?'
path = '?'
firstlineno = 5
def fullsource(self):
return None
fullsource = property(fullsource)
class FakeFrame(object):
code = FakeCode()
f_locals = {}
f_globals = {}
class FakeTracebackEntry(py.code.Traceback.Entry):
def __init__(self, tb):
self.lineno = 5+3
@property
def frame(self):
return FakeFrame()
class Traceback(py.code.Traceback):
Entry = FakeTracebackEntry
class FakeExcinfo(py.code.ExceptionInfo):
typename = "Foo"
def __init__(self):
pass
def exconly(self, tryshort):
return "EXC"
def errisinstance(self, cls):
return False
excinfo = FakeExcinfo()
class FakeRawTB(object):
tb_next = None
tb = FakeRawTB()
excinfo.traceback = Traceback(tb)
fail = IOError()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
fail = py.error.ENOENT
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
def test_repr_local(self):
p = FormattedExcinfo(showlocals=True)
loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == '__builtins__ = <builtins>'
assert reprlocals.lines[1] == 'x = 3'
assert reprlocals.lines[2] == 'y = 5'
assert reprlocals.lines[3] == 'z = 7'
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter()
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == 'E ValueError: hello'
assert lines[3] == 'E world'
assert not lines[4:]
loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
#assert loc.message == "ValueError: hello"
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('m', repr("m"*90))
assert reprfuncargs.args[1] == ('x', '5')
assert reprfuncargs.args[2] == ('y', '13')
assert reprfuncargs.args[3] == ('z', repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "m = " + repr('m' * 90)
assert tw.lines[1] == "x = 5, y = 13"
assert tw.lines[2] == "z = " + repr('z' * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
mod = importasmod("""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('x', repr('a'))
assert reprfuncargs.args[1] == ('y', repr(('b',)))
assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == 'E ValueError: hello'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == 'E ValueError: hello'
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
try:
mod.entry()
except ValueError:
excinfo = py.code.ExceptionInfo()
from py._code.code import Code
monkeypatch.setattr(Code, 'path', 'bogus')
excinfo.traceback[0].frame.code.path = "bogus"
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
monkeypatch.undo()
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == 'E ValueError: hello'
def test_repr_traceback_and_excinfo(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("long", "short"):
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo()
def raiseos():
raise OSError(2)
monkeypatch.setattr('os.getcwd', raiseos)
assert p._makepath(__file__) == __file__
reprtb = p.repr_traceback(excinfo)
@broken_on_modern_pytest
def test_repr_excinfo_addouterr(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
twmock = TWMock()
repr.toterminal(twmock)
assert twmock.lines[-1] == "content"
assert twmock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod("""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
""")
excinfo = py.test.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
@broken_on_modern_pytest
def test_tb_entry_AssertionError(self, importasmod):
# probably this test is a bit redundant
# as py/magic/testing/test_assertion.py
# already tests correctness of
# assertion-reinterpretation logic
mod = importasmod("""
def somefunc():
x = 1
assert x == 2
""")
excinfo = py.test.raises(AssertionError, mod.somefunc)
p = FormattedExcinfo()
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[-1] == "E assert 1 == 2"
def test_reprexcinfo_getrepr(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
try:
mod.entry()
except ValueError:
excinfo = py.code.ExceptionInfo()
for style in ("short", "long", "no"):
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert isinstance(repr, ReprExceptionInfo)
assert repr.reprtraceback.style == style
def test_reprexcinfo_unicode(self):
from py._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw):
tw.line(py.builtin._totext("я", "utf-8"))
x = py.builtin._totext(MyRepr())
assert x == py.builtin._totext("я", "utf-8")
@broken_on_modern_pytest
def test_toterminal_long(self, importasmod):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == " def f():"
assert tw.lines[1] == "> g(3)"
assert tw.lines[2] == ""
assert tw.lines[3].endswith("mod.py:5: ")
assert tw.lines[4] == ("_ ", None)
assert tw.lines[5] == ""
assert tw.lines[6] == " def g(x):"
assert tw.lines[7] == "> raise ValueError(x)"
assert tw.lines[8] == "E ValueError: 3"
assert tw.lines[9] == ""
assert tw.lines[10].endswith("mod.py:3: ValueError")
@broken_on_modern_pytest
def test_toterminal_long_missing_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').remove()
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
@broken_on_modern_pytest
def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').write('asdf')
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
@broken_on_modern_pytest
def test_toterminal_long_filenames(self, importasmod):
mod = importasmod("""
def f():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = TWMock()
path = py.path.local(mod.__file__)
old = path.dirpath().chdir()
try:
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw)
line = tw.lines[-1]
x = py.path.local().bestrelpath(path)
if len(x) < len(str(path)):
assert line == "mod.py:3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw)
line = tw.lines[-1]
assert line == "%s:3: ValueError" %(path,)
finally:
old.chdir()
@pytest.mark.parametrize('style', ("long", "short", "no"))
@pytest.mark.parametrize('showlocals', (True, False),
ids=['locals', 'nolocals'])
@pytest.mark.parametrize('tbfilter', (True, False),
ids=['tbfilter', 'nofilter'])
@pytest.mark.parametrize('funcargs', (True, False),
ids=['funcargs', 'nofuncargs'])
def test_format_excinfo(self, importasmod,
style, showlocals, tbfilter, funcargs):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = py.io.TerminalWriter(stringio=True)
repr = excinfo.getrepr(
style=style,
showlocals=showlocals,
funcargs=funcargs,
tbfilter=tbfilter
)
repr.toterminal(tw)
assert tw.stringio.getvalue()
@broken_on_modern_pytest
def test_native_style(self):
excinfo = self.excinfo_from_exec("""
assert 0
""")
repr = excinfo.getrepr(style='native')
assert "assert 0" in str(repr.reprcrash)
s = str(repr)
assert s.startswith('Traceback (most recent call last):\n File')
assert s.endswith('\nAssertionError: assert 0')
assert 'exec (source.compile())' in s
assert s.count('assert 0') == 2
@broken_on_modern_pytest
def test_traceback_repr_style(self, importasmod):
mod = importasmod("""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
excinfo.traceback[1].set_repr_style("short")
excinfo.traceback[2].set_repr_style("short")
r = excinfo.getrepr(style="long")
tw = TWMock()
r.toterminal(tw)
for line in tw.lines: print (line)
assert tw.lines[0] == ""
assert tw.lines[1] == " def f():"
assert tw.lines[2] == "> g()"
assert tw.lines[3] == ""
assert tw.lines[4].endswith("mod.py:3: ")
assert tw.lines[5] == ("_ ", None)
assert tw.lines[6].endswith("in g")
assert tw.lines[7] == " h()"
assert tw.lines[8].endswith("in h")
assert tw.lines[9] == " i()"
assert tw.lines[10] == ("_ ", None)
assert tw.lines[11] == ""
assert tw.lines[12] == " def i():"
assert tw.lines[13] == "> raise ValueError()"
assert tw.lines[14] == "E ValueError"
assert tw.lines[15] == ""
assert tw.lines[16].endswith("mod.py:9: ValueError")
|
mSenyor/sl4a | refs/heads/master | python/src/Lib/plat-irix5/panel.py | 132 | # Module 'panel'
#
# Support for the Panel library.
# Uses built-in module 'pnl'.
# Applications should use 'panel.function' instead of 'pnl.function';
# most 'pnl' functions are transparently exported by 'panel',
# but dopanel() is overridden and you have to use this version
# if you want to use callbacks.
from warnings import warnpy3k
warnpy3k("the panel module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import pnl
debug = 0
# Test if an object is a list.
#
def is_list(x):
return type(x) == type([])
# Reverse a list.
#
def reverse(list):
res = []
for item in list:
res.insert(0, item)
return res
# Get an attribute of a list, which may itself be another list.
# Don't use 'prop' for name.
#
def getattrlist(list, name):
for item in list:
if item and is_list(item) and item[0] == name:
return item[1:]
return []
# Get a property of a list, which may itself be another list.
#
def getproplist(list, name):
for item in list:
if item and is_list(item) and item[0] == 'prop':
if len(item) > 1 and item[1] == name:
return item[2:]
return []
# Test if an actuator description contains the property 'end-of-group'
#
def is_endgroup(list):
x = getproplist(list, 'end-of-group')
return (x and x[0] == '#t')
# Neatly display an actuator definition given as S-expression
# the prefix string is printed before each line.
#
def show_actuator(prefix, a):
for item in a:
if not is_list(item):
print prefix, item
elif item and item[0] == 'al':
print prefix, 'Subactuator list:'
for a in item[1:]:
show_actuator(prefix + ' ', a)
elif len(item) == 2:
print prefix, item[0], '=>', item[1]
elif len(item) == 3 and item[0] == 'prop':
print prefix, 'Prop', item[1], '=>',
print item[2]
else:
print prefix, '?', item
# Neatly display a panel.
#
def show_panel(prefix, p):
for item in p:
if not is_list(item):
print prefix, item
elif item and item[0] == 'al':
print prefix, 'Actuator list:'
for a in item[1:]:
show_actuator(prefix + ' ', a)
elif len(item) == 2:
print prefix, item[0], '=>', item[1]
elif len(item) == 3 and item[0] == 'prop':
print prefix, 'Prop', item[1], '=>',
print item[2]
else:
print prefix, '?', item
# Exception raised by build_actuator or build_panel.
#
panel_error = 'panel error'
# Dummy callback used to initialize the callbacks.
#
def dummy_callback(arg):
pass
# Assign attributes to members of the target.
# Attribute names in exclist are ignored.
# The member name is the attribute name prefixed with the prefix.
#
def assign_members(target, attrlist, exclist, prefix):
for item in attrlist:
if is_list(item) and len(item) == 2 and item[0] not in exclist:
name, value = item[0], item[1]
ok = 1
if value[0] in '-0123456789':
value = eval(value)
elif value[0] == '"':
value = value[1:-1]
elif value == 'move-then-resize':
# Strange default set by Panel Editor...
ok = 0
else:
print 'unknown value', value, 'for', name
ok = 0
if ok:
lhs = 'target.' + prefix + name
stmt = lhs + '=' + repr(value)
if debug: print 'exec', stmt
try:
exec stmt + '\n'
except KeyboardInterrupt: # Don't catch this!
raise KeyboardInterrupt
except:
print 'assign failed:', stmt
# Build a real actuator from an actuator description.
# Return a pair (actuator, name).
#
def build_actuator(descr):
namelist = getattrlist(descr, 'name')
if namelist:
# Assume it is a string
actuatorname = namelist[0][1:-1]
else:
actuatorname = ''
type = descr[0]
if type[:4] == 'pnl_': type = type[4:]
act = pnl.mkact(type)
act.downfunc = act.activefunc = act.upfunc = dummy_callback
#
assign_members(act, descr[1:], ['al', 'data', 'name'], '')
#
# Treat actuator-specific data
#
datalist = getattrlist(descr, 'data')
prefix = ''
if type[-4:] == 'puck':
prefix = 'puck_'
elif type == 'mouse':
prefix = 'mouse_'
assign_members(act, datalist, [], prefix)
#
return act, actuatorname
# Build all sub-actuators and add them to the super-actuator.
# The super-actuator must already have been added to the panel.
# Sub-actuators with defined names are added as members to the panel
# so they can be referenced as p.name.
#
# Note: I have no idea how panel.endgroup() works when applied
# to a sub-actuator.
#
def build_subactuators(panel, super_act, al):
    #
    # This is nearly the same loop as below in build_panel(),
    # except a call is made to addsubact() instead of addact().
    #
    for a in al:
        act, name = build_actuator(a)
        act.addsubact(super_act)
        if name:
            stmt = 'panel.' + name + ' = act'
            if debug: print 'exec', stmt
            exec stmt + '\n'
        if is_endgroup(a):
            panel.endgroup()
        sub_al = getattrlist(a, 'al')
        if sub_al:
            # recurse for nested actuator groups
            build_subactuators(panel, act, sub_al)
    #
    # Fix the actuator to which we just added subactuators.
    # This can't hurt (I hope) and is needed for the scroll actuator.
    #
    super_act.fixact()
# Build a real panel from a panel definition.
# Return a panel object p, where for each named actuator a, p.name is a
# reference to a.  Raises panel_error if the description does not start
# with 'panel'.
#
def build_panel(descr):
    #
    # Sanity check
    #
    if (not descr) or descr[0] != 'panel':
        raise panel_error, 'panel description must start with "panel"'
    #
    if debug: show_panel('', descr)
    #
    # Create an empty panel
    #
    panel = pnl.mkpanel()
    #
    # Assign panel attributes
    #
    assign_members(panel, descr[1:], ['al'], '')
    #
    # Look for actuator list
    #
    al = getattrlist(descr, 'al')
    #
    # The order in which actuators are created is important
    # because of the endgroup() operator.
    # Unfortunately the Panel Editor outputs the actuator list
    # in reverse order, so we reverse it here.
    #
    al = reverse(al)
    #
    for a in al:
        act, name = build_actuator(a)
        act.addact(panel)
        if name:
            # expose the named actuator as an attribute of the panel
            stmt = 'panel.' + name + ' = act'
            exec stmt + '\n'
        if is_endgroup(a):
            panel.endgroup()
        sub_al = getattrlist(a, 'al')
        if sub_al:
            build_subactuators(panel, act, sub_al)
    #
    return panel
# Wrapper around pnl.dopanel() which calls call-back functions.
# Returns the first element of pnl.dopanel()'s result.
#
def my_dopanel():
    # Extract only the first 4 elements to allow for future expansion
    a, down, active, up = pnl.dopanel()[:4]
    if down:
        down.downfunc(down)
    if active:
        active.activefunc(active)
    if up:
        up.upfunc(up)
    return a
# Create one or more panels from a description file (S-expressions)
# generated by the Panel Editor.
#
def defpanellist(file):
    # Parse the description file, then build one panel per top-level entry.
    # The original version leaked the open file object; close it explicitly
    # (try/finally rather than `with` to stay compatible with this module's
    # very old Python 2 dialect).
    import panelparser
    fp = open(file, 'r')
    try:
        descrlist = panelparser.parse_file(fp)
    finally:
        fp.close()
    panellist = []
    for descr in descrlist:
        panellist.append(build_panel(descr))
    return panellist
# Import everything from built-in method pnl, so the user can always
# use panel.foo() instead of pnl.foo().
# This gives *no* performance penalty once this module is imported.
#
from pnl import * # for export
dopanel = my_dopanel # override pnl.dopanel
|
urbn/kombu | refs/heads/master | t/integration/tests/test_qpid.py | 5 | from __future__ import absolute_import, unicode_literals
from funtests import transport
from kombu.tests.case import skip
@skip.unless_module('qpid.messaging')
class test_qpid(transport.TransportCase):
    # Integration-test case for the kombu 'qpid' transport; skipped when
    # the qpid.messaging client library is not installed.
    transport = 'qpid'  # transport alias under test
    prefix = 'qpid'     # queue-name prefix used by the test harness
|
ninuxorg/nodeshot | refs/heads/master | nodeshot/community/notifications/admin.py | 5 | from django.contrib import admin
from nodeshot.core.base.admin import BaseAdmin
from .models import * # noqa
from .settings import settings
class NotificationAdmin(BaseAdmin):
    # Django admin configuration for Notification records.
    list_display = ('to_user', 'type', 'text', 'is_read', 'added', 'updated')
    list_filter = ('type', 'is_read', 'added')
    # raw_id_fields + autocomplete avoid rendering every user in a <select>.
    raw_id_fields = ('from_user', 'to_user')
    autocomplete_lookup_fields = {
        'fk': ['from_user', 'to_user'],
    }
admin.site.register(Notification, NotificationAdmin)
# Attach per-user notification-settings inlines to the profile admin,
# but only when the profiles app is enabled in this deployment.
if 'nodeshot.community.profiles' in settings.INSTALLED_APPS:
    class UserWebNotificationSettingsInline(admin.StackedInline):
        model = UserWebNotificationSettings
        extra = 1

    class UserEmailNotificationSettingsInline(admin.StackedInline):
        model = UserEmailNotificationSettings
        extra = 1

    from nodeshot.community.profiles.admin import UserAdmin
    additional_inlines = [UserWebNotificationSettingsInline, UserEmailNotificationSettingsInline]
    # NOTE(review): mutates UserAdmin.inlines at import time; assumes this
    # module is imported once during admin autodiscovery.
    UserAdmin.inlines = UserAdmin.inlines + additional_inlines
|
wd5/jangr | refs/heads/master | documents/admin.py | 1 | from util import admin
from documents.models import *
class DocumentInCollectionInline(admin.TabularInline):
    # Inline editor for the Document<->Collection membership model,
    # shown ordered by its explicit 'order' field.
    model = DocumentInCollection
    ordering = ('order',)
class DocumentAdmin(admin.ModelAdmin):
    """Admin configuration for Document: list title and type columns."""
    # (Removed a redundant trailing `pass`; the class body is non-empty.)
    list_display = ('title', 'type')
class DocumentCollectionAdmin(admin.ModelAdmin):
    """Admin configuration for DocumentCollection with inline membership editing."""
    # (Removed a redundant trailing `pass`; the class body is non-empty.)
    list_display = ('title',)
    inlines = [DocumentInCollectionInline]
admin.site.register(Document, DocumentAdmin)
admin.site.register(DocumentCollection, DocumentCollectionAdmin) |
ftrader-bitcoinunlimited/hardfork_prototype_1_mvf-bu | refs/heads/master | qa/rpc-tests/wallet.py | 2 | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
return curr_balance
    def setup_chain(self, bitcoinConfDict=None, wallets=None):
        # Start from a completely clean chain (no cached pre-mined blocks)
        # so the balances asserted in run_test are fully predictable.
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)
    def setup_network(self, split=False):
        # Three nodes connected in a triangle; no network split is used.
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        self.is_network_split = False
        self.sync_all()
    def run_test (self):
        """Exercise basic wallet RPC behaviour across three (later four) nodes.

        Covers: balances/UTXO accounting, coin locking, raw transactions,
        fee checking, resendwallettransactions, zero-value outputs,
        -walletbroadcast, string/scientific-notation amounts, watch-only
        imports, unicode account labels, and wallet maintenance flags.
        """
        # Check that there's no UTXO on none of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)
        print("Mining blocks...")
        self.nodes[0].generate(1)
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 50)
        assert_equal(walletinfo['balance'], 0)
        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that only first and second nodes have UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)
        # Send 21 BTC from 0 to 2 using sendtoaddress call.
        # Second transaction will be child of first, and will require a fee
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)
        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()
        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)
        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()
        # node0 should end up with 100 btc in block rewards plus fees, but
        # minus the 21 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 100-21)
        assert_equal(self.nodes[2].getbalance(), 21)
        # Node0 should have two unspent outputs.
        # Create a couple of transactions to send them to node2, submit them through
        # node1, and make sure both node0 and node2 pick them up properly:
        node0utxos = self.nodes[0].listunspent(1)
        assert_equal(len(node0utxos), 2)
        # create both transactions
        txns_to_send = []
        for utxo in node0utxos:
            inputs = []
            outputs = {}
            inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
            outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
            raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
            txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
        # Have node 1 (miner) send the transactions
        self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
        self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
        # Have node1 mine a block to confirm transactions:
        self.nodes[1].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 100)
        assert_equal(self.nodes[2].getbalance("from1"), 100-21)
        # Send 10 BTC normal
        address = self.nodes[0].getnewaddress("test")
        fee_per_byte = Decimal('0.001') / 1000
        self.nodes[2].settxfee(fee_per_byte * 1000)
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('90'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), Decimal('10'))
        # Send 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        # Sendmany 10 BTC
        txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
        self.nodes[2].generate(1)
        self.sync_all()
        node_0_bal += Decimal('10')
        node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        assert_equal(self.nodes[0].getbalance(), node_0_bal)
        # Sendmany 10 BTC with subtract fee from amount
        txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
        self.nodes[2].generate(1)
        self.sync_all()
        node_2_bal -= Decimal('10')
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
        # Test ResendWalletTransactions:
        # Create a couple of transactions, then start up a fourth
        # node (nodes[3]) and ask nodes[0] to rebroadcast.
        # EXPECT: nodes[3] should have those transactions in its mempool.
        txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        sync_mempools(self.nodes)
        self.nodes.append(start_node(3, self.options.tmpdir))
        connect_nodes_bi(self.nodes, 0, 3)
        sync_blocks(self.nodes)
        relayed = self.nodes[0].resendwallettransactions()
        assert_equal(set(relayed), {txid1, txid2})
        sync_mempools(self.nodes)
        assert(txid1 in self.nodes[3].getrawmempool())
        # Exercise balance rpcs
        assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
        assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
        #check if we can list zero value tx as available coins
        #1. create rawtx
        #2. hex-changed one output to 0.0
        #3. sign and send
        #4. check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid= decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1) #mine a block
        self.sync_all()
        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)
        #do some -walletbroadcast tests
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        self.sync_all()
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        self.nodes[1].generate(1) #mine a block, tx should not be in there
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
        #now broadcast from another node, mine a block, sync, and check the balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        #create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
        #restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        sync_blocks(self.nodes)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2
        #tx should be added to balance because after restarting the nodes tx should be broadcastet
        assert_equal(self.nodes[2].getbalance(), node_2_bal)
        #send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))
        #check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))
        try:
            txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException as e:
            assert("Invalid amount" in e.error['message'])
        else:
            raise AssertionError("Must not parse invalid amounts")
        try:
            self.nodes[0].generate("2")
            raise AssertionError("Must not accept strings as numeric")
        except JSONRPCException as e:
            assert("not an integer" in e.error['message'])
        # Import address and private key to check correct behavior of spendable unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all()
        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)
        # 3. Validate that the imported address is watch-only on node1
        assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})
        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)
        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})
        # Mine a block from node0 to an address from node1
        cbAddr = self.nodes[1].getnewaddress()
        blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
        cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
        self.sync_all()
        # Check that the txid and balance is found by node1
        try:
            self.nodes[1].gettransaction(cbTxId)
        except JSONRPCException as e:
            assert("Invalid or non-wallet transaction id" not in e.error['message'])
        #check if wallet or blochchain maintenance changes the balance
        self.sync_all()
        blocks = self.nodes[0].generate(2)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()
        # Check modes:
        #   - True: unicode escaped as \u....
        #   - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane respectively
            for s in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getaccountaddress(s)
                label = self.nodes[0].getaccount(addr)
                assert_equal(label, s)
                assert(s in self.nodes[0].listaccounts().keys())
        self.nodes[0].ensure_ascii = True # restore to default
        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            '-salvagewallet',
        ]
        for m in maintenance:
            print("check " + m)
            stop_nodes(self.nodes)
            wait_bitcoinds()
            self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # reindex will leave rpc warm up "early"; Wait for it to finish
                time.sleep(0.1)
            assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
    # Script entry point: run the wallet test suite.
    WalletTest ().main ()
|
0jpq0/kbengine | refs/heads/master | kbe/src/lib/python/PCbuild/field3.py | 233 | # An absurd workaround for the lack of arithmetic in MS's resource compiler.
# After building Python, run this, then paste the output into the appropriate
# part of PC\python_nt.rc.
# Example output:
#
# * For 2.3a0,
# * PY_MICRO_VERSION = 0
# * PY_RELEASE_LEVEL = 'alpha' = 0xA
# * PY_RELEASE_SERIAL = 1
# *
# * and 0*1000 + 10*10 + 1 = 101.
# */
# #define FIELD3 101
import sys

# Map the release-level name reported by sys.version_info to the
# hexadecimal digit used in the resource-file version field.
_LEVEL_DIGITS = {
    'alpha': 0xA,
    'beta': 0xB,
    'candidate': 0xC,
    'final': 0xF,
}

major, minor, micro, level, serial = sys.version_info
levelnum = _LEVEL_DIGITS[level]
string = sys.version.split()[0]  # like '2.3a0'

# Emit a comment block plus the FIELD3 define, ready to paste into
# PC\python_nt.rc.
print(" * For %s," % string)
print(" * PY_MICRO_VERSION = %d" % micro)
print(" * PY_RELEASE_LEVEL = %r = %s" % (level, hex(levelnum)))
print(" * PY_RELEASE_SERIAL = %d" % serial)
print(" *")
field3 = micro * 1000 + levelnum * 10 + serial
print(" * and %d*1000 + %d*10 + %d = %d" % (micro, levelnum, serial, field3))
print(" */")
print("#define FIELD3", field3)
|
robhudson/kuma | refs/heads/master | vendor/packages/pygments/lexers/business.py | 72 | # -*- coding: utf-8 -*-
"""
pygments.lexers.business
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for "business-oriented" languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from pygments.lexers._openedge_builtins import OPENEDGEKEYWORDS
__all__ = ['CobolLexer', 'CobolFreeformatLexer', 'ABAPLexer', 'OpenEdgeLexer',
'GoodDataCLLexer', 'MaqlLexer']
class CobolLexer(RegexLexer):
    """
    Lexer for OpenCOBOL code.

    .. versionadded:: 1.6
    """
    name = 'COBOL'
    aliases = ['cobol']
    filenames = ['*.cob', '*.COB', '*.cpy', '*.CPY']
    mimetypes = ['text/x-cobol']
    flags = re.IGNORECASE | re.MULTILINE

    # Data Types: by PICTURE and USAGE
    # Operators: **, *, +, -, /, <, >, <=, >=, =, <>
    # Logical (?): NOT, AND, OR
    # Reserved words:
    #   http://opencobol.add1tocobol.com/#reserved-words
    # Intrinsics:
    #   http://opencobol.add1tocobol.com/#does-opencobol-implement-any-intrinsic-functions

    tokens = {
        'root': [
            include('comment'),
            include('strings'),
            include('core'),
            include('nums'),
            (r'[a-z0-9]([\w\-]*[a-z0-9]+)?', Name.Variable),
            # (r'[\s]+', Text),
            (r'[ \t]+', Text),
        ],
        'comment': [
            (r'(^.{6}[*/].*\n|^.{6}|\*>.*\n)', Comment),
        ],
        'core': [
            # Figurative constants
            (r'(^|(?<=[^0-9a-z_\-]))(ALL\s+)?'
             r'((ZEROES)|(HIGH-VALUE|LOW-VALUE|QUOTE|SPACE|ZERO)(S)?)'
             r'\s*($|(?=[^0-9a-z_\-]))',
             Name.Constant),

            # Reserved words STATEMENTS and other bolds
            (words((
                'ACCEPT', 'ADD', 'ALLOCATE', 'CALL', 'CANCEL', 'CLOSE', 'COMPUTE',
                'CONFIGURATION', 'CONTINUE', 'DATA', 'DELETE', 'DISPLAY', 'DIVIDE',
                'DIVISION', 'ELSE', 'END', 'END-ACCEPT',
                'END-ADD', 'END-CALL', 'END-COMPUTE', 'END-DELETE', 'END-DISPLAY',
                'END-DIVIDE', 'END-EVALUATE', 'END-IF', 'END-MULTIPLY', 'END-OF-PAGE',
                'END-PERFORM', 'END-READ', 'END-RETURN', 'END-REWRITE', 'END-SEARCH',
                'END-START', 'END-STRING', 'END-SUBTRACT', 'END-UNSTRING', 'END-WRITE',
                'ENVIRONMENT', 'EVALUATE', 'EXIT', 'FD', 'FILE', 'FILE-CONTROL', 'FOREVER',
                'FREE', 'GENERATE', 'GO', 'GOBACK', 'IDENTIFICATION', 'IF', 'INITIALIZE',
                'INITIATE', 'INPUT-OUTPUT', 'INSPECT', 'INVOKE', 'I-O-CONTROL', 'LINKAGE',
                'LOCAL-STORAGE', 'MERGE', 'MOVE', 'MULTIPLY', 'OPEN', 'PERFORM',
                'PROCEDURE', 'PROGRAM-ID', 'RAISE', 'READ', 'RELEASE', 'RESUME',
                'RETURN', 'REWRITE', 'SCREEN', 'SD', 'SEARCH', 'SECTION', 'SET',
                'SORT', 'START', 'STOP', 'STRING', 'SUBTRACT', 'SUPPRESS',
                'TERMINATE', 'THEN', 'UNLOCK', 'UNSTRING', 'USE', 'VALIDATE',
                'WORKING-STORAGE', 'WRITE'), prefix=r'(^|(?<=[^0-9a-z_\-]))',
                suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
             Keyword.Reserved),

            # Reserved words
            (words((
                'ACCESS', 'ADDRESS', 'ADVANCING', 'AFTER', 'ALL',
                'ALPHABET', 'ALPHABETIC', 'ALPHABETIC-LOWER', 'ALPHABETIC-UPPER',
                # Bug fix: a missing comma after 'ALTERNATE' caused implicit
                # string concatenation with 'ANY' ('ALTERNATEANY'), so neither
                # keyword was ever recognized.
                'ALPHANUMERIC', 'ALPHANUMERIC-EDITED', 'ALSO', 'ALTER', 'ALTERNATE',
                'ANY', 'ARE', 'AREA', 'AREAS', 'ARGUMENT-NUMBER', 'ARGUMENT-VALUE', 'AS',
                'ASCENDING', 'ASSIGN', 'AT', 'AUTO', 'AUTO-SKIP', 'AUTOMATIC', 'AUTOTERMINATE',
                'BACKGROUND-COLOR', 'BASED', 'BEEP', 'BEFORE', 'BELL',
                'BLANK', 'BLINK', 'BLOCK', 'BOTTOM', 'BY', 'BYTE-LENGTH', 'CHAINING',
                'CHARACTER', 'CHARACTERS', 'CLASS', 'CODE', 'CODE-SET', 'COL', 'COLLATING',
                'COLS', 'COLUMN', 'COLUMNS', 'COMMA', 'COMMAND-LINE', 'COMMIT', 'COMMON',
                'CONSTANT', 'CONTAINS', 'CONTENT', 'CONTROL',
                'CONTROLS', 'CONVERTING', 'COPY', 'CORR', 'CORRESPONDING', 'COUNT', 'CRT',
                'CURRENCY', 'CURSOR', 'CYCLE', 'DATE', 'DAY', 'DAY-OF-WEEK', 'DE', 'DEBUGGING',
                'DECIMAL-POINT', 'DECLARATIVES', 'DEFAULT', 'DELIMITED',
                'DELIMITER', 'DEPENDING', 'DESCENDING', 'DETAIL', 'DISK',
                'DOWN', 'DUPLICATES', 'DYNAMIC', 'EBCDIC',
                'ENTRY', 'ENVIRONMENT-NAME', 'ENVIRONMENT-VALUE', 'EOL', 'EOP',
                'EOS', 'ERASE', 'ERROR', 'ESCAPE', 'EXCEPTION',
                'EXCLUSIVE', 'EXTEND', 'EXTERNAL',
                'FILE-ID', 'FILLER', 'FINAL', 'FIRST', 'FIXED', 'FLOAT-LONG', 'FLOAT-SHORT',
                'FOOTING', 'FOR', 'FOREGROUND-COLOR', 'FORMAT', 'FROM', 'FULL', 'FUNCTION',
                'FUNCTION-ID', 'GIVING', 'GLOBAL', 'GROUP',
                'HEADING', 'HIGHLIGHT', 'I-O', 'ID',
                'IGNORE', 'IGNORING', 'IN', 'INDEX', 'INDEXED', 'INDICATE',
                'INITIAL', 'INITIALIZED', 'INPUT',
                'INTO', 'INTRINSIC', 'INVALID', 'IS', 'JUST', 'JUSTIFIED', 'KEY', 'LABEL',
                'LAST', 'LEADING', 'LEFT', 'LENGTH', 'LIMIT', 'LIMITS', 'LINAGE',
                'LINAGE-COUNTER', 'LINE', 'LINES', 'LOCALE', 'LOCK',
                'LOWLIGHT', 'MANUAL', 'MEMORY', 'MINUS', 'MODE',
                'MULTIPLE', 'NATIONAL', 'NATIONAL-EDITED', 'NATIVE',
                'NEGATIVE', 'NEXT', 'NO', 'NULL', 'NULLS', 'NUMBER', 'NUMBERS', 'NUMERIC',
                'NUMERIC-EDITED', 'OBJECT-COMPUTER', 'OCCURS', 'OF', 'OFF', 'OMITTED', 'ON', 'ONLY',
                'OPTIONAL', 'ORDER', 'ORGANIZATION', 'OTHER', 'OUTPUT', 'OVERFLOW',
                'OVERLINE', 'PACKED-DECIMAL', 'PADDING', 'PAGE', 'PARAGRAPH',
                'PLUS', 'POINTER', 'POSITION', 'POSITIVE', 'PRESENT', 'PREVIOUS',
                'PRINTER', 'PRINTING', 'PROCEDURE-POINTER', 'PROCEDURES',
                'PROCEED', 'PROGRAM', 'PROGRAM-POINTER', 'PROMPT', 'QUOTE',
                'QUOTES', 'RANDOM', 'RD', 'RECORD', 'RECORDING', 'RECORDS', 'RECURSIVE',
                'REDEFINES', 'REEL', 'REFERENCE', 'RELATIVE', 'REMAINDER', 'REMOVAL',
                'RENAMES', 'REPLACING', 'REPORT', 'REPORTING', 'REPORTS', 'REPOSITORY',
                'REQUIRED', 'RESERVE', 'RETURNING', 'REVERSE-VIDEO', 'REWIND',
                'RIGHT', 'ROLLBACK', 'ROUNDED', 'RUN', 'SAME', 'SCROLL',
                'SECURE', 'SEGMENT-LIMIT', 'SELECT', 'SENTENCE', 'SEPARATE',
                'SEQUENCE', 'SEQUENTIAL', 'SHARING', 'SIGN', 'SIGNED', 'SIGNED-INT',
                'SIGNED-LONG', 'SIGNED-SHORT', 'SIZE', 'SORT-MERGE', 'SOURCE',
                'SOURCE-COMPUTER', 'SPECIAL-NAMES', 'STANDARD',
                'STANDARD-1', 'STANDARD-2', 'STATUS', 'SUM',
                'SYMBOLIC', 'SYNC', 'SYNCHRONIZED', 'TALLYING', 'TAPE',
                'TEST', 'THROUGH', 'THRU', 'TIME', 'TIMES', 'TO', 'TOP', 'TRAILING',
                'TRANSFORM', 'TYPE', 'UNDERLINE', 'UNIT', 'UNSIGNED',
                'UNSIGNED-INT', 'UNSIGNED-LONG', 'UNSIGNED-SHORT', 'UNTIL', 'UP',
                'UPDATE', 'UPON', 'USAGE', 'USING', 'VALUE', 'VALUES', 'VARYING',
                'WAIT', 'WHEN', 'WITH', 'WORDS', 'YYYYDDD', 'YYYYMMDD'),
                prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
             Keyword.Pseudo),

            # inactive reserved words
            (words((
                'ACTIVE-CLASS', 'ALIGNED', 'ANYCASE', 'ARITHMETIC', 'ATTRIBUTE', 'B-AND',
                'B-NOT', 'B-OR', 'B-XOR', 'BIT', 'BOOLEAN', 'CD', 'CENTER', 'CF', 'CH', 'CHAIN', 'CLASS-ID',
                'CLASSIFICATION', 'COMMUNICATION', 'CONDITION', 'DATA-POINTER',
                'DESTINATION', 'DISABLE', 'EC', 'EGI', 'EMI', 'ENABLE', 'END-RECEIVE',
                'ENTRY-CONVENTION', 'EO', 'ESI', 'EXCEPTION-OBJECT', 'EXPANDS', 'FACTORY',
                'FLOAT-BINARY-16', 'FLOAT-BINARY-34', 'FLOAT-BINARY-7',
                'FLOAT-DECIMAL-16', 'FLOAT-DECIMAL-34', 'FLOAT-EXTENDED', 'FORMAT',
                'FUNCTION-POINTER', 'GET', 'GROUP-USAGE', 'IMPLEMENTS', 'INFINITY',
                'INHERITS', 'INTERFACE', 'INTERFACE-ID', 'INVOKE', 'LC_ALL', 'LC_COLLATE',
                'LC_CTYPE', 'LC_MESSAGES', 'LC_MONETARY', 'LC_NUMERIC', 'LC_TIME',
                'LINE-COUNTER', 'MESSAGE', 'METHOD', 'METHOD-ID', 'NESTED', 'NONE', 'NORMAL',
                'OBJECT', 'OBJECT-REFERENCE', 'OPTIONS', 'OVERRIDE', 'PAGE-COUNTER', 'PF', 'PH',
                'PROPERTY', 'PROTOTYPE', 'PURGE', 'QUEUE', 'RAISE', 'RAISING', 'RECEIVE',
                'RELATION', 'REPLACE', 'REPRESENTS-NOT-A-NUMBER', 'RESET', 'RESUME', 'RETRY',
                'RF', 'RH', 'SECONDS', 'SEGMENT', 'SELF', 'SEND', 'SOURCES', 'STATEMENT', 'STEP',
                'STRONG', 'SUB-QUEUE-1', 'SUB-QUEUE-2', 'SUB-QUEUE-3', 'SUPER', 'SYMBOL',
                'SYSTEM-DEFAULT', 'TABLE', 'TERMINAL', 'TEXT', 'TYPEDEF', 'UCS-4', 'UNIVERSAL',
                'USER-DEFAULT', 'UTF-16', 'UTF-8', 'VAL-STATUS', 'VALID', 'VALIDATE',
                'VALIDATE-STATUS'),
                prefix=r'(^|(?<=[^0-9a-z_\-]))', suffix=r'\s*($|(?=[^0-9a-z_\-]))'),
             Error),

            # Data Types
            (r'(^|(?<=[^0-9a-z_\-]))'
             r'(PIC\s+.+?(?=(\s|\.\s))|PICTURE\s+.+?(?=(\s|\.\s))|'
             r'(COMPUTATIONAL)(-[1-5X])?|(COMP)(-[1-5X])?|'
             r'BINARY-C-LONG|'
             r'BINARY-CHAR|BINARY-DOUBLE|BINARY-LONG|BINARY-SHORT|'
             r'BINARY)\s*($|(?=[^0-9a-z_\-]))', Keyword.Type),

            # Operators
            (r'(\*\*|\*|\+|-|/|<=|>=|<|>|==|/=|=)', Operator),

            # (r'(::)', Keyword.Declaration),
            (r'([(),;:&%.])', Punctuation),

            # Intrinsics
            (r'(^|(?<=[^0-9a-z_\-]))(ABS|ACOS|ANNUITY|ASIN|ATAN|BYTE-LENGTH|'
             r'CHAR|COMBINED-DATETIME|CONCATENATE|COS|CURRENT-DATE|'
             r'DATE-OF-INTEGER|DATE-TO-YYYYMMDD|DAY-OF-INTEGER|DAY-TO-YYYYDDD|'
             r'EXCEPTION-(?:FILE|LOCATION|STATEMENT|STATUS)|EXP10|EXP|E|'
             r'FACTORIAL|FRACTION-PART|INTEGER-OF-(?:DATE|DAY|PART)|INTEGER|'
             r'LENGTH|LOCALE-(?:DATE|TIME(?:-FROM-SECONDS)?)|LOG(?:10)?|'
             r'LOWER-CASE|MAX|MEAN|MEDIAN|MIDRANGE|MIN|MOD|NUMVAL(?:-C)?|'
             r'ORD(?:-MAX|-MIN)?|PI|PRESENT-VALUE|RANDOM|RANGE|REM|REVERSE|'
             r'SECONDS-FROM-FORMATTED-TIME|SECONDS-PAST-MIDNIGHT|SIGN|SIN|SQRT|'
             r'STANDARD-DEVIATION|STORED-CHAR-LENGTH|SUBSTITUTE(?:-CASE)?|'
             r'SUM|TAN|TEST-DATE-YYYYMMDD|TEST-DAY-YYYYDDD|TRIM|'
             r'UPPER-CASE|VARIANCE|WHEN-COMPILED|YEAR-TO-YYYY)\s*'
             r'($|(?=[^0-9a-z_\-]))', Name.Function),

            # Booleans
            (r'(^|(?<=[^0-9a-z_\-]))(true|false)\s*($|(?=[^0-9a-z_\-]))', Name.Builtin),
            # Comparing Operators
            (r'(^|(?<=[^0-9a-z_\-]))(equal|equals|ne|lt|le|gt|ge|'
             r'greater|less|than|not|and|or)\s*($|(?=[^0-9a-z_\-]))', Operator.Word),
        ],

        # \"[^\"\n]*\"|\'[^\'\n]*\'
        'strings': [
            # apparently strings can be delimited by EOL if they are continued
            # in the next line
            (r'"[^"\n]*("|\n)', String.Double),
            (r"'[^'\n]*('|\n)", String.Single),
        ],

        'nums': [
            (r'\d+(\s*|\.$|$)', Number.Integer),
            (r'[+-]?\d*\.\d+(E[-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*(E[-+]?\d+)?', Number.Float),
        ],
    }
class CobolFreeformatLexer(CobolLexer):
    """
    Lexer for Free format OpenCOBOL code.

    .. versionadded:: 1.6
    """
    name = 'COBOLFree'
    aliases = ['cobolfree']
    filenames = ['*.cbl', '*.CBL']
    mimetypes = []
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        # Only the 'comment' state differs from CobolLexer: free-format
        # sources use inline '*>' comments (or a leading '*' word) instead
        # of the fixed-column comment indicator.
        'comment': [
            (r'(\*>.*\n|^\w*\*.*$)', Comment),
        ],
    }
class ABAPLexer(RegexLexer):
    """
    Lexer for ABAP, SAP's integrated language.

    .. versionadded:: 1.1
    """
    name = 'ABAP'
    aliases = ['abap']
    filenames = ['*.abap']
    mimetypes = ['text/x-abap']

    flags = re.IGNORECASE | re.MULTILINE

    # Token state machine; rule order matters — earlier rules win, so
    # dashed keywords must precede their un-dashed prefixes.
    tokens = {
        'common': [
            (r'\s+', Text),
            (r'^\*.*$', Comment.Single),
            (r'\".*?\n', Comment.Single),
        ],
        'variable-names': [
            (r'<\S+>', Name.Variable),
            (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable),
        ],
        'root': [
            include('common'),
            # function calls
            (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)',
             bygroups(Keyword, Text, Name.Function)),
            (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
             r'TRANSACTION|TRANSFORMATION))\b',
             Keyword),
            (r'(FORM|PERFORM)(\s+)(\w+)',
             bygroups(Keyword, Text, Name.Function)),
            (r'(PERFORM)(\s+)(\()(\w+)(\))',
             bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation)),
            (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
             bygroups(Keyword, Text, Name.Function, Text, Keyword)),

            # method implementation
            (r'(METHOD)(\s+)([\w~]+)',
             bygroups(Keyword, Text, Name.Function)),
            # method calls
            (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)',
             bygroups(Text, Name.Variable, Operator, Name.Function)),
            # call methodnames returning style
            (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function),

            # keywords with dashes in them.
            # these need to be first, because for instance the -ID part
            # of MESSAGE-ID wouldn't get highlighted if MESSAGE was
            # first in the list of keywords.
            (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
             r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
             r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
             r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
             r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
             r'INTERFACE-POOL|INVERTED-DATE|'
             r'LOAD-OF-PROGRAM|LOG-POINT|'
             r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
             r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
             r'OUTPUT-LENGTH|PRINT-CONTROL|'
             r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
             r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
             r'TYPE-POOL|TYPE-POOLS'
             r')\b', Keyword),

            # keyword kombinations
            (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
             r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
             r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|'
             r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
             r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
             r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
             r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
             r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
             r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
             r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
             r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
             r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
             r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
             r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|'
             r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
             r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
             r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
             r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
             r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
             r'FREE\s(MEMORY|OBJECT)?|'
             r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
             r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
             r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
             r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
             r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
             r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
             r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
             r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
             r'SKIP|ULINE)|'
             r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
             r'TO LIST-PROCESSING|TO TRANSACTION)'
             r'(ENDING|STARTING)\s+AT|'
             r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
             r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
             r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
             r'(BEGIN|END)\s+OF|'
             r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
             r'COMPARING(\s+ALL\s+FIELDS)?|'
             r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|'
             r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
             r'END-OF-(DEFINITION|PAGE|SELECTION)|'
             r'WITH\s+FRAME(\s+TITLE)|'
             # simple kombinations
             r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
             r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
             r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
             r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
             r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
             r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
             r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword),

            # single word keywords.
            (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|'
             r'ASSIGN(ING)?|AT(\s+FIRST)?|'
             r'BACK|BLOCK|BREAK-POINT|'
             r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
             r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
             r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|'
             r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
             r'DETAIL|DIRECTORY|DIVIDE|DO|'
             r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
             r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|'
             r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|'
             r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|'
             r'HIDE|'
             r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
             r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
             r'LENGTH|LINES|LOAD|LOCAL|'
             r'JOIN|'
             r'KEY|'
             r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|'
             r'NODES|'
             r'OBLIGATORY|OF|OFF|ON|OVERLAY|'
             r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|'
             r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|'
             r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|'
             r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|'
             r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|'
             r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
             r'ULINE|UNDER|UNPACK|UPDATE|USING|'
             r'VALUE|VALUES|VIA|'
             r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword),

            # builtins
            (r'(abs|acos|asin|atan|'
             r'boolc|boolx|bit_set|'
             r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
             r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
             r'count|count_any_of|count_any_not_of|'
             r'dbmaxlen|distance|'
             r'escape|exp|'
             r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
             r'insert|'
             r'lines|log|log10|'
             r'match|matches|'
             r'nmax|nmin|numofchar|'
             r'repeat|replace|rescale|reverse|round|'
             r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
             r'substring|substring_after|substring_from|substring_before|substring_to|'
             r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
             r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),

            (r'&[0-9]', Name),
            (r'[0-9]+', Number.Integer),

            # operators which look like variable names before
            # parsing variable names.
            (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
             r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
             r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator),

            include('variable-names'),

            # standard oparators after variable names,
            # because < and > are part of field symbols.
            (r'[?*<>=\-+]', Operator),
            (r"'(''|[^'])*'", String.Single),
            (r"`([^`])*`", String.Single),
            (r'[/;:()\[\],.]', Punctuation)
        ],
    }
class OpenEdgeLexer(RegexLexer):
    """
    Lexer for `OpenEdge ABL (formerly Progress)
    <http://web.progress.com/en/openedge/abl.html>`_ source code.

    .. versionadded:: 1.5
    """
    name = 'OpenEdge ABL'
    aliases = ['openedge', 'abl', 'progress']
    filenames = ['*.p', '*.cls']
    mimetypes = ['text/x-openedge', 'application/x-openedge']

    # Built-in data types.  ABL keywords may be abbreviated, hence the many
    # prefixes of CHARACTER / DECIMAL / INTEGER.  Matches are case-insensitive
    # and must be bounded by non-identifier characters (or line start/end);
    # '-' counts as an identifier character in ABL, hence [^0-9a-z_\-].
    types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|'
             r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|'
             r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|'
             r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|'
             r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))')

    # Big keyword list built from OPENEDGEKEYWORDS (defined elsewhere in the
    # module), with the same word-boundary handling as `types` above.
    keywords = words(OPENEDGEKEYWORDS,
                     prefix=r'(?i)(^|(?<=[^0-9a-z_\-]))',
                     suffix=r'\s*($|(?=[^0-9a-z_\-]))')

    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'comment'),
            # '{...}' is the preprocessor include/argument syntax.
            (r'\{', Comment.Preproc, 'preprocessor'),
            (r'\s*&.*', Comment.Preproc),
            (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration),
            (types, Keyword.Type),
            (keywords, Name.Builtin),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Text),
            (r'[+*/=-]', Operator),
            (r'[.:()]', Punctuation),
            (r'.', Name.Variable),  # Lazy catch-all
        ],
        # Multiline comments nest in ABL, hence the #push on '/*'.
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        # Preprocessor braces may nest as well.
        'preprocessor': [
            (r'[^{}]', Comment.Preproc),
            (r'\{', Comment.Preproc, '#push'),
            (r'\}', Comment.Preproc, '#pop'),
        ],
    }
class GoodDataCLLexer(RegexLexer):
    """
    Lexer for `GoodData-CL
    <http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/\
com/gooddata/processor/COMMANDS.txt>`_
    script files.

    .. versionadded:: 1.4
    """

    name = 'GoodData-CL'
    aliases = ['gooddata-cl']
    filenames = ['*.gdc']
    mimetypes = ['text/x-gooddata-cl']
    flags = re.IGNORECASE

    # A GoodData-CL script is a sequence of ``FunctionName(arg=..., ...);``
    # calls, so the grammar is a simple three-state machine:
    # root -> args-list -> string-literal.
    tokens = {
        'root': [
            # Comments
            (r'#.*', Comment.Single),
            # Function call
            (r'[a-z]\w*', Name.Function),
            # Argument list
            (r'\(', Punctuation, 'args-list'),
            # Punctuation
            (r';', Punctuation),
            # Space is not significant
            (r'\s+', Text)
        ],
        'args-list': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            # Named argument on the left of '='.
            (r'[a-z]\w*', Name.Variable),
            (r'=', Operator),
            (r'"', String, 'string-literal'),
            (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
            # Space is not significant
            (r'\s', Text)
        ],
        'string-literal': [
            (r'\\[tnrfbae"\\]', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String)
        ]
    }
class MaqlLexer(RegexLexer):
    """
    Lexer for `GoodData MAQL
    <https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_
    scripts.

    .. versionadded:: 1.4
    """

    name = 'MAQL'
    aliases = ['maql']
    filenames = ['*.maql']
    mimetypes = ['text/x-gooddata-maql', 'application/x-gooddata-maql']
    flags = re.IGNORECASE

    tokens = {
        'root': [
            # IDENTITY — the literal IDENTIFIER keyword is a builtin.
            (r'IDENTIFIER\b', Name.Builtin),
            # IDENTIFIER — '{...}' references a named object.
            (r'\{[^}]+\}', Name.Variable),
            # NUMBER
            (r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
            # STRING
            (r'"', String, 'string-literal'),
            # RELATION operators; '<>' / '!=' checked before the single-char
            # comparisons so they are not split in two.
            (r'\<\>|\!\=', Operator),
            (r'\=|\>\=|\>|\<\=|\<', Operator),
            # := (assignment)
            (r'\:\=', Operator),
            # OBJECT — '[...]' references an object by URI.
            (r'\[[^]]+\]', Name.Variable.Class),
            # keywords
            (words((
                'DIMENSION', 'DIMENSIONS', 'BOTTOM', 'METRIC', 'COUNT', 'OTHER',
                'FACT', 'WITH', 'TOP', 'OR', 'ATTRIBUTE', 'CREATE', 'PARENT',
                'FALSE', 'ROW', 'ROWS', 'FROM', 'ALL', 'AS', 'PF', 'COLUMN',
                'COLUMNS', 'DEFINE', 'REPORT', 'LIMIT', 'TABLE', 'LIKE', 'AND',
                'BY', 'BETWEEN', 'EXCEPT', 'SELECT', 'MATCH', 'WHERE', 'TRUE',
                'FOR', 'IN', 'WITHOUT', 'FILTER', 'ALIAS', 'WHEN', 'NOT', 'ON',
                'KEYS', 'KEY', 'FULLSET', 'PRIMARY', 'LABELS', 'LABEL',
                'VISUAL', 'TITLE', 'DESCRIPTION', 'FOLDER', 'ALTER', 'DROP',
                'ADD', 'DATASET', 'DATATYPE', 'INT', 'BIGINT', 'DOUBLE', 'DATE',
                'VARCHAR', 'DECIMAL', 'SYNCHRONIZE', 'TYPE', 'DEFAULT', 'ORDER',
                'ASC', 'DESC', 'HYPERLINK', 'INCLUDE', 'TEMPLATE', 'MODIFY'),
                suffix=r'\b'),
             Keyword),
            # FUNCNAME — anything word-like that wasn't a keyword.
            (r'[a-z]\w*\b', Name.Function),
            # Comments
            (r'#.*', Comment.Single),
            # Punctuation
            (r'[,;()]', Punctuation),
            # Space is not significant
            (r'\s+', Text)
        ],
        'string-literal': [
            (r'\\[tnrfbae"\\]', String.Escape),
            (r'"', String, '#pop'),
            (r'[^\\"]+', String)
        ],
    }
|
joshowen/django-allauth | refs/heads/master | allauth/socialaccount/providers/google/__init__.py | 12133432 | |
sodafree/backend | refs/heads/master | build/lib.linux-i686-2.7/django/db/backends/postgresql_psycopg2/__init__.py | 12133432 | |
initNirvana/Easyphotos | refs/heads/master | env/lib/python3.4/site-packages/flask/testsuite/test_apps/blueprintapp/__init__.py | 629 | from flask import Flask
# Create the test application and register its two blueprints.
app = Flask(__name__)
# NOTE(review): the blueprint modules are imported *after* ``app`` is created,
# presumably because they import ``app`` back from this package (the usual
# Flask circular-import dance) — confirm against blueprintapp.apps.*.
from blueprintapp.apps.admin import admin
from blueprintapp.apps.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
|
nanolearningllc/edx-platform-cypress | refs/heads/master | common/test/acceptance/accessibility/test_studio_library_axs.py | 71 | """
Accessibility tests for Studio Library pages.
Run just this test with:
SELENIUM_BROWSER=phantomjs paver test_bokchoy -d accessibility -t test_studio_library_axs.py
"""
from ..tests.studio.base_studio_test import StudioLibraryTest
from ..pages.studio.library import LibraryEditPage
class StudioLibraryAxsTest(StudioLibraryTest):
    """
    Accessibility (axs) audit checks for Studio library pages.
    """

    def test_lib_edit_page_axs(self):
        """
        Audit the library edit page and check the error/warning counts
        against what the page is currently expected to produce.
        """
        edit_page = LibraryEditPage(self.browser, self.library_key)
        edit_page.visit()
        edit_page.wait_until_ready()

        audit = edit_page.do_axs_audit()

        # Exactly one page was visited in this session.
        self.assertEqual(len(audit), 1)
        page_result = audit[0]

        # The page must be free of accessibility errors ...
        self.assertEqual(len(page_result.errors), 0)
        # ... and currently produces exactly 3 known warnings.
        self.assertEqual(len(page_result.warnings), 3)

        # Every warning must be one of the expected warning types.
        known_warning_prefixes = (
            'Warning: AX_FOCUS_01',
            'Warning: AX_COLOR_01',
            'Warning: AX_IMAGE_01',
        )
        for warning in page_result.warnings:
            self.assertTrue(
                warning.startswith(known_warning_prefixes),
                msg="Unexpected warning: {}".format(warning))
|
Danielhiversen/home-assistant | refs/heads/master | homeassistant/components/device_tracker/__init__.py | 2 | """
Provide functionality to keep track of devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/device_tracker/
"""
import asyncio
from datetime import timedelta
import logging
from typing import Any, List, Sequence, Callable
import voluptuous as vol
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from homeassistant.components import group, zone
from homeassistant.components.group import (
ATTR_ADD_ENTITIES, ATTR_ENTITIES, ATTR_OBJECT_ID, ATTR_VISIBLE,
DOMAIN as DOMAIN_GROUP, SERVICE_SET)
from homeassistant.components.zone.zone import async_active_zone
from homeassistant.config import load_yaml_config_file, async_log_exception
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.restore_state import async_get_last_state
from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType
import homeassistant.helpers.config_validation as cv
from homeassistant import util
from homeassistant.util.async_ import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.yaml import dump
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_GPS_ACCURACY, ATTR_ICON, ATTR_LATITUDE,
ATTR_LONGITUDE, ATTR_NAME, CONF_ICON, CONF_MAC, CONF_NAME,
DEVICE_DEFAULT_NAME, STATE_NOT_HOME, STATE_HOME)
_LOGGER = logging.getLogger(__name__)

DOMAIN = 'device_tracker'
DEPENDENCIES = ['zone', 'group']

# Group that collects all tracked devices.
GROUP_NAME_ALL_DEVICES = 'all devices'
ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format('all_devices')

ENTITY_ID_FORMAT = DOMAIN + '.{}'

# Per-config-dir registry of known devices.
YAML_DEVICES = 'known_devices.yaml'

CONF_TRACK_NEW = 'track_new_devices'
DEFAULT_TRACK_NEW = True
CONF_NEW_DEVICE_DEFAULTS = 'new_device_defaults'

# How long a device stays "home" after it was last seen.
CONF_CONSIDER_HOME = 'consider_home'
DEFAULT_CONSIDER_HOME = timedelta(seconds=180)

CONF_SCAN_INTERVAL = 'interval_seconds'
DEFAULT_SCAN_INTERVAL = timedelta(seconds=12)

CONF_AWAY_HIDE = 'hide_if_away'
DEFAULT_AWAY_HIDE = False

# Fired the first time an unknown device is seen.
EVENT_NEW_DEVICE = 'device_tracker_new_device'

SERVICE_SEE = 'see'

ATTR_ATTRIBUTES = 'attributes'
ATTR_BATTERY = 'battery'
ATTR_DEV_ID = 'dev_id'
ATTR_GPS = 'gps'
ATTR_HOST_NAME = 'host_name'
ATTR_LOCATION_NAME = 'location_name'
ATTR_MAC = 'mac'
ATTR_SOURCE_TYPE = 'source_type'
ATTR_CONSIDER_HOME = 'consider_home'

# How the device position was determined.
SOURCE_TYPE_GPS = 'gps'
SOURCE_TYPE_ROUTER = 'router'
SOURCE_TYPE_BLUETOOTH = 'bluetooth'
SOURCE_TYPE_BLUETOOTH_LE = 'bluetooth_le'

SOURCE_TYPES = (SOURCE_TYPE_GPS, SOURCE_TYPE_ROUTER,
                SOURCE_TYPE_BLUETOOTH, SOURCE_TYPE_BLUETOOTH_LE)

NEW_DEVICE_DEFAULTS_SCHEMA = vol.Any(None, vol.Schema({
    vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean,
    vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
}))
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_SCAN_INTERVAL): cv.time_period,
    vol.Optional(CONF_TRACK_NEW): cv.boolean,
    vol.Optional(CONF_CONSIDER_HOME,
                 default=DEFAULT_CONSIDER_HOME): vol.All(
                     cv.time_period, cv.positive_timedelta),
    vol.Optional(CONF_NEW_DEVICE_DEFAULTS,
                 default={}): NEW_DEVICE_DEFAULTS_SCHEMA
})

# Payload schema of the device_tracker.see service; either a MAC or a
# device id is required to identify the device.
SERVICE_SEE_PAYLOAD_SCHEMA = vol.Schema(vol.All(
    cv.has_at_least_one_key(ATTR_MAC, ATTR_DEV_ID), {
        ATTR_MAC: cv.string,
        ATTR_DEV_ID: cv.string,
        ATTR_HOST_NAME: cv.string,
        ATTR_LOCATION_NAME: cv.string,
        ATTR_GPS: cv.gps,
        ATTR_GPS_ACCURACY: cv.positive_int,
        ATTR_BATTERY: cv.positive_int,
        ATTR_ATTRIBUTES: dict,
        ATTR_SOURCE_TYPE: vol.In(SOURCE_TYPES),
        ATTR_CONSIDER_HOME: cv.time_period,
        # Temp workaround for iOS app introduced in 0.65
        vol.Optional('battery_status'): str,
        vol.Optional('hostname'): str,
    }))
@bind_hass
def is_on(hass: HomeAssistantType, entity_id: str = None):
    """Return True if the given tracker entity (or, by default, any
    tracked device) is currently home."""
    target = entity_id if entity_id else ENTITY_ID_ALL_DEVICES
    return hass.states.is_state(target, STATE_HOME)
def see(hass: HomeAssistantType, mac: str = None, dev_id: str = None,
        host_name: str = None, location_name: str = None,
        gps: GPSType = None, gps_accuracy=None,
        battery: int = None, attributes: dict = None):
    """Call the device_tracker.see service to report a device sighting.

    Only arguments that were actually supplied (i.e. are not None) are
    forwarded in the service payload.
    """
    candidates = {
        ATTR_MAC: mac,
        ATTR_DEV_ID: dev_id,
        ATTR_HOST_NAME: host_name,
        ATTR_LOCATION_NAME: location_name,
        ATTR_GPS: gps,
        ATTR_GPS_ACCURACY: gps_accuracy,
        ATTR_BATTERY: battery,
    }
    data = {key: value for key, value in candidates.items()
            if value is not None}
    if attributes:
        data[ATTR_ATTRIBUTES] = attributes
    hass.services.call(DOMAIN, SERVICE_SEE, data)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the device tracker.

    Loads known devices from known_devices.yaml, sets up every configured
    tracker platform, registers the ``see`` service and schedules the
    periodic stale-device cleanup.
    """
    yaml_path = hass.config.path(YAML_DEVICES)

    # Component-level options come from the first device_tracker entry.
    conf = config.get(DOMAIN, [])
    conf = conf[0] if conf else {}
    consider_home = conf.get(CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME)

    # An explicit track_new_devices wins over the new_device_defaults block.
    defaults = conf.get(CONF_NEW_DEVICE_DEFAULTS, {})
    track_new = conf.get(CONF_TRACK_NEW)
    if track_new is None:
        track_new = defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)

    devices = await async_load_config(yaml_path, hass, consider_home)
    tracker = DeviceTracker(
        hass, consider_home, track_new, defaults, devices)

    async def async_setup_platform(p_type, p_config, disc_info=None):
        """Set up a device tracker platform.

        Supports the four platform flavors: (async_)get_scanner, which
        returns a polling scanner, and (async_)setup_scanner, which pushes
        sightings itself via the supplied see callback.
        """
        platform = await async_prepare_setup_platform(
            hass, config, DOMAIN, p_type)
        if platform is None:
            return

        _LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
        try:
            scanner = None
            setup = None
            if hasattr(platform, 'async_get_scanner'):
                scanner = await platform.async_get_scanner(
                    hass, {DOMAIN: p_config})
            elif hasattr(platform, 'get_scanner'):
                scanner = await hass.async_add_job(
                    platform.get_scanner, hass, {DOMAIN: p_config})
            elif hasattr(platform, 'async_setup_scanner'):
                setup = await platform.async_setup_scanner(
                    hass, p_config, tracker.async_see, disc_info)
            elif hasattr(platform, 'setup_scanner'):
                setup = await hass.async_add_job(
                    platform.setup_scanner, hass, p_config, tracker.see,
                    disc_info)
            else:
                raise HomeAssistantError("Invalid device_tracker platform.")

            if scanner:
                async_setup_scanner_platform(
                    hass, p_config, scanner, tracker.async_see, p_type)
                return

            if not setup:
                _LOGGER.error("Error setting up platform %s", p_type)
                return
        except Exception:  # pylint: disable=broad-except
            # Best effort: one broken platform must not break the component.
            _LOGGER.exception("Error setting up platform %s", p_type)

    setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
                   in config_per_platform(config, DOMAIN)]
    if setup_tasks:
        await asyncio.wait(setup_tasks, loop=hass.loop)

    tracker.async_setup_group()

    async def async_platform_discovered(platform, info):
        """Load a platform."""
        await async_setup_platform(platform, {}, disc_info=info)

    discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)

    # Clean up stale devices every 5 seconds.
    async_track_utc_time_change(
        hass, tracker.async_update_stale, second=range(0, 60, 5))

    async def async_see_service(call):
        """Service to see a device."""
        # Temp workaround for iOS, introduced in 0.65
        data = dict(call.data)
        data.pop('hostname', None)
        data.pop('battery_status', None)
        await tracker.async_see(**data)

    hass.services.async_register(
        DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA)

    # restore previously tracked devices that have not been seen yet
    await tracker.async_setup_tracked_device()
    return True
class DeviceTracker:
    """Registry of tracked devices.

    Keeps Device objects indexed by dev_id and MAC, creates new devices
    the first time they are seen, and keeps the "all devices" group and
    known_devices.yaml up to date.
    """

    def __init__(self, hass: HomeAssistantType, consider_home: timedelta,
                 track_new: bool, defaults: dict,
                 devices: Sequence) -> None:
        """Initialize a device tracker."""
        self.hass = hass
        self.devices = {dev.dev_id: dev for dev in devices}
        self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac}
        self.consider_home = consider_home
        self.track_new = track_new if track_new is not None \
            else defaults.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
        self.defaults = defaults
        self.group = None
        # Serializes writes to known_devices.yaml.
        self._is_updating = asyncio.Lock(loop=hass.loop)

        # Warn about entries that collided while building the indexes above.
        for dev in devices:
            if self.devices[dev.dev_id] is not dev:
                _LOGGER.warning('Duplicate device IDs detected %s', dev.dev_id)
            if dev.mac and self.mac_to_dev[dev.mac] is not dev:
                _LOGGER.warning('Duplicate device MAC addresses detected %s',
                                dev.mac)

    def see(self, mac: str = None, dev_id: str = None, host_name: str = None,
            location_name: str = None, gps: GPSType = None,
            gps_accuracy: int = None, battery: int = None,
            attributes: dict = None, source_type: str = SOURCE_TYPE_GPS,
            picture: str = None, icon: str = None,
            consider_home: timedelta = None):
        """Notify the device tracker that you see a device.

        Thread-safe wrapper that schedules async_see on the event loop.
        """
        self.hass.add_job(
            self.async_see(mac, dev_id, host_name, location_name, gps,
                           gps_accuracy, battery, attributes, source_type,
                           picture, icon, consider_home)
        )

    async def async_see(
            self, mac: str = None, dev_id: str = None, host_name: str = None,
            location_name: str = None, gps: GPSType = None,
            gps_accuracy: int = None, battery: int = None,
            attributes: dict = None, source_type: str = SOURCE_TYPE_GPS,
            picture: str = None, icon: str = None,
            consider_home: timedelta = None):
        """Notify the device tracker that you see a device.

        Updates an existing device when the MAC or dev_id is known;
        otherwise registers a new device, fires EVENT_NEW_DEVICE and
        persists it to known_devices.yaml.

        This method is a coroutine.
        """
        if mac is None and dev_id is None:
            raise HomeAssistantError('Neither mac or device id passed in')
        elif mac is not None:
            mac = str(mac).upper()
            device = self.mac_to_dev.get(mac)
            if not device:
                dev_id = util.slugify(host_name or '') or util.slugify(mac)
        else:
            dev_id = cv.slug(str(dev_id).lower())
            device = self.devices.get(dev_id)

        if device:
            await device.async_seen(
                host_name, location_name, gps, gps_accuracy, battery,
                attributes, source_type, consider_home)
            if device.track:
                await device.async_update_ha_state()
            return

        # If no device can be found, create it
        dev_id = util.ensure_unique_string(dev_id, self.devices.keys())
        device = Device(
            self.hass, consider_home or self.consider_home, self.track_new,
            dev_id, mac, (host_name or dev_id).replace('_', ' '),
            picture=picture, icon=icon,
            hide_if_away=self.defaults.get(CONF_AWAY_HIDE, DEFAULT_AWAY_HIDE))
        self.devices[dev_id] = device
        if mac is not None:
            self.mac_to_dev[mac] = device

        await device.async_seen(
            host_name, location_name, gps, gps_accuracy, battery, attributes,
            source_type)

        if device.track:
            await device.async_update_ha_state()

        # During init, we ignore the group
        if self.group and self.track_new:
            # BUGFIX: services are invoked via hass.services.async_call;
            # the hass object itself has no async_call attribute (compare
            # async_setup_group below).
            self.hass.async_create_task(
                self.hass.services.async_call(
                    DOMAIN_GROUP, SERVICE_SET, {
                        ATTR_OBJECT_ID: util.slugify(GROUP_NAME_ALL_DEVICES),
                        ATTR_VISIBLE: False,
                        ATTR_NAME: GROUP_NAME_ALL_DEVICES,
                        ATTR_ADD_ENTITIES: [device.entity_id]}))

        self.hass.bus.async_fire(EVENT_NEW_DEVICE, {
            ATTR_ENTITY_ID: device.entity_id,
            ATTR_HOST_NAME: device.host_name,
            ATTR_MAC: device.mac,
        })

        # update known_devices.yaml
        self.hass.async_create_task(
            self.async_update_config(
                self.hass.config.path(YAML_DEVICES), dev_id, device)
        )

    async def async_update_config(self, path, dev_id, device):
        """Add device to YAML configuration file.

        This method is a coroutine.
        """
        async with self._is_updating:
            # Honor the supplied path (it was previously recomputed here,
            # silently ignoring the parameter).
            await self.hass.async_add_executor_job(
                update_config, path, dev_id, device)

    @callback
    def async_setup_group(self):
        """Initialize group for all tracked devices.

        This method must be run in the event loop.
        """
        entity_ids = [dev.entity_id for dev in self.devices.values()
                      if dev.track]

        self.hass.async_create_task(
            self.hass.services.async_call(
                DOMAIN_GROUP, SERVICE_SET, {
                    ATTR_OBJECT_ID: util.slugify(GROUP_NAME_ALL_DEVICES),
                    ATTR_VISIBLE: False,
                    ATTR_NAME: GROUP_NAME_ALL_DEVICES,
                    ATTR_ENTITIES: entity_ids}))

    @callback
    def async_update_stale(self, now: dt_util.dt.datetime):
        """Update stale devices.

        This method must be run in the event loop.
        """
        for device in self.devices.values():
            if (device.track and device.last_update_home) and \
               device.stale(now):
                self.hass.async_create_task(device.async_update_ha_state(True))

    async def async_setup_tracked_device(self):
        """Set up all not exists tracked devices.

        Restores state for configured-but-not-yet-seen devices.

        This method is a coroutine.
        """
        async def async_init_single_device(dev):
            """Init a single device_tracker entity."""
            await dev.async_added_to_hass()
            await dev.async_update_ha_state()

        tasks = []
        for device in self.devices.values():
            if device.track and not device.last_seen:
                tasks.append(self.hass.async_create_task(
                    async_init_single_device(device)))

        if tasks:
            await asyncio.wait(tasks, loop=self.hass.loop)
class Device(Entity):
    """Represent a tracked device."""

    host_name = None  # type: str
    location_name = None  # type: str
    gps = None  # type: GPSType
    gps_accuracy = 0  # type: int
    last_seen = None  # type: dt_util.dt.datetime
    consider_home = None  # type: dt_util.dt.timedelta
    battery = None  # type: int
    attributes = None  # type: dict
    icon = None  # type: str

    # Track if the last update of this device was HOME.
    last_update_home = False
    _state = STATE_NOT_HOME

    def __init__(self, hass: HomeAssistantType, consider_home: timedelta,
                 track: bool, dev_id: str, mac: str, name: str = None,
                 picture: str = None, gravatar: str = None, icon: str = None,
                 hide_if_away: bool = False) -> None:
        """Initialize a device."""
        self.hass = hass
        self.entity_id = ENTITY_ID_FORMAT.format(dev_id)

        # Timedelta object how long we consider a device home if it is not
        # detected anymore.
        self.consider_home = consider_home

        # Device ID
        self.dev_id = dev_id
        self.mac = mac

        # If we should track this device
        self.track = track

        # Configured name
        self.config_name = name

        # Configured picture; a gravatar email takes precedence over an
        # explicit picture URL.
        if gravatar is not None:
            self.config_picture = get_gravatar_for_email(gravatar)
        else:
            self.config_picture = picture

        self.icon = icon
        self.away_hide = hide_if_away
        self.source_type = None
        self._attributes = {}

    @property
    def name(self):
        """Return the name of the entity."""
        return self.config_name or self.host_name or DEVICE_DEFAULT_NAME

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def entity_picture(self):
        """Return the picture of the device."""
        return self.config_picture

    @property
    def state_attributes(self):
        """Return the device state attributes."""
        attr = {
            ATTR_SOURCE_TYPE: self.source_type
        }

        if self.gps:
            attr[ATTR_LATITUDE] = self.gps[0]
            attr[ATTR_LONGITUDE] = self.gps[1]
            attr[ATTR_GPS_ACCURACY] = self.gps_accuracy

        # NOTE(review): truthiness check means a battery level of 0 is not
        # reported — presumably intentional (0 treated as "unknown").
        if self.battery:
            attr[ATTR_BATTERY] = self.battery

        return attr

    @property
    def device_state_attributes(self):
        """Return device state attributes."""
        return self._attributes

    @property
    def hidden(self):
        """If device should be hidden."""
        return self.away_hide and self.state != STATE_HOME

    async def async_seen(
            self, host_name: str = None, location_name: str = None,
            gps: GPSType = None, gps_accuracy=0, battery: int = None,
            attributes: dict = None,
            source_type: str = SOURCE_TYPE_GPS,
            consider_home: timedelta = None):
        """Mark the device as seen and recompute its state."""
        self.source_type = source_type
        self.last_seen = dt_util.utcnow()
        self.host_name = host_name
        self.location_name = location_name
        self.consider_home = consider_home or self.consider_home

        if battery:
            self.battery = battery
        if attributes:
            self._attributes.update(attributes)

        # Reset, then re-parse: a sighting without usable GPS clears the
        # previous coordinates.
        self.gps = None

        if gps is not None:
            try:
                self.gps = float(gps[0]), float(gps[1])
                self.gps_accuracy = gps_accuracy or 0
            except (ValueError, TypeError, IndexError):
                self.gps = None
                self.gps_accuracy = 0
                _LOGGER.warning(
                    "Could not parse gps value for %s: %s", self.dev_id, gps)

        # pylint: disable=not-an-iterable
        await self.async_update()

    def stale(self, now: dt_util.dt.datetime = None):
        """Return if device state is stale.

        Async friendly.
        """
        return self.last_seen and \
            (now or dt_util.utcnow()) - self.last_seen > self.consider_home

    async def async_update(self):
        """Update state of entity.

        Priority: explicit location name > GPS zone lookup > stale check.

        This method is a coroutine.
        """
        if not self.last_seen:
            return
        if self.location_name:
            self._state = self.location_name
        elif self.gps is not None and self.source_type == SOURCE_TYPE_GPS:
            zone_state = async_active_zone(
                self.hass, self.gps[0], self.gps[1], self.gps_accuracy)
            if zone_state is None:
                self._state = STATE_NOT_HOME
            elif zone_state.entity_id == zone.ENTITY_ID_HOME:
                self._state = STATE_HOME
            else:
                self._state = zone_state.name
        elif self.stale():
            self._state = STATE_NOT_HOME
            self.gps = None
            self.last_update_home = False
        else:
            self._state = STATE_HOME
            self.last_update_home = True

    async def async_added_to_hass(self):
        """Add an entity.

        Restores state and selected attributes from the previous run.
        """
        state = await async_get_last_state(self.hass, self.entity_id)
        if not state:
            return
        self._state = state.state

        for attr, var in (
                (ATTR_SOURCE_TYPE, 'source_type'),
                (ATTR_GPS_ACCURACY, 'gps_accuracy'),
                (ATTR_BATTERY, 'battery'),
        ):
            if attr in state.attributes:
                setattr(self, var, state.attributes[attr])

        if ATTR_LONGITUDE in state.attributes:
            self.gps = (state.attributes[ATTR_LATITUDE],
                        state.attributes[ATTR_LONGITUDE])
class DeviceScanner:
    """Base class for polling device scanners.

    Subclasses implement the synchronous methods; the async_* variants
    wrap them for use from the event loop.
    """

    hass = None  # type: HomeAssistantType

    def scan_devices(self) -> List[str]:
        """Scan for devices."""
        raise NotImplementedError()

    def async_scan_devices(self) -> Any:
        """Scan for devices.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.scan_devices)

    def get_device_name(self, device: str) -> str:
        """Get the name of a device."""
        raise NotImplementedError()

    def async_get_device_name(self, device: str) -> Any:
        """Get the name of a device.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.get_device_name, device)

    def get_extra_attributes(self, device: str) -> dict:
        """Get the extra attributes of a device."""
        raise NotImplementedError()

    def async_get_extra_attributes(self, device: str) -> Any:
        """Get the extra attributes of a device.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.get_extra_attributes, device)
def load_config(path: str, hass: HomeAssistantType, consider_home: timedelta):
    """Synchronously load devices from the YAML configuration file.

    Blocks until the async loader has finished on the event loop.
    """
    future = run_coroutine_threadsafe(
        async_load_config(path, hass, consider_home), hass.loop)
    return future.result()
async def async_load_config(path: str, hass: HomeAssistantType,
                            consider_home: timedelta):
    """Load devices from YAML configuration file.

    Returns a list of Device objects; invalid entries are logged and
    skipped, and a missing/invalid file yields an empty list.

    This method is a coroutine.
    """
    dev_schema = vol.Schema({
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_ICON, default=None): vol.Any(None, cv.icon),
        vol.Optional('track', default=False): cv.boolean,
        vol.Optional(CONF_MAC, default=None):
            vol.Any(None, vol.All(cv.string, vol.Upper)),
        vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
        vol.Optional('gravatar', default=None): vol.Any(None, cv.string),
        vol.Optional('picture', default=None): vol.Any(None, cv.string),
        # The component-level consider_home is the per-device default.
        vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(
            cv.time_period, cv.positive_timedelta),
    })
    try:
        result = []
        try:
            devices = await hass.async_add_job(
                load_yaml_config_file, path)
        except HomeAssistantError as err:
            _LOGGER.error("Unable to load %s: %s", path, str(err))
            return []

        for dev_id, device in devices.items():
            # Deprecated option. We just ignore it to avoid breaking change
            device.pop('vendor', None)
            try:
                device = dev_schema(device)
                device['dev_id'] = cv.slugify(dev_id)
            except vol.Invalid as exp:
                async_log_exception(exp, dev_id, devices, hass)
            else:
                result.append(Device(hass, **device))
        return result
    except (HomeAssistantError, FileNotFoundError):
        # When YAML file could not be loaded/did not contain a dict
        return []
@callback
def async_setup_scanner_platform(hass: HomeAssistantType, config: ConfigType,
                                 scanner: Any, async_see_device: Callable,
                                 platform: str):
    """Set up the connect scanner-based platform to device tracker.

    Polls the scanner on a fixed interval and reports every found MAC via
    async_see_device.

    This method must be run in the event loop.
    """
    interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    # Prevents overlapping scans of the same platform.
    update_lock = asyncio.Lock(loop=hass.loop)
    scanner.hass = hass

    # Initial scan of each mac we also tell about host name for config
    seen = set()  # type: Any

    async def async_device_tracker_scan(now: dt_util.dt.datetime):
        """Handle interval matches."""
        if update_lock.locked():
            # A previous scan is still running; skip this cycle.
            _LOGGER.warning(
                "Updating device list from %s took longer than the scheduled "
                "scan interval %s", platform, interval)
            return

        async with update_lock:
            found_devices = await scanner.async_scan_devices()

        for mac in found_devices:
            if mac in seen:
                # Host name only needs to be resolved once per MAC.
                host_name = None
            else:
                host_name = await scanner.async_get_device_name(mac)
                seen.add(mac)

            try:
                extra_attributes = (await
                                    scanner.async_get_extra_attributes(mac))
            except NotImplementedError:
                extra_attributes = dict()

            kwargs = {
                'mac': mac,
                'host_name': host_name,
                'source_type': SOURCE_TYPE_ROUTER,
                'attributes': {
                    'scanner': scanner.__class__.__name__,
                    **extra_attributes
                }
            }

            # Router-based sightings imply the device is at home, so report
            # the home zone's coordinates when available.
            zone_home = hass.states.get(zone.ENTITY_ID_HOME)
            if zone_home:
                kwargs['gps'] = [zone_home.attributes[ATTR_LATITUDE],
                                 zone_home.attributes[ATTR_LONGITUDE]]
                kwargs['gps_accuracy'] = 0

            hass.async_create_task(async_see_device(**kwargs))

    async_track_time_interval(hass, async_device_tracker_scan, interval)
    # Kick off the first scan immediately.
    hass.async_create_task(async_device_tracker_scan(None))
def update_config(path: str, dev_id: str, device: Device):
    """Append a device entry to the known_devices YAML file."""
    # Build the YAML payload first (a distinct name so the Device argument
    # is not shadowed), then append it to the file.
    entry = {
        device.dev_id: {
            ATTR_NAME: device.name,
            ATTR_MAC: device.mac,
            ATTR_ICON: device.icon,
            'picture': device.config_picture,
            'track': device.track,
            CONF_AWAY_HIDE: device.away_hide,
        }
    }
    with open(path, 'a') as out:
        out.write('\n')
        out.write(dump(entry))
def get_gravatar_for_email(email: str):
    """Return an 80px Gravatar for the given email address.

    The address is lowercased (as bytes) before hashing, per the Gravatar
    convention.

    Async friendly.
    """
    import hashlib
    digest = hashlib.md5(email.encode('utf-8').lower()).hexdigest()
    return 'https://www.gravatar.com/avatar/{}.jpg?s=80&d=wavatar'.format(
        digest)
|
ribag/ganeti-experiments | refs/heads/topic-cli-quote | test/py/ganeti.opcodes_unittest.py | 3 | #!/usr/bin/python
#
# Copyright (C) 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.backend"""
import os
import sys
import unittest
from ganeti import utils
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import ht
from ganeti import constants
from ganeti import errors
from ganeti import compat
import testutils
class TestOpcodes(unittest.TestCase):
  def test(self):
    """Check generic invariants of every registered opcode class."""
    # Loading from invalid state objects must fail.
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, None)
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, "")
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, {})
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, {"OP_ID": ""})

    for cls in opcodes.OP_MAPPING.values():
      # OP_ID naming conventions.
      self.assert_(cls.OP_ID.startswith("OP_"))
      self.assert_(len(cls.OP_ID) > 3)
      self.assertEqual(cls.OP_ID, cls.OP_ID.upper())
      self.assertEqual(cls.OP_ID, opcodes_base._NameToId(cls.__name__))
      self.assertFalse(
        compat.any(cls.OP_ID.startswith(prefix)
                   for prefix in opcodes_base.SUMMARY_PREFIX.keys()))
      self.assertTrue(callable(cls.OP_RESULT),
                      msg=("%s should have a result check" % cls.OP_ID))

      # Unknown keyword arguments must be rejected.
      self.assertRaises(TypeError, cls, unsupported_parameter="some value")

      args = [
        # No variables
        {},
        # Variables supported by all opcodes
        {"dry_run": False, "debug_level": 0, },
        # All variables
        dict([(name, []) for name in cls.GetAllSlots()])
        ]
      for i in args:
        op = cls(**i)
        self.assertEqual(op.OP_ID, cls.OP_ID)
        self._checkSummary(op)

        # Try a restore
        state = op.__getstate__()
        self.assert_(isinstance(state, dict))

        restored = opcodes.OpCode.LoadOpCode(state)
        self.assert_(isinstance(restored, cls))
        self._checkSummary(restored)

        # Setting attributes outside the declared slots must fail.
        for name in ["x_y_z", "hello_world"]:
          assert name not in cls.GetAllSlots()
          for value in [None, True, False, [], "Hello World"]:
            self.assertRaises(AttributeError, setattr, op, name, value)
  def _checkSummary(self, op):
    """Verify the format of an opcode's Summary() string.

    With OP_DSC_FIELD the summary is "ID(value)"; without it, just "ID"
    (both without the "OP_" prefix).
    """
    summary = op.Summary()

    if hasattr(op, "OP_DSC_FIELD"):
      self.assert_(("OP_%s" % summary).startswith("%s(" % op.OP_ID))
      self.assert_(summary.endswith(")"))
    else:
      self.assertEqual("OP_%s" % summary, op.OP_ID)
  def testSummary(self):
    """Summary() should embed the OP_DSC_FIELD value verbatim."""
    class OpTest(opcodes.OpCode):
      OP_DSC_FIELD = "data"
      OP_PARAMS = [
        ("data", ht.NoDefault, ht.TString, None),
        ]

    self.assertEqual(OpTest(data="").Summary(), "TEST()")
    self.assertEqual(OpTest(data="Hello World").Summary(),
                     "TEST(Hello World)")
    self.assertEqual(OpTest(data="node1.example.com").Summary(),
                     "TEST(node1.example.com)")
  def testSummaryFormatter(self):
    """An OP_DSC_FORMATTER overrides how the summary value is rendered."""
    class OpTest(opcodes.OpCode):
      OP_DSC_FIELD = "data"
      # Formatter ignores the value and always renders "a".
      OP_DSC_FORMATTER = lambda _, v: "a"
      OP_PARAMS = [
        ("data", ht.NoDefault, ht.TString, None),
        ]

    self.assertEqual(OpTest(data="").Summary(), "TEST(a)")
    self.assertEqual(OpTest(data="b").Summary(), "TEST(a)")
  def testTinySummary(self):
    """TinySummary() abbreviates well-known OP_ID prefixes."""
    # Prefix replacements must be unique and follow the "X_" convention.
    self.assertFalse(
      utils.FindDuplicates(opcodes_base.SUMMARY_PREFIX.values()))
    self.assertTrue(compat.all(prefix.endswith("_") and supplement.endswith("_")
                               for (prefix, supplement) in
                               opcodes_base.SUMMARY_PREFIX.items()))

    self.assertEqual(opcodes.OpClusterPostInit().TinySummary(), "C_POST_INIT")
    self.assertEqual(opcodes.OpNodeRemove().TinySummary(), "N_REMOVE")
    self.assertEqual(opcodes.OpInstanceMigrate().TinySummary(), "I_MIGRATE")
    self.assertEqual(opcodes.OpTestJqueue().TinySummary(), "TEST_JQUEUE")
  def testListSummary(self):
    """List-valued OP_DSC_FIELDs are rendered comma-separated."""
    class OpTest(opcodes.OpCode):
      OP_DSC_FIELD = "data"
      OP_PARAMS = [
        ("data", ht.NoDefault, ht.TList, None),
        ]

    self.assertEqual(OpTest(data=["a", "b", "c"]).Summary(),
                     "TEST(a,b,c)")
    self.assertEqual(OpTest(data=["a", None, "c"]).Summary(),
                     "TEST(a,None,c)")
    self.assertEqual(OpTest(data=[1, 2, 3, 4]).Summary(), "TEST(1,2,3,4)")
  def testOpId(self):
    """OP_IDs must be unique and OP_MAPPING complete."""
    self.assertFalse(utils.FindDuplicates(cls.OP_ID
                                          for cls in opcodes._GetOpList()))
    self.assertEqual(len(opcodes._GetOpList()), len(opcodes.OP_MAPPING))
  def testParams(self):
    """Validate the OP_PARAMS declarations of every registered opcode."""
    supported_by_all = set(["debug_level", "dry_run", "priority"])

    # The abstract base classes must not be registered as real opcodes
    self.assertTrue(opcodes_base.BaseOpCode not in opcodes.OP_MAPPING.values())
    self.assertTrue(opcodes.OpCode not in opcodes.OP_MAPPING.values())

    for cls in opcodes.OP_MAPPING.values() + [opcodes.OpCode]:
      all_slots = cls.GetAllSlots()

      # Every opcode must support the common base parameters
      self.assertEqual(len(set(all_slots) & supported_by_all), 3,
                       msg=("Opcode %s doesn't support all base"
                            " parameters (%r)" % (cls.OP_ID, supported_by_all)))

      # All opcodes must have OP_PARAMS
      self.assert_(hasattr(cls, "OP_PARAMS"),
                   msg="%s doesn't have OP_PARAMS" % cls.OP_ID)

      param_names = [name for (name, _, _, _) in cls.GetAllParams()]

      self.assertEqual(all_slots, param_names)

      # Without inheritance
      self.assertEqual(cls.__slots__,
                       [name for (name, _, _, _) in cls.OP_PARAMS])

      # This won't work if parameters are converted to a dictionary
      duplicates = utils.FindDuplicates(param_names)
      self.assertFalse(duplicates,
                       msg=("Found duplicate parameters %r in %s" %
                            (duplicates, cls.OP_ID)))

      # Check parameter definitions
      for attr_name, aval, test, doc in cls.GetAllParams():
        self.assert_(attr_name)
        self.assertTrue(callable(test),
                        msg=("Invalid type check for %s.%s" %
                             (cls.OP_ID, attr_name)))
        self.assertTrue(doc is None or isinstance(doc, basestring))

        if callable(aval):
          # Callable defaults are invoked; the result must not itself
          # be callable
          default_value = aval()
          self.assertFalse(callable(default_value),
                           msg=("Default value of %s.%s returned by function"
                                " is callable" % (cls.OP_ID, attr_name)))
        else:
          default_value = aval

        # A concrete default must pass its own type check
        if aval is not ht.NoDefault and aval is not None:
          self.assertTrue(test(default_value),
                          msg=("Default value of %s.%s does not verify" %
                               (cls.OP_ID, attr_name)))

      # If any parameter has documentation, all others need to have it as well
      has_doc = [doc is not None for (_, _, _, doc) in cls.OP_PARAMS]
      self.assertTrue(not compat.any(has_doc) or compat.all(has_doc),
                      msg="%s does not document all parameters" % cls)
  def testValidateNoModification(self):
    """Validate(False) must check parameters without modifying the opcode."""
    class OpTest(opcodes.OpCode):
      OP_PARAMS = [
        ("nodef", None, ht.TString, None),
        ("wdef", "default", ht.TMaybeString, None),
        ("number", 0, ht.TInt, None),
        ("notype", None, ht.TAny, None),
        ]

    # Missing required parameter "nodef"
    op = OpTest()
    before = op.__getstate__()
    self.assertRaises(errors.OpPrereqError, op.Validate, False)
    self.assertTrue(op.nodef is None)
    self.assertEqual(op.wdef, "default")
    self.assertEqual(op.number, 0)
    self.assertTrue(op.notype is None)
    self.assertEqual(op.__getstate__(), before, msg="Opcode was modified")

    # Required parameter "nodef" is provided
    op = OpTest(nodef="foo")
    before = op.__getstate__()
    op.Validate(False)
    self.assertEqual(op.__getstate__(), before, msg="Opcode was modified")
    self.assertEqual(op.nodef, "foo")
    self.assertEqual(op.wdef, "default")
    self.assertEqual(op.number, 0)
    self.assertTrue(op.notype is None)

    # Missing required parameter "nodef"
    op = OpTest(wdef="hello", number=999)
    before = op.__getstate__()
    self.assertRaises(errors.OpPrereqError, op.Validate, False)
    self.assertTrue(op.nodef is None)
    self.assertTrue(op.notype is None)
    self.assertEqual(op.__getstate__(), before, msg="Opcode was modified")

    # Wrong type for "nodef"
    op = OpTest(nodef=987)
    before = op.__getstate__()
    self.assertRaises(errors.OpPrereqError, op.Validate, False)
    self.assertEqual(op.nodef, 987)
    self.assertTrue(op.notype is None)
    self.assertEqual(op.__getstate__(), before, msg="Opcode was modified")

    # Testing different types for "notype"
    op = OpTest(nodef="foo", notype=[1, 2, 3])
    before = op.__getstate__()
    op.Validate(False)
    self.assertEqual(op.nodef, "foo")
    self.assertEqual(op.notype, [1, 2, 3])
    self.assertEqual(op.__getstate__(), before, msg="Opcode was modified")

    op = OpTest(nodef="foo", notype="Hello World")
    before = op.__getstate__()
    op.Validate(False)
    self.assertEqual(op.nodef, "foo")
    self.assertEqual(op.notype, "Hello World")
    self.assertEqual(op.__getstate__(), before, msg="Opcode was modified")
  def testValidateSetDefaults(self):
    """Validate(True) fills in declared defaults and base parameters."""
    class OpTest(opcodes.OpCode):
      OP_PARAMS = [
        ("value1", "default", ht.TMaybeString, None),
        ("value2", "result", ht.TMaybeString, None),
        ]

    op = OpTest()
    op.Validate(True)
    self.assertEqual(op.value1, "default")
    self.assertEqual(op.value2, "result")
    # Base parameters get their defaults too
    self.assert_(op.dry_run is None)
    self.assert_(op.debug_level is None)
    self.assertEqual(op.priority, constants.OP_PRIO_DEFAULT)

    # Explicitly provided values must be left untouched
    op = OpTest(value1="hello", value2="world", debug_level=123)
    op.Validate(True)
    self.assertEqual(op.value1, "hello")
    self.assertEqual(op.value2, "world")
    self.assertEqual(op.debug_level, 123)
  def testOpInstanceMultiAlloc(self):
    """OpInstanceMultiAlloc must serialize and reload nested opcodes."""
    inst = dict([(name, []) for name in opcodes.OpInstanceCreate.GetAllSlots()])
    inst_op = opcodes.OpInstanceCreate(**inst)
    inst_state = inst_op.__getstate__()
    multialloc = opcodes.OpInstanceMultiAlloc(instances=[inst_op])
    state = multialloc.__getstate__()
    # Nested opcodes are stored in serialized form
    self.assertEquals(state["instances"], [inst_state])
    loaded_multialloc = opcodes.OpCode.LoadOpCode(state)
    (loaded_inst,) = loaded_multialloc.instances
    # Loading yields a distinct object that does not compare equal to the
    # original instance, but whose serialized state matches
    self.assertNotEquals(loaded_inst, inst_op)
    self.assertEquals(loaded_inst.__getstate__(), inst_state)
class TestOpcodeDepends(unittest.TestCase):
  """Tests for the job-dependency type checks used by opcodes."""

  def test(self):
    check_relative = opcodes_base.BuildJobDepCheck(True)
    check_norelative = opcodes_base.TNoRelativeJobDependencies

    # Valid dependency lists with absolute job IDs pass both checks
    for fn in [check_relative, check_norelative]:
      self.assertTrue(fn(None))
      self.assertTrue(fn([]))
      self.assertTrue(fn([(1, [])]))
      self.assertTrue(fn([(719833, [])]))
      self.assertTrue(fn([("24879", [])]))
      self.assertTrue(fn([(2028, [constants.JOB_STATUS_ERROR])]))
      self.assertTrue(fn([
        (2028, [constants.JOB_STATUS_ERROR]),
        (18750, []),
        (5063, [constants.JOB_STATUS_SUCCESS, constants.JOB_STATUS_ERROR]),
        ]))

      # Malformed values are rejected by both checks
      self.assertFalse(fn(1))
      self.assertFalse(fn([(9, )]))
      self.assertFalse(fn([(15194, constants.JOB_STATUS_ERROR)]))

    # Relative (negative) job IDs are only accepted by check_relative
    for i in [
      [(-1, [])],
      [(-27740, [constants.JOB_STATUS_CANCELED, constants.JOB_STATUS_ERROR]),
       (-1, [constants.JOB_STATUS_ERROR]),
       (9921, [])],
      ]:
      self.assertTrue(check_relative(i))
      self.assertFalse(check_norelative(i))
class TestResultChecks(unittest.TestCase):
  """Tests for the opcode result type checks (ht.TJobIdList*)."""

  def testJobIdList(self):
    # Valid: list of (status, job-ID-or-error) pairs
    for i in [[], [(False, "error")], [(False, "")],
              [(True, 123), (True, "999")]]:
      self.assertTrue(ht.TJobIdList(i))

    # Invalid shapes or element types
    for i in ["", [("x", 1)], [[], []], [[False, "", None], [True, 123]]]:
      self.assertFalse(ht.TJobIdList(i))

  def testJobIdListOnly(self):
    # The dictionary may contain the job-IDs key and nothing else
    self.assertTrue(ht.TJobIdListOnly({
      constants.JOB_IDS_KEY: [],
      }))
    self.assertTrue(ht.TJobIdListOnly({
      constants.JOB_IDS_KEY: [(True, "9282")],
      }))

    # Wrong or extra keys are rejected
    self.assertFalse(ht.TJobIdListOnly({
      "x": None,
      }))
    self.assertFalse(ht.TJobIdListOnly({
      constants.JOB_IDS_KEY: [],
      "x": None,
      }))

    # Entries must be valid (status, value) pairs
    self.assertFalse(ht.TJobIdListOnly({
      constants.JOB_IDS_KEY: [("foo", "bar")],
      }))
    self.assertFalse(ht.TJobIdListOnly({
      constants.JOB_IDS_KEY: [("one", "two", "three")],
      }))
class TestOpInstanceSetParams(unittest.TestCase):
  """Tests for the NIC/disk modification-list checks (ht.TSetParamsMods)."""

  def _GenericTests(self, fn):
    # Checks shared by the NIC and disk variants
    self.assertTrue(fn([]))
    self.assertTrue(fn([(constants.DDM_ADD, {})]))
    self.assertTrue(fn([(constants.DDM_REMOVE, {})]))
    # Numeric indexes are accepted as the first tuple element
    for i in [0, 1, 2, 3, 9, 10, 1024]:
      self.assertTrue(fn([(i, {})]))

    # Wrong container types, negative indexes and malformed tuples fail
    self.assertFalse(fn(None))
    self.assertFalse(fn({}))
    self.assertFalse(fn(""))
    self.assertFalse(fn(0))
    self.assertFalse(fn([(-100, {})]))
    self.assertFalse(fn([(constants.DDM_ADD, 2, 3)]))
    self.assertFalse(fn([[constants.DDM_ADD]]))

  def testNicModifications(self):
    fn = ht.TSetParamsMods(ht.TINicParams)
    self._GenericTests(fn)

    for param in constants.INIC_PARAMS:
      self.assertTrue(fn([[constants.DDM_ADD, {param: None}]]))
      self.assertTrue(fn([[constants.DDM_ADD, {param: param}]]))

  def testDiskModifications(self):
    fn = ht.TSetParamsMods(ht.TIDiskParams)
    self._GenericTests(fn)

    for param in constants.IDISK_PARAMS:
      self.assertTrue(fn([[constants.DDM_ADD, {param: 0}]]))
      self.assertTrue(fn([[constants.DDM_ADD, {param: param}]]))
# Run all tests in this module through Ganeti's test runner
if __name__ == "__main__":
  testutils.GanetiTestProgram()
|
simonwydooghe/ansible | refs/heads/devel | test/units/plugins/connection/test_connection.py | 45 | # (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import sys
import pytest
from units.compat import mock
from units.compat import unittest
from units.compat.mock import MagicMock
from units.compat.mock import patch
from ansible.errors import AnsibleError
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.loader import become_loader
# from ansible.plugins.connection.accelerate import Connection as AccelerateConnection
# from ansible.plugins.connection.chroot import Connection as ChrootConnection
# from ansible.plugins.connection.funcd import Connection as FuncdConnection
# from ansible.plugins.connection.jail import Connection as JailConnection
# from ansible.plugins.connection.libvirt_lxc import Connection as LibvirtLXCConnection
from ansible.plugins.connection.lxc import Connection as LxcConnection
from ansible.plugins.connection.local import Connection as LocalConnection
from ansible.plugins.connection.paramiko_ssh import Connection as ParamikoConnection
from ansible.plugins.connection.ssh import Connection as SSHConnection
from ansible.plugins.connection.docker import Connection as DockerConnection
# from ansible.plugins.connection.winrm import Connection as WinRmConnection
from ansible.plugins.connection.network_cli import Connection as NetworkCliConnection
from ansible.plugins.connection.httpapi import Connection as HttpapiConnection
# Skip this whole test module when ncclient (needed by the netconf
# connection plugin) cannot be imported
pytest.importorskip("ncclient")

PY3 = sys.version_info[0] == 3

# Keep a reference to the real __import__ so import_mock can fall back to it
builtin_import = __import__
mock_ncclient = MagicMock(name='ncclient')
def import_mock(name, *args):
    """__import__ replacement that serves a MagicMock for ncclient imports.

    Any import whose module path starts with "ncclient" gets the shared
    mock object; everything else is delegated to the real builtin import.
    """
    if not name.startswith('ncclient'):
        return builtin_import(name, *args)
    return mock_ncclient
# Import the netconf connection plugin with ncclient mocked out, patching
# the builtins module name appropriate for the running Python major version
if PY3:
    with patch('builtins.__import__', side_effect=import_mock):
        from ansible.plugins.connection.netconf import Connection as NetconfConnection
else:
    with patch('__builtin__.__import__', side_effect=import_mock):
        from ansible.plugins.connection.netconf import Connection as NetconfConnection
class TestConnectionBaseClass(unittest.TestCase):
    """Tests for ConnectionBase and the concrete connection plugins."""

    def setUp(self):
        self.play_context = PlayContext()
        # Prompt string matching what the sudo become plugin generates
        self.play_context.prompt = (
            '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
        )
        self.in_stream = StringIO()

    def tearDown(self):
        pass

    def test_subclass_error(self):
        # Subclasses missing abstract methods must not be instantiable
        class ConnectionModule1(ConnectionBase):
            pass
        with self.assertRaises(TypeError):
            ConnectionModule1()  # pylint: disable=abstract-class-instantiated

        class ConnectionModule2(ConnectionBase):
            def get(self, key):
                super(ConnectionModule2, self).get(key)

        with self.assertRaises(TypeError):
            ConnectionModule2()  # pylint: disable=abstract-class-instantiated

    def test_subclass_success(self):
        # A subclass implementing the full abstract interface instantiates fine
        class ConnectionModule3(ConnectionBase):
            @property
            def transport(self):
                pass

            def _connect(self):
                pass

            def exec_command(self):
                pass

            def put_file(self):
                pass

            def fetch_file(self):
                pass

            def close(self):
                pass

        self.assertIsInstance(ConnectionModule3(self.play_context, self.in_stream), ConnectionModule3)

    # def test_accelerate_connection_module(self):
    #     self.assertIsInstance(AccelerateConnection(), AccelerateConnection)
    #
    # def test_chroot_connection_module(self):
    #     self.assertIsInstance(ChrootConnection(), ChrootConnection)
    #
    # def test_funcd_connection_module(self):
    #     self.assertIsInstance(FuncdConnection(), FuncdConnection)
    #
    # def test_jail_connection_module(self):
    #     self.assertIsInstance(JailConnection(), JailConnection)
    #
    # def test_libvirt_lxc_connection_module(self):
    #     self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection)

    def test_lxc_connection_module(self):
        self.assertIsInstance(LxcConnection(self.play_context, self.in_stream), LxcConnection)

    def test_local_connection_module(self):
        self.assertIsInstance(LocalConnection(self.play_context, self.in_stream), LocalConnection)

    def test_paramiko_connection_module(self):
        self.assertIsInstance(ParamikoConnection(self.play_context, self.in_stream), ParamikoConnection)

    def test_ssh_connection_module(self):
        self.assertIsInstance(SSHConnection(self.play_context, self.in_stream), SSHConnection)

    # Docker versions below 1.3 must be rejected with a clear error
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.2.3', '', 0))
    def test_docker_connection_module_too_old(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')

    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('docker version', '1.3.4', '', 0))
    def test_docker_connection_module(self, mock_new_docker_verison, mock_old_docker_version):
        self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
                              DockerConnection)

    # old version and new version fail
    @mock.patch('ansible.plugins.connection.docker.Connection._old_docker_version', return_value=('false', 'garbage', '', 1))
    @mock.patch('ansible.plugins.connection.docker.Connection._new_docker_version', return_value=('false', 'garbage', '', 1))
    def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
        self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
                                DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')

    # def test_winrm_connection_module(self):
    #     self.assertIsInstance(WinRmConnection(), WinRmConnection)

    def test_network_cli_connection_module(self):
        self.play_context.network_os = 'eos'
        self.assertIsInstance(NetworkCliConnection(self.play_context, self.in_stream), NetworkCliConnection)

    def test_netconf_connection_module(self):
        self.assertIsInstance(NetconfConnection(self.play_context, self.in_stream), NetconfConnection)

    def test_httpapi_connection_module(self):
        self.play_context.network_os = 'eos'
        self.assertIsInstance(HttpapiConnection(self.play_context, self.in_stream), HttpapiConnection)

    def test_check_password_prompt(self):
        # The fixtures below reproduce raw output from several transports;
        # their byte content must not be altered.
        local = (
            b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
            b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
        )

        ssh_pipelining_vvvv = b'''
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251
debug2: process_mux_new_session: channel 1: request tty 0, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0
debug3: process_mux_new_session: got fds stdin 9, stdout 10, stderr 11
debug2: client_session2_setup: id 2
debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug2: channel 2: rcvd ext data 67
[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug2: channel 2: written 67 to efd 11
BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq
debug3: receive packet: type 98
'''  # noqa

        ssh_nopipelining_vvvv = b'''
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 251
debug2: process_mux_new_session: channel 1: request tty 1, X 1, agent 1, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'", env 0
debug3: mux_client_request_session: session request sent
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq; /bin/true'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug2: exec request accepted on channel 2
[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: debug3: receive packet: type 2
debug3: Received SSH2_MSG_IGNORE
debug3: Received SSH2_MSG_IGNORE
BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq
debug3: receive packet: type 98
'''  # noqa

        ssh_novvvv = (
            b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
            b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
        )

        dns_issue = (
            b'timeout waiting for privilege escalation password prompt:\n'
            b'sudo: sudo: unable to resolve host tcloud014\n'
            b'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: \n'
            b'BECOME-SUCCESS-ouzmdnewuhucvuaabtjmweasarviygqq\n'
        )

        nothing = b''

        in_front = b'''
debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: " -u root /bin/sh -c '"'"'echo
'''

        # Minimal concrete connection so check_password_prompt can be called
        class ConnectionFoo(ConnectionBase):
            @property
            def transport(self):
                pass

            def _connect(self):
                pass

            def exec_command(self):
                pass

            def put_file(self):
                pass

            def fetch_file(self):
                pass

            def close(self):
                pass

        c = ConnectionFoo(self.play_context, self.in_stream)
        c.set_become_plugin(become_loader.get('sudo'))
        c.become.prompt = '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '

        self.assertTrue(c.check_password_prompt(local))
        self.assertTrue(c.check_password_prompt(ssh_pipelining_vvvv))
        self.assertTrue(c.check_password_prompt(ssh_nopipelining_vvvv))
        self.assertTrue(c.check_password_prompt(ssh_novvvv))
        self.assertTrue(c.check_password_prompt(dns_issue))
        self.assertFalse(c.check_password_prompt(nothing))
        self.assertFalse(c.check_password_prompt(in_front))
|
seeminglee/pyglet64 | refs/heads/master | tools/epydoc/epydoc/log.py | 99 | # epydoc -- Logging
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: log.py 1488 2007-02-14 00:34:27Z edloper $
"""
Functions used to report messages and progress updates to the user.
These functions are delegated to zero or more registered L{Logger}
objects, which are responsible for actually presenting the information
to the user. Different interfaces are free to create and register
their own C{Logger}s, allowing them to present this information in the
manner that is best suited to each interface.
@note: I considered using the standard C{logging} package to provide
this functionality. However, I found that it would be too difficult
to get that package to provide the behavior I want (esp. with respect
to progress displays; but also with respect to message blocks).
@group Message Severity Levels: DEBUG, INFO, WARNING, ERROR, FATAL
"""
__docformat__ = 'epytext en'
import sys, os
# Message severity levels, in increasing order of importance
DEBUG = 10
INFO = 20
DOCSTRING_WARNING = 25
WARNING = 30
ERROR = 40
# NOTE(review): FATAL shares ERROR's value (40) rather than being higher
# (e.g. 50); confirm whether that equality is intentional before relying
# on severity ordering to distinguish the two.
FATAL = 40
######################################################################
# Logger Base Class
######################################################################
class Logger:
    """
    An abstract base class that defines the interface for X{loggers},
    which are used by epydoc to report information back to the user.
    Loggers are responsible for tracking two types of information:
        - Messages, such as warnings and errors.
        - Progress on the current task.

    This abstract class allows the command-line interface and the
    graphical interface to each present this information to the user
    in the way that's most natural for each interface.  To set up a
    logger, create a subclass of C{Logger} that overrides all methods,
    and register it using L{register_logger}.

    All methods below are intentional no-ops; subclasses override the
    ones they care about.
    """
    #////////////////////////////////////////////////////////////
    # Messages
    #////////////////////////////////////////////////////////////

    def log(self, level, message):
        """
        Display a message.

        @param message: The message string to display.  C{message} may
        contain newlines, but does not need to end in a newline.
        @param level: An integer value indicating the severity of the
        message (one of the module-level constants, e.g. C{WARNING}).
        """

    def close(self):
        """
        Perform any tasks needed to close this logger.
        """

    #////////////////////////////////////////////////////////////
    # Message blocks
    #////////////////////////////////////////////////////////////

    def start_block(self, header):
        """
        Start a new message block.  Any calls to L{info()},
        L{warning()}, or L{error()} that occur between a call to
        C{start_block} and a corresponding call to C{end_block} will
        be grouped together, and displayed with a common header.
        C{start_block} can be called multiple times (to form nested
        blocks), but every call to C{start_block} I{must} be balanced
        by a call to C{end_block}.
        """

    def end_block(self):
        """
        End a warning block.  See L{start_block} for details.
        """

    #////////////////////////////////////////////////////////////
    # Progress bar
    #////////////////////////////////////////////////////////////

    def start_progress(self, header=None):
        """
        Begin displaying progress for a new task.  C{header} is a
        description of the task for which progress is being reported.
        Each call to C{start_progress} must be followed by a call to
        C{end_progress} (with no intervening calls to
        C{start_progress}).
        """

    def end_progress(self):
        """
        Finish off the display of progress for the current task.  See
        L{start_progress} for more information.
        """

    def progress(self, percent, message=''):
        """
        Update the progress display.

        @param percent: A float from 0.0 to 1.0, indicating how much
        progress has been made.
        @param message: A message indicating the most recent action
        that contributed towards that progress.
        """
class SimpleLogger(Logger):
    """Minimal console logger: prints any message at or above a threshold."""

    def __init__(self, threshold=WARNING):
        # threshold: minimum severity (module-level constant) to print
        self.threshold = threshold

    def log(self, level, message):
        # Python 2 print statement -- this module targets Python 2
        if level >= self.threshold: print message
######################################################################
# Logger Registry
######################################################################
# Registry of active Logger instances; the module-level logging functions
# below delegate every call to each entry in this list.
_loggers = []
"""
The list of registered logging functions.
"""
def register_logger(logger):
    """
    Register a logger.  Each call to one of the logging functions
    defined by this module will be delegated to each registered
    logger.
    """
    _loggers.append(logger)
def remove_logger(logger):
    """
    Unregister a previously registered logger.  Raises C{ValueError}
    if the logger was never registered (standard C{list.remove}
    behavior).
    """
    _loggers.remove(logger)
######################################################################
# Logging Functions
######################################################################
# The following methods all just delegate to the corresponding
# methods in the Logger class (above) for each registered logger.
def _broadcast(level, messages):
    # Render each part with "%s" (safe for both str and unicode values
    # under Python 2) and deliver the joined text to every registered
    # logger at the given severity level.
    text = ' '.join('%s' % (part,) for part in messages)
    for receiver in _loggers:
        receiver.log(level, text)

def fatal(*messages):
    """Display the given fatal message."""
    _broadcast(FATAL, messages)

def error(*messages):
    """Display the given error message."""
    _broadcast(ERROR, messages)

def warning(*messages):
    """Display the given warning message."""
    _broadcast(WARNING, messages)

def docstring_warning(*messages):
    """Display the given docstring warning message."""
    _broadcast(DOCSTRING_WARNING, messages)

def info(*messages):
    """Display the given informational message."""
    _broadcast(INFO, messages)

def debug(*messages):
    """Display the given debugging message."""
    _broadcast(DEBUG, messages)
# The functions below simply fan each call out to every registered
# logger; their docstrings are copied from the corresponding Logger
# methods so help() stays informative.

def start_block(header):
    for receiver in _loggers:
        receiver.start_block(header)
start_block.__doc__ = Logger.start_block.__doc__

def end_block():
    for receiver in _loggers:
        receiver.end_block()
end_block.__doc__ = Logger.end_block.__doc__

def start_progress(header=None):
    for receiver in _loggers:
        receiver.start_progress(header)
start_progress.__doc__ = Logger.start_progress.__doc__

def end_progress():
    for receiver in _loggers:
        receiver.end_progress()
end_progress.__doc__ = Logger.end_progress.__doc__

def progress(percent, message=''):
    for receiver in _loggers:
        receiver.progress(percent, '%s' % message)
progress.__doc__ = Logger.progress.__doc__

def close():
    # Give every logger a chance to flush and tear down its display
    for receiver in _loggers:
        receiver.close()
|
inclement/kivy | refs/heads/master | kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py | 30 | from kivy.tools.packaging.pyinstaller_hooks import (
add_dep_paths, excludedimports, datas, get_deps_all,
get_factory_modules, kivy_modules)
add_dep_paths()
hiddenimports = [] # get_deps_all()['hiddenimports']
hiddenimports = list(set(
get_factory_modules() + kivy_modules + hiddenimports))
|
Yen-Chung-En/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/urllib/__init__.py | 12133432 | |
xrmx/django | refs/heads/master | django/conf/locale/pt/__init__.py | 12133432 | |
brownman/selenium-webdriver | refs/heads/master | selenium/src/py/lib/docutils/parsers/rst/directives/tables.py | 5 | # Authors: David Goodger, David Priest
# Contact: goodger@python.org
# Revision: $Revision: 3900 $
# Date: $Date: 2005-09-24 17:11:20 +0200 (Sat, 24 Sep 2005) $
# Copyright: This module has been placed in the public domain.
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
from docutils import io, nodes, statemachine, utils
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import directives
# Optional/compatibility imports: csv and urllib2 may be missing on very
# old Pythons; directives degrade gracefully when they are None.
try:
    import csv                          # new in Python 2.3
except ImportError:
    csv = None

try:
    import urllib2
except ImportError:
    urllib2 = None

# Define True/False on interpreters that predate them (Python 2 only;
# assigning to True/False is a syntax error on Python 3)
try:
    True
except NameError:                       # Python 2.2 & 2.1 compatibility
    True = not 0
    False = not 1
def table(name, arguments, options, content, lineno,
          content_offset, block_text, state, state_machine):
    """
    Implement the "table" directive: parse the directive content, require
    exactly one table node, and attach the optional title and classes.
    Returns a list of nodes (the table, or a warning/error message).
    """
    if not content:
        warning = state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.'
            % name, nodes.literal_block(block_text, block_text),
            line=lineno)
        return [warning]
    title, messages = make_title(arguments, state, lineno)
    node = nodes.Element()          # anonymous container for parsing
    state.nested_parse(content, content_offset, node)
    # The content must parse to exactly one table node
    if len(node) != 1 or not isinstance(node[0], nodes.table):
        error = state_machine.reporter.error(
            'Error parsing content block for the "%s" directive: '
            'exactly one table expected.'
            % name, nodes.literal_block(block_text, block_text),
            line=lineno)
        return [error]
    table_node = node[0]
    table_node['classes'] += options.get('class', [])
    if title:
        table_node.insert(0, title)
    return [table_node] + messages

# Directive registration attributes (docutils 0.4-style function directives)
table.arguments = (0, 1, 1)
table.options = {'class': directives.class_option}
table.content = 1
def make_title(arguments, state, lineno):
    """Build an optional title node from the directive's first argument.

    Returns a ``(title, messages)`` pair; both are empty (``None``/``[]``)
    when no argument was given.
    """
    if not arguments:
        return None, []
    title_text = arguments[0]
    text_nodes, messages = state.inline_text(title_text, lineno)
    title = nodes.title(title_text, '', *text_nodes)
    return title, messages
if csv:
    class DocutilsDialect(csv.Dialect):
        """CSV dialect for `csv_table` directive function."""

        delimiter = ','
        quotechar = '"'
        doublequote = True
        skipinitialspace = True
        lineterminator = '\n'
        quoting = csv.QUOTE_MINIMAL

        def __init__(self, options):
            # Directive options override the dialect defaults above
            if options.has_key('delim'):
                self.delimiter = str(options['delim'])
            if options.has_key('keepspace'):
                # Treat whitespace after the delimiter as significant
                self.skipinitialspace = False
            if options.has_key('quote'):
                self.quotechar = str(options['quote'])
            if options.has_key('escape'):
                # An explicit escape character disables quote doubling
                self.doublequote = False
                self.escapechar = str(options['escape'])
            csv.Dialect.__init__(self)


    class HeaderDialect(csv.Dialect):
        """CSV dialect to use for the "header" option data."""

        delimiter = ','
        quotechar = '"'
        escapechar = '\\'
        doublequote = False
        skipinitialspace = True
        lineterminator = '\n'
        quoting = csv.QUOTE_MINIMAL
def csv_table(name, arguments, options, content, lineno,
              content_offset, block_text, state, state_machine):
    """
    Implement the "csv-table" directive: read CSV data (from content, a
    file, or a URL), parse it into rows, and build a docutils table node.
    Returns a list of nodes (the table plus any inline-parsing messages,
    or a warning/error message on failure).
    """
    try:
        # Refuse file/url sources when file insertion has been disabled
        if ( not state.document.settings.file_insertion_enabled
             and (options.has_key('file') or options.has_key('url')) ):
            warning = state_machine.reporter.warning(
                'File and URL access deactivated; ignoring "%s" directive.' %
                name, nodes.literal_block(block_text, block_text), line=lineno)
            return [warning]
        check_requirements(name, lineno, block_text, state_machine)
        title, messages = make_title(arguments, state, lineno)
        csv_data, source = get_csv_data(
            name, options, content, lineno, block_text, state, state_machine)
        table_head, max_header_cols = process_header_option(
            options, state_machine, lineno)
        rows, max_cols = parse_csv_data_into_rows(
            csv_data, DocutilsDialect(options), source, options)
        # Column count must accommodate both body and header rows
        max_cols = max(max_cols, max_header_cols)
        header_rows = options.get('header-rows', 0) # default 0
        stub_columns = options.get('stub-columns', 0) # default 0
        check_table_dimensions(
            rows, header_rows, stub_columns, name, lineno,
            block_text, state_machine)
        table_head.extend(rows[:header_rows])
        table_body = rows[header_rows:]
        col_widths = get_column_widths(
            max_cols, name, options, lineno, block_text, state_machine)
        extend_short_rows_with_empty_cells(max_cols, (table_head, table_body))
    except SystemMessagePropagation, detail:
        # Helper functions signal fatal problems by wrapping the system
        # message in this exception; return it as the directive's result
        return [detail.args[0]]
    except csv.Error, detail:
        error = state_machine.reporter.error(
            'Error with CSV data in "%s" directive:\n%s' % (name, detail),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    table = (col_widths, table_head, table_body)
    table_node = state.build_table(table, content_offset, stub_columns)
    table_node['classes'] += options.get('class', [])
    if title:
        table_node.insert(0, title)
    return [table_node] + messages

# Directive registration attributes (docutils 0.4-style function directives)
csv_table.arguments = (0, 1, 1)
csv_table.options = {'header-rows': directives.nonnegative_int,
                     'stub-columns': directives.nonnegative_int,
                     'header': directives.unchanged,
                     'widths': directives.positive_int_list,
                     'file': directives.path,
                     'url': directives.uri,
                     'encoding': directives.encoding,
                     'class': directives.class_option,
                     # field delimiter char
                     'delim': directives.single_char_or_whitespace_or_unicode,
                     # treat whitespace after delimiter as significant
                     'keepspace': directives.flag,
                     # text field quote/unquote char:
                     'quote': directives.single_char_or_unicode,
                     # char used to escape delim & quote as-needed:
                     'escape': directives.single_char_or_unicode,}
csv_table.content = 1
def check_requirements(name, lineno, block_text, state_machine):
    """
    Abort the directive with a severe system message when the csv module
    is unavailable (csv is None on Pythons older than 2.3; see the
    guarded import at the top of this module).
    """
    if not csv:
        error = state_machine.reporter.error(
            'The "%s" directive is not compatible with this version of '
            'Python (%s).  Requires the "csv" module, new in Python 2.3.'
            % (name, sys.version.split()[0]),
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
def get_csv_data(name, options, content, lineno, block_text,
                 state, state_machine):
    """
    CSV data can come from the directive content, from an external file, or
    from a URL reference.

    Returns (csv_data, source): a sequence of text lines plus the
    path/URL/document source they came from.  Raises
    SystemMessagePropagation (wrapping a reporter message) for conflicting
    or missing input specifications and for I/O failures.
    """
    encoding = options.get('encoding', state.document.settings.input_encoding)
    if content:  # CSV data is from directive content
        # Inline content is mutually exclusive with 'file'/'url'.
        if options.has_key('file') or options.has_key('url'):
            error = state_machine.reporter.error(
                '"%s" directive may not both specify an external file and '
                'have content.' % name,
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(error)
        source = content.source(0)
        csv_data = content
    elif options.has_key('file'):  # CSV data is from an external file
        if options.has_key('url'):
            error = state_machine.reporter.error(
                'The "file" and "url" options may not be simultaneously '
                'specified for the "%s" directive.' % name,
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(error)
        # Resolve the file path relative to the document being processed.
        source_dir = os.path.dirname(
            os.path.abspath(state.document.current_source))
        source = os.path.normpath(os.path.join(source_dir, options['file']))
        source = utils.relative_path(None, source)
        try:
            # Record the file as a build dependency before reading it.
            state.document.settings.record_dependencies.add(source)
            csv_file = io.FileInput(
                source_path=source, encoding=encoding,
                error_handler
                    =state.document.settings.input_encoding_error_handler,
                handle_io_errors=None)
            csv_data = csv_file.read().splitlines()
        except IOError, error:
            severe = state_machine.reporter.severe(
                'Problems with "%s" directive path:\n%s.' % (name, error),
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(severe)
    elif options.has_key('url'):  # CSV data is from a URL
        # `urllib2` is a module-level name that may be false/None in
        # restricted environments -- TODO confirm against module header.
        if not urllib2:
            severe = state_machine.reporter.severe(
                'Problems with the "%s" directive and its "url" option: '
                'unable to access the required functionality (from the '
                '"urllib2" module).' % name,
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(severe)
        source = options['url']
        try:
            csv_text = urllib2.urlopen(source).read()
        except (urllib2.URLError, IOError, OSError, ValueError), error:
            severe = state_machine.reporter.severe(
                'Problems with "%s" directive URL "%s":\n%s.'
                % (name, options['url'], error),
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(severe)
        # Decode the fetched bytes with the document's configured encoding.
        csv_file = io.StringInput(
            source=csv_text, source_path=source, encoding=encoding,
            error_handler=state.document.settings.input_encoding_error_handler)
        csv_data = csv_file.read().splitlines()
    else:
        # No content, no 'file', no 'url': nothing to build a table from.
        error = state_machine.reporter.warning(
            'The "%s" directive requires content; none supplied.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
    return csv_data, source
def process_header_option(options, state_machine, lineno):
    """Parse the optional 'header' option into table-header rows.

    Returns (table_head, max_header_cols): the parsed header rows (empty
    list if no 'header' option was given) and the widest header row's
    column count.
    """
    source = state_machine.get_source(lineno - 1)
    table_head = []
    max_header_cols = 0
    if options.has_key('header'):  # separate table header in option
        # The header text is itself CSV, parsed with the header dialect.
        rows, max_header_cols = parse_csv_data_into_rows(
            options['header'].split('\n'), HeaderDialect(), source, options)
        table_head.extend(rows)
    return table_head, max_header_cols
def parse_csv_data_into_rows(csv_data, dialect, source, options):
    """Parse lines of CSV text into docutils table row data.

    Returns (rows, max_cols).  Each cell is a 4-tuple
    (morecols, morerows, offset, cell text as a StringList); the first
    three are always 0 since CSV cannot express spans.
    """
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([line.encode('utf-8') for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = unicode(cell, 'utf-8')
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        # Track the widest row seen so far for column-width computation.
        max_cols = max(max_cols, len(row))
    return rows, max_cols
def check_table_dimensions(rows, header_rows, stub_columns, name, lineno,
                           block_text, state_machine):
    """Validate that the parsed rows satisfy header/stub requirements.

    Raises SystemMessagePropagation if there are fewer rows than
    `header_rows`, no body rows left after the header, fewer columns in
    any row than `stub_columns`, or no body columns left after the stubs.
    Returns nothing on success.
    """
    if len(rows) < header_rows:
        error = state_machine.reporter.error(
            '%s header row(s) specified but only %s row(s) of data supplied '
            '("%s" directive).' % (header_rows, len(rows), name),
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
    # Chained comparison: header count equals row count AND is positive,
    # i.e. every row would be consumed by the header.
    if len(rows) == header_rows > 0:
        error = state_machine.reporter.error(
            'Insufficient data supplied (%s row(s)); no data remaining for '
            'table body, required by "%s" directive.' % (len(rows), name),
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
    for row in rows:
        if len(row) < stub_columns:
            error = state_machine.reporter.error(
                '%s stub column(s) specified but only %s columns(s) of data '
                'supplied ("%s" directive).' % (stub_columns, len(row), name),
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(error)
        if len(row) == stub_columns > 0:
            error = state_machine.reporter.error(
                'Insufficient data supplied (%s columns(s)); no data remaining '
                'for table body, required by "%s" directive.'
                % (len(row), name),
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(error)
def get_column_widths(max_cols, name, options, lineno, block_text,
                      state_machine):
    """Return a list of relative column widths for the table.

    Uses the explicit 'widths' option when present (its length must equal
    `max_cols`); otherwise splits 100 evenly across the columns.  Raises
    SystemMessagePropagation on a widths/columns mismatch or when the
    table has no columns at all.
    """
    if options.has_key('widths'):
        col_widths = options['widths']
        if len(col_widths) != max_cols:
            error = state_machine.reporter.error(
                '"%s" widths do not match the number of columns in table (%s).'
                % (name, max_cols),
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(error)
    elif max_cols:
        # NOTE(review): on Python 2 this is integer division, so widths
        # are truncated ints (e.g. 33 for three columns).
        col_widths = [100 / max_cols] * max_cols
    else:
        error = state_machine.reporter.error(
            'No table data detected in CSV file.',
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
    return col_widths
def extend_short_rows_with_empty_cells(columns, parts):
    """Pad every row in each part of *parts* to *columns* cells, in place.

    Missing cells are appended as (morecols, morerows, offset, cell text)
    tuples with empty text, so all rows end up the same width.
    """
    for section in parts:
        for row in section:
            shortfall = columns - len(row)
            if shortfall > 0:
                row.extend([(0, 0, 0, [])] * shortfall)
def list_table(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    """
    Implement tables whose data is encoded as a uniform two-level bullet list.
    For further ideas, see
    http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables

    Returns a list of nodes: the built table node (plus any title
    messages), or a single error node on failure.
    """
    if not content:
        error = state_machine.reporter.error(
            'The "%s" directive is empty; content required.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    title, messages = make_title(arguments, state, lineno)
    # Parse the directive content into a throwaway container so the
    # bullet-list structure can be inspected as nodes.
    node = nodes.Element()  # anonymous container for parsing
    state.nested_parse(content, content_offset, node)
    try:
        num_cols, col_widths = check_list_content(
            node, name, options, content, lineno, block_text, state_machine)
        # Each top-level list item is a row; each second-level item a cell.
        table_data = [[item.children for item in row_list[0]]
                      for row_list in node[0]]
        header_rows = options.get('header-rows', 0)  # default 0
        stub_columns = options.get('stub-columns', 0)  # default 0
        check_table_dimensions(
            table_data, header_rows, stub_columns, name, lineno,
            block_text, state_machine)
    except SystemMessagePropagation, detail:
        # Validators raise with a ready-made system message; just return it.
        return [detail.args[0]]
    table_node = build_table_from_list(table_data, col_widths,
                                       header_rows, stub_columns)
    table_node['classes'] += options.get('class', [])
    if title:
        table_node.insert(0, title)
    return [table_node] + messages
# Directive registration metadata for list_table (old function-style
# docutils directive API); same (required, optional, whitespace) argument
# spec as csv_table, and the directive requires a content block.
list_table.arguments = (0, 1, 1)
list_table.options = {'header-rows': directives.nonnegative_int,
                      'stub-columns': directives.nonnegative_int,
                      'widths': directives.positive_int_list,
                      'class': directives.class_option}
list_table.content = 1
def check_list_content(node, name, options, content, lineno, block_text,
                       state_machine):
    """Validate the parsed content as a uniform two-level bullet list.

    Returns (num_cols, col_widths) on success.  Raises
    SystemMessagePropagation if the content is not exactly one bullet
    list, if any row lacks a nested bullet list, if row widths are not
    uniform, or if the 'widths' option does not match the column count.
    """
    if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
        error = state_machine.reporter.error(
            'Error parsing content block for the "%s" directive: '
            'exactly one bullet list expected.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
    list_node = node[0]
    # Check for a uniform two-level bullet list:
    for item_index in range(len(list_node)):
        item = list_node[item_index]
        if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
            error = state_machine.reporter.error(
                'Error parsing content block for the "%s" directive: '
                'two-level bullet list expected, but row %s does not contain '
                'a second-level bullet list.' % (name, item_index + 1),
                nodes.literal_block(block_text, block_text), line=lineno)
            raise SystemMessagePropagation(error)
        elif item_index:
            # ATTN pychecker users: num_cols is guaranteed to be set in the
            # "else" clause below for item_index==0, before this branch is
            # triggered.
            if len(item[0]) != num_cols:
                error = state_machine.reporter.error(
                    'Error parsing content block for the "%s" directive: '
                    'uniform two-level bullet list expected, but row %s does '
                    'not contain the same number of items as row 1 (%s vs %s).'
                    % (name, item_index + 1, len(item[0]), num_cols),
                    nodes.literal_block(block_text, block_text), line=lineno)
                raise SystemMessagePropagation(error)
        else:
            # First row defines the expected column count.
            num_cols = len(item[0])
    col_widths = get_column_widths(
        num_cols, name, options, lineno, block_text, state_machine)
    if len(col_widths) != num_cols:
        error = state_machine.reporter.error(
            'Error parsing "widths" option of the "%s" directive: '
            'number of columns does not match the table data (%s vs %s).'
            % (name, len(col_widths), num_cols),
            nodes.literal_block(block_text, block_text), line=lineno)
        raise SystemMessagePropagation(error)
    return num_cols, col_widths
def build_table_from_list(table_data, col_widths, header_rows, stub_columns):
    """Assemble a docutils table node tree from parsed cell data.

    `table_data` is rows of cells (each cell a list of body nodes);
    the first `header_rows` rows go into <thead>, the rest into <tbody>,
    and the first `stub_columns` colspecs are flagged as stubs.
    Returns the complete `nodes.table` node.  (`+=` on docutils nodes
    appends a child.)
    """
    table = nodes.table()
    tgroup = nodes.tgroup(cols=len(col_widths))
    table += tgroup
    for col_width in col_widths:
        colspec = nodes.colspec(colwidth=col_width)
        # Mark the leading `stub_columns` columns as row headers.
        if stub_columns:
            colspec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += colspec
    rows = []
    for row in table_data:
        row_node = nodes.row()
        for cell in row:
            entry = nodes.entry()
            entry += cell
            row_node += entry
        rows.append(row_node)
    if header_rows:
        thead = nodes.thead()
        thead.extend(rows[:header_rows])
        tgroup += thead
    tbody = nodes.tbody()
    tbody.extend(rows[header_rows:])
    tgroup += tbody
    return table
|
diegobanol/VentasPc | refs/heads/master | requests/auth.py | 25 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
import warnings
from base64 import b64encode
from .compat import urlparse, str, basestring
from .cookies import extract_cookies_to_jar
from ._internal_utils import to_native_string
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
    """Return a ``Basic ...`` Authorization header value for the credentials."""

    # Deprecated coercion: non-string credentials (e.g. ints) are turned
    # into strings to preserve historical behaviour.  Per the original
    # maintainers this is "dumb but we need to preserve it because people
    # are relying on it"; it goes away in 3.0.0.
    if not isinstance(username, basestring):
        warnings.warn(
            "Non-string usernames will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({0!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(username),
            category=DeprecationWarning,
        )
        username = str(username)

    if not isinstance(password, basestring):
        warnings.warn(
            "Non-string passwords will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({0!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(password),
            category=DeprecationWarning,
        )
        password = str(password)

    # The header is built from bytes; text credentials are encoded as
    # latin1 (requests' historical charset choice for basic auth).
    if isinstance(username, str):
        username = username.encode('latin1')
    if isinstance(password, str):
        password = password.encode('latin1')

    token = b64encode(b':'.join((username, password))).strip()
    return 'Basic ' + to_native_string(token)
class AuthBase(object):
    """Base class that all auth implementations derive from.

    Concrete auth classes override ``__call__`` to receive a prepared
    request, attach credentials to it, and return it.
    """

    def __call__(self, r):
        # Subclass responsibility; invoking the base class is always an error.
        raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __eq__(self, other):
        # Objects without the attributes compare against None, so they
        # are unequal unless our credentials are themselves None.
        same_username = self.username == getattr(other, 'username', None)
        same_password = self.password == getattr(other, 'password', None)
        return all([same_username, same_password])

    def __ne__(self, other):
        return not self == other

    def __call__(self, r):
        # Attach the Basic credentials to the outgoing request.
        r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same basic-auth credentials as HTTPBasicAuth, but sent in the
        # Proxy-Authorization header so the proxy (not the origin server)
        # consumes them.
        r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object.

    Challenge state (nonce, nonce count, etc.) is kept in thread-local
    storage so one instance can be shared across threads.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Keep state in per-thread local storage
        self._thread_local = threading.local()

    def init_per_thread_state(self):
        # Ensure state is initialized just once per-thread
        if not hasattr(self._thread_local, 'init'):
            self._thread_local.init = True
            self._thread_local.last_nonce = ''
            self._thread_local.nonce_count = 0
            self._thread_local.chal = {}       # parsed WWW-Authenticate challenge
            self._thread_local.pos = None      # body seek position for replays
            self._thread_local.num_401_calls = None

    def build_digest_header(self, method, url):
        """Build the value of the Authorization header from the stored
        challenge, or return None if the algorithm/qop is unsupported.

        :rtype: str
        """
        realm = self._thread_local.chal['realm']
        nonce = self._thread_local.chal['nonce']
        qop = self._thread_local.chal.get('qop')
        algorithm = self._thread_local.chal.get('algorithm')
        opaque = self._thread_local.chal.get('opaque')
        hash_utf8 = None

        # RFC 2617 defaults the algorithm to MD5 when unspecified.
        if algorithm is None:
            _algorithm = 'MD5'
        else:
            _algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif _algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8

        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        # Unknown algorithm: signal "cannot authenticate" to the caller.
        if hash_utf8 is None:
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        #: path is request-uri defined in RFC 2616 which should not be empty
        path = p_parsed.path or "/"
        if p_parsed.query:
            path += '?' + p_parsed.query

        # A1/A2 per RFC 2617 section 3.2.2.
        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        HA1 = hash_utf8(A1)
        HA2 = hash_utf8(A2)

        # The nonce count increments for every request under the same
        # server nonce; a fresh nonce resets it to 1.
        if nonce == self._thread_local.last_nonce:
            self._thread_local.nonce_count += 1
        else:
            self._thread_local.nonce_count = 1
        ncvalue = '%08x' % self._thread_local.nonce_count
        # Client nonce: hash of count + nonce + time + random bytes.
        s = str(self._thread_local.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)

        cnonce = (hashlib.sha1(s).hexdigest()[:16])
        if _algorithm == 'MD5-SESS':
            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

        if not qop:
            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
        elif qop == 'auth' or 'auth' in qop.split(','):
            noncebit = "%s:%s:%s:%s:%s" % (
                nonce, ncvalue, cnonce, 'auth', HA2
            )
            respdig = KD(HA1, noncebit)
        else:
            # XXX handle auth-int.
            return None

        self._thread_local.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if algorithm:
            base += ', algorithm="%s"' % algorithm
        if entdig:
            base += ', digest="%s"' % entdig
        if qop:
            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_redirect(self, r, **kwargs):
        """Reset num_401_calls counter on redirects."""
        if r.is_redirect:
            self._thread_local.num_401_calls = 1

    def handle_401(self, r, **kwargs):
        """
        Takes the given response and tries digest-auth, if needed.

        Retries the request at most once with a freshly built digest
        header; otherwise returns the response unchanged.

        :rtype: requests.Response
        """
        # If response is not 4xx, do not auth
        # See https://github.com/kennethreitz/requests/issues/3772
        if not 400 <= r.status_code < 500:
            self._thread_local.num_401_calls = 1
            return r

        if self._thread_local.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            r.request.body.seek(self._thread_local.pos)
        s_auth = r.headers.get('www-authenticate', '')

        if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:

            self._thread_local.num_401_calls += 1
            pat = re.compile(r'digest ', flags=re.IGNORECASE)
            self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.close()
            prep = r.request.copy()
            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
            prep.prepare_cookies(prep._cookies)

            prep.headers['Authorization'] = self.build_digest_header(
                prep.method, prep.url)
            _r = r.connection.send(prep, **kwargs)
            _r.history.append(r)
            _r.request = prep

            return _r

        self._thread_local.num_401_calls = 1
        return r

    def __call__(self, r):
        # Initialize per-thread state, if needed
        self.init_per_thread_state()
        # If we have a saved nonce, skip the 401
        if self._thread_local.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        try:
            # Remember the body position so a 401 retry can rewind it.
            self._thread_local.pos = r.body.tell()
        except AttributeError:
            # In the case of HTTPDigestAuth being reused and the body of
            # the previous request was a file-like object, pos has the
            # file position of the previous body. Ensure it's set to
            # None.
            self._thread_local.pos = None
        r.register_hook('response', self.handle_401)
        r.register_hook('response', self.handle_redirect)
        self._thread_local.num_401_calls = 1

        return r

    def __eq__(self, other):
        return all([
            self.username == getattr(other, 'username', None),
            self.password == getattr(other, 'password', None)
        ])

    def __ne__(self, other):
        return not self == other
|
PredictiveScienceLab/GPy | refs/heads/master | GPy/models/__init__.py | 8 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .gp_regression import GPRegression
from .gp_classification import GPClassification
from .sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput
from .sparse_gp_classification import SparseGPClassification
from .gplvm import GPLVM
from .bcgplvm import BCGPLVM
from .sparse_gplvm import SparseGPLVM
from .warped_gp import WarpedGP
from .bayesian_gplvm import BayesianGPLVM
from .mrd import MRD
from .gradient_checker import GradientChecker, HessianChecker, SkewChecker
from .ss_gplvm import SSGPLVM
from .gp_coregionalized_regression import GPCoregionalizedRegression
from .sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
from .gp_heteroscedastic_regression import GPHeteroscedasticRegression
from .ss_mrd import SSMRD
from .gp_kronecker_gaussian_regression import GPKroneckerGaussianRegression
from .gp_var_gauss import GPVariationalGaussianApproximation
from .one_vs_all_classification import OneVsAllClassification
from .one_vs_all_sparse_classification import OneVsAllSparseClassification
from .dpgplvm import DPBayesianGPLVM
|
horance-liu/tensorflow | refs/heads/master | tensorflow/contrib/nccl/__init__.py | 56 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for using NVIDIA nccl collective ops.
@@all_max
@@all_min
@@all_prod
@@all_sum
@@reduce_sum
@@broadcast
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_max
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_min
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_prod
from tensorflow.contrib.nccl.python.ops.nccl_ops import all_sum
from tensorflow.contrib.nccl.python.ops.nccl_ops import broadcast
from tensorflow.contrib.nccl.python.ops.nccl_ops import reduce_sum
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
dkulikovsky/ceres | refs/heads/master | tests/test_ceres.py | 1 | from unittest import TestCase
from mock import ANY, Mock, call, mock_open, patch
from ceres import *
def fetch_mock_open_writes(open_mock):
    """Concatenate everything written through a mocked ``open`` handle.

    Each entry in ``write.call_args_list`` is an (args, kwargs) pair;
    the first positional arg is the string that was written.
    """
    recorded_calls = open_mock().write.call_args_list
    return ''.join(args[0] for args, _kwargs in recorded_calls)
class ModuleFunctionsTest(TestCase):
    """Tests for ceres module-level helpers (currently getTree)."""

    @patch('ceres.isdir', new=Mock(return_value=False))
    @patch('ceres.CeresTree', new=Mock(spec=CeresTree))
    def test_get_tree_with_no_tree(self):
        # No .ceres-tree marker directory anywhere up the path -> None.
        tree = getTree('/graphite/storage/ceres/foo/bar')
        self.assertEqual(None, tree)

    @patch('ceres.CeresTree', spec=CeresTree)
    @patch('ceres.isdir')
    def test_get_tree_with_tree_samedir(self, isdir_mock, ceres_tree_mock):
        # The queried path itself is the tree root: a single isdir probe
        # on its .ceres-tree subdirectory should succeed.
        isdir_mock.return_value = True
        tree = getTree('/graphite/storage/ceres')
        self.assertNotEqual(None, tree)
        isdir_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree')
        ceres_tree_mock.assert_called_once_with('/graphite/storage/ceres')
class TimeSeriesDataTest(TestCase):
    """Tests for TimeSeriesData: timestamps, iteration, and merge()."""

    def setUp(self):
        # 10 points covering [0, 50) at a 5-second step, values 0.0..9.0.
        self.time_series = TimeSeriesData(0, 50, 5, [float(x) for x in xrange(0, 10)])

    def test_timestamps_property(self):
        self.assertEqual(10, len(self.time_series.timestamps))
        self.assertEqual(0, self.time_series.timestamps[0])
        self.assertEqual(45, self.time_series.timestamps[-1])

    def test_iter_values(self):
        # Iterating a series yields (timestamp, value) pairs in order.
        values = list(self.time_series)
        self.assertEqual(10, len(values))
        self.assertEqual((0, 0.0), values[0])
        self.assertEqual((45, 9.0), values[-1])

    def test_merge_no_missing(self):
        # merge only has effect if time series has no gaps
        other_series = TimeSeriesData(0, 25, 5, [float(x * x) for x in xrange(1, 6)])
        original_values = list(self.time_series)
        self.time_series.merge(other_series)
        self.assertEqual(original_values, list(self.time_series))

    def test_merge_with_empty(self):
        # Merging into an all-None series adopts every source value.
        new_series = TimeSeriesData(0, 50, 5, [None] * 10)
        new_series.merge(self.time_series)
        self.assertEqual(list(self.time_series), list(new_series))

    def test_merge_with_holes(self):
        # Only the None gaps should be filled from the merged series.
        values = []
        for x in xrange(0, 10):
            if x % 2 == 0:
                values.append(x)
            else:
                values.append(None)
        new_series = TimeSeriesData(0, 50, 5, values)
        new_series.merge(self.time_series)
        self.assertEqual(list(self.time_series), list(new_series))
class CeresTreeTest(TestCase):
    """Tests for CeresTree: construction, node path/lookup, find, store, fetch.

    All filesystem access is mocked; only the tree logic is exercised.
    """

    def setUp(self):
        with patch('ceres.isdir', new=Mock(return_value=True)):
            self.ceres_tree = CeresTree('/graphite/storage/ceres')

    @patch('ceres.isdir', new=Mock(return_value=False))
    def test_init_invalid(self):
        # A path with no tree directory must be rejected.
        self.assertRaises(ValueError, CeresTree, '/nonexistent_path')

    @patch('ceres.isdir', new=Mock(return_value=True))
    @patch('ceres.abspath')
    def test_init_valid(self, abspath_mock):
        # The constructor should store the absolutized root path.
        abspath_mock.return_value = '/var/graphite/storage/ceres'
        tree = CeresTree('/graphite/storage/ceres')
        abspath_mock.assert_called_once_with('/graphite/storage/ceres')
        self.assertEqual('/var/graphite/storage/ceres', tree.root)

    @patch('ceres.isdir', new=Mock(return_value=False))
    @patch.object(CeresTree, '__init__')
    @patch('os.makedirs')
    def test_create_tree_new_dir(self, makedirs_mock, ceres_tree_init_mock):
        # Without properties, createTree makes the marker dir but writes
        # no property files.
        ceres_tree_init_mock.return_value = None
        with patch('__builtin__.open', mock_open()) as open_mock:
            CeresTree.createTree('/graphite/storage/ceres')
        makedirs_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree', DIR_PERMS)
        self.assertFalse(open_mock.called)
        ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')

    @patch('ceres.isdir', new=Mock(return_value=True))
    @patch.object(CeresTree, '__init__')
    @patch('os.makedirs')
    def test_create_tree_existing_dir(self, makedirs_mock, ceres_tree_init_mock):
        # An existing marker dir is reused, not recreated.
        ceres_tree_init_mock.return_value = None
        with patch('__builtin__.open', mock_open()) as open_mock:
            CeresTree.createTree('/graphite/storage/ceres')
        self.assertFalse(makedirs_mock.called)
        self.assertFalse(open_mock.called)
        ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')

    @patch('ceres.isdir', new=Mock(return_value=True))
    @patch.object(CeresTree, '__init__', new=Mock(return_value=None))
    @patch('os.makedirs', new=Mock())
    def test_create_tree_write_props(self):
        # Each keyword property becomes a file under .ceres-tree.
        props = {
            "foo_prop": "foo_value",
            "bar_prop": "bar_value"}
        with patch('__builtin__.open', mock_open()) as open_mock:
            CeresTree.createTree('/graphite/storage/ceres', **props)
        for (prop, value) in props.items():
            open_mock.assert_any_call(join('/graphite/storage/ceres', '.ceres-tree', prop), 'w')
            open_mock.return_value.write.assert_any_call(value)

    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    def test_get_node_path_clean(self):
        # Filesystem path below the root maps to a dotted node path.
        result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo')
        self.assertEqual('metric.foo', result)

    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    def test_get_node_path_trailing_slash(self):
        result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo/')
        self.assertEqual('metric.foo', result)

    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    def test_get_node_path_outside_tree(self):
        # Paths outside the tree root are invalid.
        self.assertRaises(ValueError, self.ceres_tree.getNodePath, '/metric/foo')

    @patch('ceres.CeresNode', spec=CeresNode)
    def test_get_node_uncached(self, ceres_node_mock):
        # A cache miss constructs a CeresNode from the tree, node path,
        # and filesystem path.
        ceres_node_mock.isNodeDir.return_value = True
        result = self.ceres_tree.getNode('metrics.foo')
        ceres_node_mock.assert_called_once_with(
            self.ceres_tree,
            'metrics.foo',
            '/graphite/storage/ceres/metrics/foo')
        self.assertEqual(result, ceres_node_mock())

    @patch('ceres.CeresNode', spec=CeresNode)
    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    @patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
    def test_find_explicit_metric(self, ceres_node_mock):
        ceres_node_mock.isNodeDir.return_value = True
        result = list(self.ceres_tree.find('metrics.foo'))
        self.assertEqual(1, len(result))
        self.assertEqual(result[0], ceres_node_mock())

    @patch('ceres.CeresNode', spec=CeresNode)
    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    @patch('ceres.glob')
    def test_find_wildcard(self, glob_mock, ceres_node_mock):
        # Each glob expansion yields one node.
        matches = ['foo', 'bar', 'baz']
        glob_mock.side_effect = lambda x: [x.replace('*', m) for m in matches]
        ceres_node_mock.isNodeDir.return_value = True
        result = list(self.ceres_tree.find('metrics.*'))
        self.assertEqual(3, len(result))
        ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.foo', ANY)
        ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.bar', ANY)
        ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.baz', ANY)

    @patch('ceres.CeresNode', spec=CeresNode)
    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    @patch('ceres.glob', new=Mock(return_value=[]))
    def test_find_wildcard_no_matches(self, ceres_node_mock):
        ceres_node_mock.isNodeDir.return_value = False
        result = list(self.ceres_tree.find('metrics.*'))
        self.assertEqual(0, len(result))
        self.assertFalse(ceres_node_mock.called)

    @patch('ceres.CeresNode', spec=CeresNode)
    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    @patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
    def test_find_metric_with_interval(self, ceres_node_mock):
        # Nodes lacking data for the interval are filtered out.
        ceres_node_mock.isNodeDir.return_value = True
        ceres_node_mock.return_value.hasDataForInterval.return_value = False
        result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
        self.assertEqual(0, len(result))
        ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)

    @patch('ceres.CeresNode', spec=CeresNode)
    @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
    @patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
    def test_find_metric_with_interval_not_found(self, ceres_node_mock):
        ceres_node_mock.isNodeDir.return_value = True
        ceres_node_mock.return_value.hasDataForInterval.return_value = True
        result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
        self.assertEqual(result[0], ceres_node_mock())
        ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)

    def test_store_invalid_node(self):
        with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
            datapoints = [(100, 1.0)]
            self.assertRaises(NodeNotFound, self.ceres_tree.store, 'metrics.foo', datapoints)

    @patch('ceres.CeresNode', spec=CeresNode)
    def test_store_valid_node(self, ceres_node_mock):
        datapoints = [(100, 1.0)]
        self.ceres_tree.store('metrics.foo', datapoints)
        ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
        ceres_node_mock.return_value.write.assert_called_once_with(datapoints)

    # NOTE(review): the two methods below lack the 'test_' prefix, so
    # unittest discovery never runs them -- confirm whether that is
    # intentional or an oversight.
    def fetch_invalid_node(self):
        with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
            self.assertRaises(NodeNotFound, self.ceres_tree.fetch, 'metrics.foo')

    @patch('ceres.CeresNode', spec=CeresNode)
    def fetch_metric(self, ceres_node_mock):
        read_mock = ceres_node_mock.return_value.read
        read_mock.return_value = Mock(spec=TimeSeriesData)
        result = self.ceres_tree.fetch('metrics.foo', 0, 1000)
        ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
        read_mock.assert_called_once_with(0, 1000)
        self.assertEqual(Mock(spec=TimeSeriesData), result)
class CeresNodeTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(self.ceres_tree, 'sample_metric', '/graphite/storage/ceres/sample_metric')
self.ceres_node.timeStep = 60
slice_configs = [
( 1200, 1800, 60 ),
( 600, 1200, 60 )]
self.ceres_slices = []
for start, end, step in slice_configs:
slice_mock = Mock(spec=CeresSlice)
slice_mock.startTime = start
slice_mock.endTime = end
slice_mock.timeStep = step
self.ceres_slices.append(slice_mock)
def test_init_sets_default_cache_behavior(self):
ceres_node = CeresNode(self.ceres_tree, 'sample_metric', '/graphite/storage/ceres/sample_metric')
self.assertEqual(DEFAULT_SLICE_CACHING_BEHAVIOR, ceres_node.sliceCachingBehavior)
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata')
def test_create_sets_a_default_timestep(self, write_metadata_mock):
ceres_node = CeresNode.create(self.ceres_tree, 'sample_metric')
write_metadata_mock.assert_called_with(dict(timeStep=DEFAULT_TIMESTEP))
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata', new=Mock())
def test_create_returns_new_ceres_node(self):
ceres_node = CeresNode.create(self.ceres_tree, 'sample_metric')
self.assertTrue(isinstance(ceres_node, CeresNode))
def test_write_metadata(self):
import json
open_mock = mock_open()
metadata = dict(timeStep=60, aggregationMethod='avg')
with patch('__builtin__.open', open_mock):
self.ceres_node.writeMetadata(metadata)
self.assertEquals(json.dumps(metadata), fetch_mock_open_writes(open_mock))
def test_read_metadata_sets_timestep(self):
import json
metadata = dict(timeStep=60, aggregationMethod='avg')
json_metadata = json.dumps(metadata)
open_mock = mock_open(read_data=json_metadata)
with patch('__builtin__.open', open_mock):
self.ceres_node.readMetadata()
open_mock().read.assert_called_once()
self.assertEqual(60, self.ceres_node.timeStep)
def test_set_slice_caching_behavior_validates_names(self):
self.ceres_node.setSliceCachingBehavior('none')
self.assertEquals('none', self.ceres_node.sliceCachingBehavior)
self.ceres_node.setSliceCachingBehavior('all')
self.assertEquals('all', self.ceres_node.sliceCachingBehavior)
self.ceres_node.setSliceCachingBehavior('latest')
self.assertEquals('latest', self.ceres_node.sliceCachingBehavior)
self.assertRaises(ValueError, self.ceres_node.setSliceCachingBehavior, 'foo')
# Assert unchanged
self.assertEquals('latest', self.ceres_node.sliceCachingBehavior)
def test_slices_is_a_generator(self):
from types import GeneratorType
self.assertTrue(isinstance(self.ceres_node.slices, GeneratorType))
def test_slices_returns_cached_set_when_behavior_is_all(self):
    """With behavior 'all', slices comes straight from the cache; disk untouched."""
    def mock_slice():
        return Mock(spec=CeresSlice)
    self.ceres_node.setSliceCachingBehavior('all')
    # Bug fix: the original stored the factory function itself four times
    # ([mock_slice for c in ...]) instead of four slice mocks; call it so the
    # cache actually holds CeresSlice-shaped mocks.
    cached_contents = [mock_slice() for _ in range(4)]
    self.ceres_node.sliceCache = cached_contents
    with patch('ceres.CeresNode.readSlices') as read_slices_mock:
        slice_list = list(self.ceres_node.slices)
        # The cached set must be served without any disk access.
        self.assertFalse(read_slices_mock.called)
        self.assertEquals(cached_contents, slice_list)
def test_slices_returns_first_cached_when_behavior_is_latest(self):
    """With behavior 'latest', the cached slice is yielded before any disk read."""
    self.ceres_node.setSliceCachingBehavior('latest')
    cached_slice = Mock(spec=CeresSlice)
    self.ceres_node.sliceCache = cached_slice
    read_slices_mock = Mock(return_value=[])
    with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
        slice_iter = self.ceres_node.slices
        self.assertEquals(cached_slice, slice_iter.next())
        # The cached entry must come out before readSlices() is ever hit.
        self.assertFalse(read_slices_mock.called)
def test_slices_reads_remaining_when_behavior_is_latest(self):
    """Once the cached slice is exhausted, the remainder is read from disk once."""
    self.ceres_node.setSliceCachingBehavior('latest')
    self.ceres_node.sliceCache = Mock(spec=CeresSlice)
    read_slices_mock = Mock(return_value=[(0, 60)])
    with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
        slice_iter = self.ceres_node.slices
        slice_iter.next()  # consume the cached slice
        # Draining the generator is what forces the disk read.
        try:
            while True:
                slice_iter.next()
        except StopIteration:
            pass
    read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_behavior_is_none(self):
    """With caching disabled, the first slice comes from readSlices()."""
    self.ceres_node.setSliceCachingBehavior('none')
    read_slices_mock = Mock(return_value=[(0, 60)])
    with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
        self.ceres_node.slices.next()
    read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_all(self):
    """Behavior 'all' with a cold cache still falls back to the disk read."""
    self.ceres_node.setSliceCachingBehavior('all')
    read_slices_mock = Mock(return_value=[(0, 60)])
    with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
        self.ceres_node.slices.next()
    read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_latest(self):
    """Behavior 'latest' with a cold cache still falls back to the disk read."""
    # Bug fix: the original set 'all' here despite the test name, making this
    # an exact duplicate of the previous test and leaving the cold-cache
    # 'latest' path unexercised.
    self.ceres_node.setSliceCachingBehavior('latest')
    read_slices_mock = Mock(return_value=[(0, 60)])
    with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
        slice_iter = self.ceres_node.slices
        slice_iter.next()
    read_slices_mock.assert_called_once_with()
@patch('ceres.exists', new=Mock(return_value=False))
def test_read_slices_raises_when_node_doesnt_exist(self):
    """readSlices() on a vanished node directory raises NodeDeleted."""
    self.assertRaises(NodeDeleted, self.ceres_node.readSlices)
# Bug fix: was Mock(return_Value=True) — a typo that merely set a junk
# 'return_Value' attribute; the test only passed because a Mock's default
# return value is itself truthy.
@patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_ignores_not_slices(self):
    """Files not matching the <start>@<step>.slice pattern are skipped."""
    listdir_mock = Mock(return_value=['0@60.slice', '0@300.slice', 'foo'])
    with patch('ceres.os.listdir', new=listdir_mock):
        self.assertEquals(2, len(self.ceres_node.readSlices()))
# Bug fix: 'return_Value' typo corrected to 'return_value' (see sibling test).
@patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_parses_slice_filenames(self):
    """Slice filenames decompose into (startTime, timeStep) tuples."""
    listdir_mock = Mock(return_value=['0@60.slice', '0@300.slice'])
    with patch('ceres.os.listdir', new=listdir_mock):
        slice_infos = self.ceres_node.readSlices()
        self.assertTrue((0, 60) in slice_infos)
        self.assertTrue((0, 300) in slice_infos)
# Bug fix: 'return_Value' typo corrected to 'return_value' (see sibling test).
@patch('ceres.exists', new=Mock(return_value=True))
def test_read_slices_reverse_sorts_by_time(self):
    """readSlices() orders slice infos newest start time first."""
    listdir_mock = Mock(return_value=[
        '0@60.slice',
        '320@300.slice',
        '120@120.slice',
        '0@120.slice',
        '600@300.slice'])
    with patch('ceres.os.listdir', new=listdir_mock):
        slice_infos = self.ceres_node.readSlices()
    slice_timestamps = [s[0] for s in slice_infos]
    self.assertEqual([600, 320, 120, 0, 0], slice_timestamps)
def test_no_data_exists_if_no_slices_exist(self):
    """A node with zero slices has no data for any concrete interval."""
    no_slices = Mock(return_value=[])
    with patch('ceres.CeresNode.readSlices', new=no_slices):
        self.assertFalse(self.ceres_node.hasDataForInterval(0, 60))
def test_no_data_exists_if_no_slices_exist_and_no_time_specified(self):
    """A node with zero slices has no data even for the open-ended interval."""
    no_slices = Mock(return_value=[])
    with patch('ceres.CeresNode.readSlices', new=no_slices):
        self.assertFalse(self.ceres_node.hasDataForInterval(None, None))
def test_data_exists_if_slices_exist_and_no_time_specified(self):
    """Any existing slice satisfies the open-ended (None, None) interval."""
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.assertTrue(self.ceres_node.hasDataForInterval(None, None))
def test_data_exists_if_slice_covers_interval_completely(self):
    """An interval fully inside one slice's range reports data present."""
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
        self.assertTrue(self.ceres_node.hasDataForInterval(1200, 1800))
def test_data_exists_if_slice_covers_interval_end(self):
    """Overlap at only the tail of the query interval still counts as data."""
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
        self.assertTrue(self.ceres_node.hasDataForInterval(600, 1260))
def test_data_exists_if_slice_covers_interval_start(self):
    """Overlap at only the head of the query interval still counts as data."""
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
        self.assertTrue(self.ceres_node.hasDataForInterval(1740, 2100))
def test_no_data_exists_if_slice_touches_interval_end(self):
    """A slice that merely abuts the interval's end boundary is not overlap."""
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
        self.assertFalse(self.ceres_node.hasDataForInterval(600, 1200))
def test_no_data_exists_if_slice_touches_interval_start(self):
    """A slice that merely abuts the interval's start boundary is not overlap."""
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
        self.assertFalse(self.ceres_node.hasDataForInterval(1800, 2100))
def test_compact_returns_empty_if_passed_empty(self):
    """compact() of no datapoints is an empty list."""
    result = self.ceres_node.compact([])
    self.assertEqual([], result)
def test_compact_filters_null_values(self):
    """Datapoints whose value is None are dropped entirely."""
    result = self.ceres_node.compact([(60, None)])
    self.assertEqual([], result)
def test_compact_rounds_timestamps_down_to_step(self):
    """Timestamps are floored to the node's timeStep boundary."""
    result = self.ceres_node.compact([(605, 0)])
    self.assertEqual([[(600, 0)]], result)
def test_compact_drops_duplicate_timestamps(self):
    """Only one datapoint survives per timestamp."""
    compacted = self.ceres_node.compact([(600, 0), (600, 0)])
    self.assertEqual([[(600, 0)]], compacted)
def test_compact_groups_contiguous_points(self):
    """Step-adjacent points share one group; a gap starts a new group."""
    compacted = self.ceres_node.compact([(600, 0), (660, 0), (840, 0)])
    self.assertEqual([[(600, 0), (660, 0)], [(840, 0)]], compacted)
def test_write_noops_if_no_datapoints(self):
    """write() with an empty list must not touch any slice."""
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write([])
    self.assertFalse(self.ceres_slices[0].write.called)
def test_write_within_first_slice(self):
    """Contiguous points inside the newest slice go out as a single write."""
    datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    self.ceres_slices[0].write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create')
def test_write_within_first_slice_doesnt_create(self, slice_create_mock):
    """A write fully covered by an existing slice must not create a new one."""
    datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    self.assertFalse(slice_create_mock.called)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_first_slice_with_gaps(self):
    """A gap splits the write into per-group calls, most recent group first."""
    datapoints = [(1200, 0.0), (1320, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    expected = [call.write([datapoints[1]]), call.write([datapoints[0]])]
    self.ceres_slices[0].assert_has_calls(expected)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_previous_slice(self):
    """Points older than the newest slice land in the slice covering them."""
    datapoints = [(720, 0.0), (780, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    # The second (older) slice owns this time range.
    self.ceres_slices[1].write.assert_called_once_with(datapoints)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_within_previous_slice_with_gaps(self):
    """Gapped points in an older slice are written group by group, newest first."""
    datapoints = [(720, 0.0), (840, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    expected = [call.write([datapoints[1]]), call.write([datapoints[0]])]
    self.ceres_slices[1].assert_has_calls(expected)
@patch('ceres.CeresSlice.create', new=Mock())
def test_write_across_slice_boundaries(self):
    """A run spanning two slices is partitioned between them at the boundary."""
    datapoints = [(1080, 0.0), (1140, 1.0), (1200, 2.0), (1260, 3.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    # Later points go to the newest slice, earlier ones to the previous slice.
    self.ceres_slices[0].write.assert_called_once_with(datapoints[2:4])
    self.ceres_slices[1].write.assert_called_once_with(datapoints[0:2])
@patch('ceres.CeresSlice.create')
def test_create_during_write_clears_slice_cache(self, slice_create_mock):
    """Creating a brand-new slice inside write() invalidates the slice cache."""
    self.ceres_node.setSliceCachingBehavior('all')
    self.ceres_node.sliceCache = self.ceres_slices
    datapoints = [(300, 0.0)]  # older than any existing slice -> forces create
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
        self.ceres_node.write(datapoints)
    self.assertEquals(None, self.ceres_node.sliceCache)
class CeresSliceTest(TestCase):
    """Tests for CeresSlice construction."""

    def setUp(self):
        # Pretend the tree and node directories exist so no disk I/O happens.
        with patch('ceres.isdir', new=Mock(return_value=True)), \
             patch('ceres.exists', new=Mock(return_value=True)):
            self.ceres_tree = CeresTree('/graphite/storage/ceres')
            self.ceres_node = CeresNode(
                self.ceres_tree, 'sample_metric',
                '/graphite/storage/ceres/sample_metric')

    def test_init_sets_fspath_name(self):
        """The slice's file path encodes its start time and step."""
        ceres_slice = CeresSlice(self.ceres_node, 0, 60)
        self.assertTrue(ceres_slice.fsPath.endswith('0@60.slice'))
|
endlessm/chromium-browser | refs/heads/master | third_party/catapult/third_party/gae_ts_mon/gae_ts_mon/protobuf/google/protobuf/unittest_import_pb2.py | 35 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_import.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import unittest_import_public_pb2 as google_dot_protobuf_dot_unittest__import__public__pb2
from google.protobuf.unittest_import_public_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_import.proto',
package='protobuf_unittest_import',
syntax='proto2',
serialized_pb=_b('\n%google/protobuf/unittest_import.proto\x12\x18protobuf_unittest_import\x1a,google/protobuf/unittest_import_public.proto\"\x1a\n\rImportMessage\x12\t\n\x01\x64\x18\x01 \x01(\x05*<\n\nImportEnum\x12\x0e\n\nIMPORT_FOO\x10\x07\x12\x0e\n\nIMPORT_BAR\x10\x08\x12\x0e\n\nIMPORT_BAZ\x10\t*1\n\x10ImportEnumForMap\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x46OO\x10\x01\x12\x07\n\x03\x42\x41R\x10\x02\x42\x1f\n\x18\x63om.google.protobuf.testH\x01\xf8\x01\x01P\x00')
,
dependencies=[google_dot_protobuf_dot_unittest__import__public__pb2.DESCRIPTOR,],
public_dependencies=[google_dot_protobuf_dot_unittest__import__public__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IMPORTENUM = _descriptor.EnumDescriptor(
name='ImportEnum',
full_name='protobuf_unittest_import.ImportEnum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IMPORT_FOO', index=0, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAR', index=1, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAZ', index=2, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=141,
serialized_end=201,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUM)
ImportEnum = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUM)
_IMPORTENUMFORMAP = _descriptor.EnumDescriptor(
name='ImportEnumForMap',
full_name='protobuf_unittest_import.ImportEnumForMap',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FOO', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAR', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=203,
serialized_end=252,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUMFORMAP)
ImportEnumForMap = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUMFORMAP)
IMPORT_FOO = 7
IMPORT_BAR = 8
IMPORT_BAZ = 9
UNKNOWN = 0
FOO = 1
BAR = 2
_IMPORTMESSAGE = _descriptor.Descriptor(
name='ImportMessage',
full_name='protobuf_unittest_import.ImportMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='d', full_name='protobuf_unittest_import.ImportMessage.d', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=139,
)
DESCRIPTOR.message_types_by_name['ImportMessage'] = _IMPORTMESSAGE
DESCRIPTOR.enum_types_by_name['ImportEnum'] = _IMPORTENUM
DESCRIPTOR.enum_types_by_name['ImportEnumForMap'] = _IMPORTENUMFORMAP
ImportMessage = _reflection.GeneratedProtocolMessageType('ImportMessage', (_message.Message,), dict(
DESCRIPTOR = _IMPORTMESSAGE,
__module__ = 'google.protobuf.unittest_import_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest_import.ImportMessage)
))
_sym_db.RegisterMessage(ImportMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.protobuf.testH\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
yograterol/django | refs/heads/master | tests/test_client_regress/session.py | 373 | from django.contrib.sessions.backends.base import SessionBase
class SessionStore(SessionBase):
    """
    A simple cookie-based session storage implementation.

    The session key is actually the session data, pickled and encoded.
    This means that saving the session will change the session key.
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)

    def exists(self, session_key):
        # Keys are self-contained; nothing is ever stored server-side.
        return False

    def create(self):
        # A fresh session is just the encoding of an empty dict.
        self._session_key = self.encode({})

    def save(self, must_create=False):
        # Re-encoding the session data produces the new session key.
        self._session_key = self.encode(self._session)

    def delete(self, session_key=None):
        # Deleting resets the key to an encoded empty session.
        self._session_key = self.encode({})

    def load(self):
        """Decode the session data out of the key; reset on any decode failure."""
        try:
            return self.decode(self.session_key)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. A corrupt or missing key simply
            # yields a fresh, modified session.
            self.modified = True
            return {}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.