repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
CapOM/ChromiumGStreamerBackend | tools/isolate_driver.py | 2 | 12176 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Adaptor script called through build/isolate.gypi.
Creates a wrapping .isolate which 'includes' the original one and can be
consumed by tools/swarming_client/isolate.py. Path variables are determined
based on the current working directory. The relative_cwd in the .isolated file
is determined based on the .isolate file that declares the 'command' variable
to be used, so the wrapping .isolate doesn't affect this value.
This script loads build.ninja and processes it to determine all the executables
referenced by the isolated target. It adds them in the wrapping .isolate file.
WARNING: The target to use for build.ninja analysis is the base name of the
.isolate file plus '_run'. For example, 'foo_test.isolate' would have the target
'foo_test_run' analysed.
"""
import glob
import json
import logging
import os
import posixpath
import re
import StringIO
import subprocess
import sys
import time
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
SWARMING_CLIENT_DIR = os.path.join(TOOLS_DIR, 'swarming_client')
SRC_DIR = os.path.dirname(TOOLS_DIR)
sys.path.insert(0, SWARMING_CLIENT_DIR)
import isolate_format
def load_ninja_recursively(build_dir, ninja_path, build_steps):
"""Crudely extracts all the subninja and build referenced in ninja_path.
In particular, it ignores rule and variable declarations. The goal is to be
performant (well, as much as python can be performant), which is currently in
the <200ms range for a complete chromium tree. As such the code is laid out
for performance instead of readability.
"""
logging.debug('Loading %s', ninja_path)
try:
with open(os.path.join(build_dir, ninja_path), 'rb') as f:
line = None
merge_line = ''
subninja = []
for line in f:
line = line.rstrip()
if not line:
continue
if line[-1] == '$':
# The next line needs to be merged in.
merge_line += line[:-1]
continue
if merge_line:
line = merge_line + line
merge_line = ''
statement = line[:line.find(' ')]
if statement == 'build':
# Save the dependency list as a raw string. Only the lines needed will
# be processed with raw_build_to_deps(). This saves a good 70ms of
# processing time.
build_target, dependencies = line[6:].split(': ', 1)
# Interestingly, trying to be smart and only saving the build steps
# with the intended extensions ('', '.stamp', '.so') slows down
# parsing even if 90% of the build rules can be skipped.
# On Windows, a single step may generate two targets, so split items
# accordingly. It has only been seen for .exe/.exe.pdb combos.
for i in build_target.strip().split():
build_steps[i] = dependencies
elif statement == 'subninja':
subninja.append(line[9:])
except IOError:
print >> sys.stderr, 'Failed to open %s' % ninja_path
raise
total = 1
for rel_path in subninja:
try:
# Load each of the files referenced.
# TODO(maruel): Skip the files known to not be needed. It saves an awful
# lot of processing time.
total += load_ninja_recursively(build_dir, rel_path, build_steps)
except IOError:
print >> sys.stderr, '... as referenced by %s' % ninja_path
raise
return total
def load_ninja(build_dir):
"""Loads the tree of .ninja files in build_dir.
Returns:
dict(target: list of dependencies).
"""
build_steps = {}
total = load_ninja_recursively(build_dir, 'build.ninja', build_steps)
logging.info('Loaded %d ninja files, %d build steps', total, len(build_steps))
return build_steps
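# Usage sketch (illustrative; target names are hypothetical). Each value is the
# raw dependency string after 'build X: ', parsed lazily by raw_build_to_deps():
#   build_steps = load_ninja('out/Debug')
#   build_steps['foo_test']
#   # -> 'link obj/foo/foo_test.main.o lib/libbase.so'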
def using_blacklist(item):
"""Returns True if an item should be analyzed.
Ignores many rules that are assumed to not depend on a dynamic library. If
the assumption doesn't hold true anymore for a file format, remove it from
this list. This is simply an optimization.
"""
# *.json is ignored below; *.isolated.gen.json is an exception: it is
# produced by isolate_driver.py when 'test_isolation_mode==prepare'.
if item.endswith('.isolated.gen.json'):
return True
IGNORED = (
'.a', '.cc', '.css', '.dat', '.def', '.frag', '.h', '.html', '.isolate',
'.js', '.json', '.manifest', '.o', '.obj', '.pak', '.png', '.pdb', '.py',
'.strings', '.test', '.txt', '.vert',
)
# ninja files use native path format.
ext = os.path.splitext(item)[1]
if ext in IGNORED:
return False
# Special case Windows, keep .dll.lib but discard .lib.
if sys.platform == 'win32':
if item.endswith('.dll.lib'):
return True
if ext == '.lib':
return False
return item not in ('', '|', '||')
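# Worked examples on a non-Windows platform (illustrative, hypothetical names):
#   using_blacklist('obj/foo/bar.o')   # -> False, '.o' is in IGNORED
#   using_blacklist('lib/libbase.so')  # -> True, dynamic libs are kept
#   using_blacklist('||')              # -> False, ninja order-only separator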
# This is a whitelist of known ninja native rules.
KNOWN_TOOLS = frozenset(
(
'copy',
'copy_infoplist',
'cxx',
'idl',
'link',
'link_embed',
'mac_tool',
'package_framework',
'phony',
'rc',
'solink',
'solink_embed',
'solink_module',
'solink_module_embed',
'solink_module_notoc',
'solink_notoc',
'stamp',
))
def raw_build_to_deps(item):
"""Converts a raw ninja build statement into the list of interesting
dependencies.
"""
items = filter(None, item.split(' '))
for i in xrange(len(items) - 2, 0, -1):
# Merge back '$ ' escaping.
# OMG please delete this code as soon as possible.
if items[i].endswith('$'):
items[i] = items[i][:-1] + ' ' + items[i+1]
items.pop(i+1)
# Always skip the first item; it is the build rule name, e.g. 'cxx', 'link', etc.
if items[0] not in KNOWN_TOOLS:
# Check for phony ninja rules.
assert re.match(r'^[^.]+_[0-9a-f]{32}$', items[0]), items
return filter(using_blacklist, items[1:])
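# Worked example (illustrative; the statement is hypothetical). The input is
# the raw string stored by load_ninja_recursively(), i.e. everything after
# 'build X: ':
#   raw_build_to_deps('link obj/foo/foo.main.o lib/libbase.so || foo.stamp')
#   # -> ['lib/libbase.so', 'foo.stamp'] ('.o' and '||' are filtered out)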
def collect_deps(target, build_steps, dependencies_added, rules_seen):
"""Recursively adds all the interesting dependencies for |target|
into |dependencies_added|.
"""
if rules_seen is None:
rules_seen = set()
if target in rules_seen:
# TODO(maruel): Figure out how it happens.
logging.warning('Circular dependency for %s!', target)
return
rules_seen.add(target)
try:
dependencies = raw_build_to_deps(build_steps[target])
except KeyError:
logging.info('Failed to find a build step to generate: %s', target)
return
logging.debug('collect_deps(%s) -> %s', target, dependencies)
for dependency in dependencies:
dependencies_added.add(dependency)
collect_deps(dependency, build_steps, dependencies_added, rules_seen)
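# Usage sketch (illustrative; the target name is hypothetical):
#   deps = set()
#   collect_deps('foo_test_run', build_steps, deps, None)
#   # deps now holds every transitively reachable, non-blacklisted dependency.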
def post_process_deps(build_dir, dependencies):
"""Processes the dependency list with OS specific rules.
Returns:
list of dependencies to add.
"""
out = []
for i in dependencies:
if os.path.isabs(i):
# In some rare cases, a dependency is set explicitly on files outside
# the checkout. In practice, it was observed only for /usr/bin/eu-strip on
# official Chrome builds.
continue
if os.path.isdir(os.path.join(build_dir, i)):
if sys.platform == 'darwin':
# This is an application.
out.append(i + '/')
elif i.endswith('.so.TOC'):
out.append(i[:-4])
elif i.endswith('.dylib.TOC'):
i = i[:-4]
out.append(i)
# Debug symbols may not be present.
i += '.dSYM'
if os.path.isdir(os.path.join(build_dir, i)):
out.append(i + '/')
elif i.endswith('.dll.lib'):
i = i[:-4]
out.append(i)
# Naming is inconsistent.
if os.path.isfile(os.path.join(build_dir, i + '.pdb')):
out.append(i + '.pdb')
if os.path.isfile(os.path.join(build_dir, i[:-4] + '.pdb')):
out.append(i[:-4] + '.pdb')
elif i.endswith('.exe'):
out.append(i)
# Naming is inconsistent.
if os.path.isfile(os.path.join(build_dir, i + '.pdb')):
out.append(i + '.pdb')
if os.path.isfile(os.path.join(build_dir, i[:-4] + '.pdb')):
out.append(i[:-4] + '.pdb')
elif i.endswith('.nexe'):
out.append(i)
i += '.debug'
if os.path.isfile(os.path.join(build_dir, i)):
out.append(i)
elif sys.platform != 'win32':
# On POSIX, executables have no extension.
if not os.path.splitext(i)[1]:
out.append(i)
return out
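# Illustrative examples of the per-OS rewriting (hypothetical inputs):
#   'lib/libbase.so.TOC' -> 'lib/libbase.so' (strip the table-of-contents stub)
#   'base_unittests.exe' -> itself, plus its .pdb when one exists on disk
#   '/usr/bin/eu-strip'  -> dropped, absolute paths are outside the checkout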
def create_wrapper(args, isolate_index, isolated_index):
"""Creates a wrapper .isolate that add dynamic libs.
The original .isolate is not modified.
"""
cwd = os.getcwd()
isolate = args[isolate_index]
# The code assumes the .isolate file is always specified path-less in cwd. Fix
# if this assumption doesn't hold true.
assert os.path.basename(isolate) == isolate, isolate
# This will look like ../out/Debug. This is based against cwd. Note that this
# must equal the value provided as PRODUCT_DIR.
build_dir = os.path.dirname(args[isolated_index])
# This will look like chrome/unit_tests.isolate. It is based against SRC_DIR.
# It's used to calculate temp_isolate.
src_isolate = os.path.relpath(os.path.join(cwd, isolate), SRC_DIR)
# The wrapping .isolate. This will look like
# ../out/Debug/gen/chrome/unit_tests.isolate.
temp_isolate = os.path.join(build_dir, 'gen', src_isolate)
temp_isolate_dir = os.path.dirname(temp_isolate)
# Relative path between the new and old .isolate file.
isolate_relpath = os.path.relpath(
'.', temp_isolate_dir).replace(os.path.sep, '/')
# It's a big assumption here that the name of the isolate file matches the
# primary target '_run'. Fix accordingly if this doesn't hold true, e.g.
# complain to maruel@.
target = isolate[:-len('.isolate')] + '_run'
build_steps = load_ninja(build_dir)
binary_deps = set()
collect_deps(target, build_steps, binary_deps, None)
binary_deps = post_process_deps(build_dir, binary_deps)
logging.debug(
'Binary dependencies:%s', ''.join('\n ' + i for i in binary_deps))
# Now do actual wrapping .isolate.
isolate_dict = {
'includes': [
posixpath.join(isolate_relpath, isolate),
],
'variables': {
# Will look like ['<(PRODUCT_DIR)/lib/flibuser_prefs.so'].
'files': sorted(
'<(PRODUCT_DIR)/%s' % i.replace(os.path.sep, '/')
for i in binary_deps),
},
}
if not os.path.isdir(temp_isolate_dir):
os.makedirs(temp_isolate_dir)
comment = (
'# Warning: this file was AUTOGENERATED.\n'
'# DO NOT EDIT.\n')
out = StringIO.StringIO()
isolate_format.print_all(comment, isolate_dict, out)
isolate_content = out.getvalue()
with open(temp_isolate, 'wb') as f:
f.write(isolate_content)
logging.info('Added %d dynamic libs', len(binary_deps))
logging.debug('%s', isolate_content)
args[isolate_index] = temp_isolate
def prepare_isolate_call(args, output):
"""Gathers all information required to run isolate.py later.
Dumps it as JSON to |output| file.
"""
with open(output, 'wb') as f:
json.dump({
'args': args,
'dir': os.getcwd(),
'version': 1,
}, f, indent=2, sort_keys=True)
def main():
logging.basicConfig(level=logging.ERROR, format='%(levelname)7s %(message)s')
args = sys.argv[1:]
mode = args[0] if args else None
isolate = None
isolated = None
for i, arg in enumerate(args):
if arg == '--isolate':
isolate = i + 1
if arg == '--isolated':
isolated = i + 1
if isolate is None or isolated is None or not mode:
print >> sys.stderr, 'Internal failure'
return 1
create_wrapper(args, isolate, isolated)
# In 'prepare' mode just collect all required information for the postponed
# isolate.py invocation, and store it in a *.isolated.gen.json file.
if mode == 'prepare':
prepare_isolate_call(args[1:], args[isolated] + '.gen.json')
return 0
swarming_client = os.path.join(SRC_DIR, 'tools', 'swarming_client')
sys.stdout.flush()
result = subprocess.call(
[sys.executable, os.path.join(swarming_client, 'isolate.py')] + args)
return result
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
pypot/scikit-learn | sklearn/decomposition/nmf.py | 16 | 19101 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
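# Worked example (illustrative): Hoyer's sparseness is
#   (sqrt(n) - ||x||_1 / ||x||_2) / (sqrt(n) - 1)
# so a one-hot vector scores 1.0 and a constant vector scores 0.0:
#   _sparseness(np.array([0., 0., 1., 0.]))  # -> 1.0
#   _sparseness(np.ones(4))                  # -> 0.0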
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
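# Usage sketch (illustrative; data is random, shapes follow the docstring):
#   X = np.abs(np.random.RandomState(0).randn(6, 5))
#   W, H = _initialize_nmf(X, n_components=3, variant='ar', random_state=0)
#   W.shape, H.shape  # -> ((6, 3), (3, 5))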
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow finding a better step size but lead to a longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
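# Usage sketch (illustrative; matrices are random and assumed, not from the
# original tests). Solves min_H ||W H - V||_2 with H >= 0:
#   rng = np.random.RandomState(0)
#   V, W = np.abs(rng.randn(4, 3)), np.abs(rng.randn(4, 2))
#   H, grad, n_iter = _nls_subproblem(V, W, np.abs(rng.randn(2, 3)),
#                                     tol=1e-4, max_iter=200)
#   (H >= 0).all()  # -> True, the projection step keeps H non-negative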
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
dagorret/YouCompleteMe | third_party/pythonfutures/docs/conf.py | 65 | 6302 | # -*- coding: utf-8 -*-
#
# futures documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 3 19:35:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'futures'
copyright = u'2009-2011, Brian Quinlan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.3'
# The full version, including alpha/beta/rc tags.
release = '2.1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'futuresdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'futures.tex', u'futures Documentation',
u'Brian Quinlan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-3.0 |
JimCircadian/ansible | test/units/modules/network/netscaler/test_netscaler_gslb_site.py | 18 | 24201 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerGSLBSiteModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite.gslbsite': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
super(TestNetscalerGSLBSiteModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerGSLBSiteModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_gslb_site
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_gslb_site.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_gslb_site.nitro_exception', MockException):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
gslb_site_proxy_mock = Mock()
ensure_feature_is_enabled_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=Mock(return_value=client_mock),
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
ensure_feature_is_enabled_mock.assert_called_with(client_mock, 'GSLB')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
gslb_site_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[True, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=Mock(return_value=gslb_site_proxy_mock),
):
self.module = netscaler_gslb_site
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_new_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
gslb_site_exists=Mock(side_effect=[False, True]),
gslb_site_identical=Mock(side_effect=[True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.add()])
def test_modified_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.update()])
def test_absent_gslb_site_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, False]),
gslb_site_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_has_calls([call.delete()])
def test_present_gslb_site_identical_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_not_called()
def test_absent_gslb_site_noop_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[False, False]),
gslb_site_identical=Mock(side_effect=[False, False]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
self.exited()
gslb_site_proxy_mock.assert_not_called()
def test_present_gslb_site_failed_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site differs from configured')
self.assertTrue(result['failed'])
def test_present_gslb_site_failed_create(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[False, False]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site does not exist')
self.assertTrue(result['failed'])
def test_present_gslb_site_update_immutable_attribute(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=['domain']),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
self.assertTrue(result['failed'])
def test_absent_gslb_site_failed_delete(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
client_mock = Mock()
m = Mock(return_value=client_mock)
glsb_site_proxy_attrs = {
'diff_object.return_value': {},
}
gslb_site_proxy_mock = Mock()
gslb_site_proxy_mock.configure_mock(**glsb_site_proxy_attrs)
config_proxy_mock = Mock(return_value=gslb_site_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
gslb_site_exists=Mock(side_effect=[True, True]),
gslb_site_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertEqual(result['msg'], 'GSLB site still exists')
self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
gslb_site_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_gslb_site
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_gslb_site',
gslb_site_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_gslb_site
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
| gpl-3.0 |
mrrrgn/build-mozharness | mozinfo/__init__.py | 72 | 1170 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
interface to transform introspected system information to a format palatable to
Mozilla
Module variables:
.. attribute:: bits
32 or 64
.. attribute:: isBsd
Returns ``True`` if the operating system is BSD
.. attribute:: isLinux
Returns ``True`` if the operating system is Linux
.. attribute:: isMac
Returns ``True`` if the operating system is Mac
.. attribute:: isWin
Returns ``True`` if the operating system is Windows
.. attribute:: os
Operating system [``'win'``, ``'mac'``, ``'linux'``, ...]
.. attribute:: processor
Processor architecture [``'x86'``, ``'x86_64'``, ``'ppc'``, ...]
.. attribute:: version
Operating system version string. For windows, the service pack information is also included
.. attribute:: info
Returns information identifying the current system.
* :attr:`bits`
* :attr:`os`
* :attr:`processor`
* :attr:`version`
"""
import mozinfo
from mozinfo import *
__all__ = mozinfo.__all__
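# Usage sketch (illustrative; exact values depend on the host system):
#   import mozinfo
#   mozinfo.os         # e.g. 'linux'
#   mozinfo.bits       # 32 or 64
#   mozinfo.processor  # e.g. 'x86_64'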
| mpl-2.0 |
afedchin/xbmctorrent | resources/site-packages/concurrent/futures/_compat.py | 179 | 4645 | from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template, namespace)
except SyntaxError:
e = _sys.exc_info()[1]
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
return result
| gpl-3.0 |
mnikhil-git/zerorpc-python | tests/test_server.py | 20 | 5203 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_server_manual():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_channel.emit('lolita', tuple())
event = client_channel.recv()
assert event.args == (42,)
client_channel.close()
client_channel = client.channel()
client_channel.emit('add', (1, 2))
event = client_channel.recv()
assert event.args == (3,)
client_channel.close()
srv.stop()
def test_client_server():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client()
client.connect(endpoint)
print client.lolita()
assert client.lolita() == 42
print client.add(1, 4)
assert client.add(1, 4) == 5
def test_client_server_client_timeout():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
gevent.sleep(10)
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
with assert_raises(zerorpc.TimeoutExpired):
print client.add(1, 4)
client.close()
srv.close()
def test_client_server_exception():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def raise_something(self, a):
return a[4]
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
with assert_raises(zerorpc.RemoteError):
print client.raise_something(42)
assert client.raise_something(range(5)) == 4
client.close()
srv.close()
def test_client_server_detailed_exception():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def raise_error(self):
raise RuntimeError('oops!')
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
with assert_raises(zerorpc.RemoteError):
print client.raise_error()
try:
client.raise_error()
except zerorpc.RemoteError as e:
print 'got that:', e
print 'name', e.name
print 'msg', e.msg
assert e.name == 'RuntimeError'
assert e.msg == 'oops!'
client.close()
srv.close()
def test_exception_compat_v1():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
pass
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
rpccall = client.channel()
rpccall.emit('donotexist', tuple())
event = rpccall.recv()
print event
assert event.name == 'ERR'
(name, msg, tb) = event.args
print 'detailed error', name, msg, tb
assert name == 'NameError'
assert msg == 'donotexist'
rpccall = client.channel()
rpccall.emit('donotexist', tuple(), xheader=dict(v=1))
event = rpccall.recv()
print event
assert event.name == 'ERR'
(msg,) = event.args
print 'msg only', msg
assert msg == "NameError('donotexist',)"
client_events.close()
srv.close()
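# Minimal end-to-end sketch distilled from the tests above (illustrative, not
# part of the original suite; it assumes the same zerorpc/gevent environment
# and reuses random_ipc_endpoint() from testutils):
def example_minimal_client_server():
    endpoint = random_ipc_endpoint()
    class EchoSrv(zerorpc.Server):
        def echo(self, msg):
            return msg
    srv = EchoSrv()
    srv.bind(endpoint)
    gevent.spawn(srv.run)
    client = zerorpc.Client(timeout=2)
    client.connect(endpoint)
    assert client.echo('hello') == 'hello'
    client.close()
    srv.close()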
| mit |
rackerlabs/horizon | openstack_dashboard/dashboards/admin/domains/views.py | 2 | 2980 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.domains.constants \
import DOMAIN_INFO_FIELDS
from openstack_dashboard.dashboards.admin.domains.constants \
import DOMAINS_INDEX_URL
from openstack_dashboard.dashboards.admin.domains.constants \
import DOMAINS_INDEX_VIEW_TEMPLATE
from openstack_dashboard.dashboards.admin.domains.tables import DomainsTable
from openstack_dashboard.dashboards.admin.domains.workflows \
import CreateDomain
from openstack_dashboard.dashboards.admin.domains.workflows \
import UpdateDomain
class IndexView(tables.DataTableView):
table_class = DomainsTable
template_name = DOMAINS_INDEX_VIEW_TEMPLATE
def get_data(self):
domains = []
domain_context = self.request.session.get('domain_context', None)
try:
if domain_context:
domain = api.keystone.domain_get(self.request,
domain_context)
domains.append(domain)
else:
domains = api.keystone.domain_list(self.request)
except:
exceptions.handle(self.request,
_('Unable to retrieve domain list.'))
return domains
class CreateDomainView(workflows.WorkflowView):
workflow_class = CreateDomain
class UpdateDomainView(workflows.WorkflowView):
workflow_class = UpdateDomain
def get_initial(self):
initial = super(UpdateDomainView, self).get_initial()
domain_id = self.kwargs['domain_id']
initial['domain_id'] = domain_id
try:
# get initial domain info
domain_info = api.keystone.domain_get(self.request,
domain_id)
for field in DOMAIN_INFO_FIELDS:
initial[field] = getattr(domain_info, field, None)
except:
exceptions.handle(self.request,
_('Unable to retrieve domain details.'),
redirect=reverse(DOMAINS_INDEX_URL))
return initial
| apache-2.0 |
hopeall/odoo | addons/delivery/__openerp__.py | 224 | 1905 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Delivery Costs',
'version': '1.0',
'category': 'Sales Management',
'description': """
Allows you to add delivery methods in sale orders and picking.
==============================================================
You can define your own carrier and delivery grids for prices. When creating
invoices from picking, OpenERP is able to add and compute the shipping line.
""",
'author': 'OpenERP SA',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'delivery_view.xml',
'partner_view.xml',
'delivery_data.xml',
'views/report_shipping.xml',
],
'demo': ['delivery_demo.xml'],
'test': ['test/delivery_cost.yml',
'test/stock_move_values_with_invoice_before_delivery.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
itskewpie/tempest | tempest/services/object_storage/object_client.py | 5 | 7794 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import http
from tempest.common.rest_client import RestClient
from tempest import exceptions
class ObjectClient(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(ObjectClient, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.object_storage.catalog_type
def create_object(self, container, object_name, data):
"""Create storage object."""
headers = dict(self.headers)
if not data:
headers['content-length'] = '0'
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.put(url, data, headers)
return resp, body
def update_object(self, container, object_name, data):
"""Upload data to replace current storage object."""
return self.create_object(container, object_name, data)
def delete_object(self, container, object_name):
"""Delete storage object."""
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.delete(url)
return resp, body
def update_object_metadata(self, container, object_name, metadata,
metadata_prefix='X-Object-Meta-'):
"""Add, remove, or change X-Object-Meta metadata for storage object."""
headers = {}
for key in metadata:
headers["%s%s" % (str(metadata_prefix), str(key))] = metadata[key]
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.post(url, None, headers=headers)
return resp, body
def list_object_metadata(self, container, object_name):
"""List all storage object X-Object-Meta- metadata."""
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.head(url)
return resp, body
def get_object(self, container, object_name):
"""Retrieve object's data."""
url = "{0}/{1}".format(container, object_name)
resp, body = self.get(url)
return resp, body
def copy_object_in_same_container(self, container, src_object_name,
dest_object_name, metadata=None):
"""Copy storage object's data to the new object using PUT."""
url = "{0}/{1}".format(container, dest_object_name)
headers = {}
headers['X-Copy-From'] = "%s/%s" % (str(container),
str(src_object_name))
headers['content-length'] = '0'
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.put(url, None, headers=headers)
return resp, body
def copy_object_across_containers(self, src_container, src_object_name,
dst_container, dst_object_name,
metadata=None):
"""Copy storage object's data to the new object using PUT."""
url = "{0}/{1}".format(dst_container, dst_object_name)
headers = {}
headers['X-Copy-From'] = "%s/%s" % (str(src_container),
str(src_object_name))
headers['content-length'] = '0'
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.put(url, None, headers=headers)
return resp, body
def copy_object_2d_way(self, container, src_object_name, dest_object_name,
metadata=None):
"""Copy storage object's data to the new object using COPY."""
url = "{0}/{1}".format(container, src_object_name)
headers = {}
headers['Destination'] = "%s/%s" % (str(container),
str(dest_object_name))
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.copy(url, headers=headers)
return resp, body
def create_object_segments(self, container, object_name, segment, data):
"""Creates object segments."""
url = "{0}/{1}/{2}".format(container, object_name, segment)
resp, body = self.put(url, data, self.headers)
return resp, body
def get_object_using_temp_url(self, url):
"""Retrieve object's data using temp URL."""
return self.get(url)
def put_object_using_temp_url(self, url, data):
"""Put data in an object using temp URL."""
return self.put(url, data, None)
class ObjectClientCustomizedHeader(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(ObjectClientCustomizedHeader, self).__init__(config, username,
password, auth_url,
tenant_name)
# Overwrites json-specific header encoding in RestClient
self.service = self.config.object_storage.catalog_type
self.format = 'json'
def request(self, method, url, headers=None, body=None):
"""A simple HTTP request interface."""
dscv = self.config.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
if headers is None:
headers = {}
if self.base_url is None:
self._set_auth()
req_url = "%s/%s" % (self.base_url, url)
self._log_request(method, req_url, headers, body)
resp, resp_body = self.http_obj.request(req_url, method,
headers=headers, body=body)
self._log_response(resp, resp_body)
if resp.status == 401 or resp.status == 403:
raise exceptions.Unauthorized()
return resp, resp_body
def get_object(self, container, object_name, metadata=None):
"""Retrieve object's data."""
headers = {}
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
url = "{0}/{1}".format(container, object_name)
resp, body = self.get(url, headers=headers)
return resp, body
def create_object(self, container, object_name, data, metadata=None):
"""Create storage object."""
headers = {}
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
if not data:
headers['content-length'] = '0'
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.put(url, data, headers=headers)
return resp, body
def delete_object(self, container, object_name, metadata=None):
"""Delete storage object."""
headers = {}
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.delete(url, headers=headers)
return resp, body
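# Illustrative usage sketch (not part of the original client). `CONFIG` and
# the credentials/URL are placeholders the caller must supply:
#
# client = ObjectClient(CONFIG, 'user', 'password', 'http://keystone:5000/v2.0')
# resp, _ = client.create_object('my-container', 'obj1', 'payload')
# resp, body = client.get_object('my-container', 'obj1')
# client.update_object_metadata('my-container', 'obj1', {'color': 'blue'})
# client.delete_object('my-container', 'obj1')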
| apache-2.0 |
TeachAtTUM/edx-platform | lms/djangoapps/grades/config/models.py | 12 | 3085 | """
Models for configuration of the feature flags
controlling persistent grades.
"""
from config_models.models import ConfigurationModel
from django.conf import settings
from django.db.models import BooleanField, IntegerField, TextField
from opaque_keys.edx.django.models import CourseKeyField
from six import text_type
from openedx.core.djangoapps.request_cache.middleware import request_cached
class PersistentGradesEnabledFlag(ConfigurationModel):
"""
Enables persistent grades across the platform.
When this feature flag is set to true, individual courses
must also have persistent grades enabled for the
feature to take effect.
"""
# this field overrides course-specific settings to enable the feature for all courses
enabled_for_all_courses = BooleanField(default=False)
@classmethod
@request_cached
def feature_enabled(cls, course_id=None):
"""
Looks at the currently active configuration model to determine whether
the persistent grades feature is available.
If the flag is not enabled, the feature is not available.
If the flag is enabled and the provided course_id is for a course
with persistent grades enabled, the feature is available.
If the flag is enabled and no course ID is given,
we return True since the global setting is enabled.
"""
if settings.FEATURES.get('PERSISTENT_GRADES_ENABLED_FOR_ALL_TESTS'):
return True
if not PersistentGradesEnabledFlag.is_enabled():
return False
elif not PersistentGradesEnabledFlag.current().enabled_for_all_courses and course_id:
effective = CoursePersistentGradesFlag.objects.filter(course_id=course_id).order_by('-change_date').first()
return effective.enabled if effective is not None else False
return True
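    # Worked example of the decision logic above (illustrative; assumes the
    # global flag is enabled but enabled_for_all_courses is False):
    #   feature_enabled()                       -> True (global setting wins)
    #   feature_enabled(course_id=X), X has a
    #     CoursePersistentGradesFlag row        -> that row's `enabled` value
    #   feature_enabled(course_id=Y), no row    -> False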
class Meta(object):
app_label = "grades"
def __unicode__(self):
current_model = PersistentGradesEnabledFlag.current()
return u"PersistentGradesEnabledFlag: enabled {}".format(
current_model.is_enabled()
)
class CoursePersistentGradesFlag(ConfigurationModel):
"""
Enables persistent grades for a specific
course. Only has an effect if the general
flag above is set to True.
"""
KEY_FIELDS = ('course_id',)
class Meta(object):
app_label = "grades"
# The course that these features are attached to.
course_id = CourseKeyField(max_length=255, db_index=True)
def __unicode__(self):
not_en = "Not "
if self.enabled:
not_en = ""
# pylint: disable=no-member
return u"Course '{}': Persistent Grades {}Enabled".format(text_type(self.course_id), not_en)
class ComputeGradesSetting(ConfigurationModel):
"""
...
"""
class Meta(object):
app_label = "grades"
batch_size = IntegerField(default=100)
course_ids = TextField(
blank=False,
help_text="Whitespace-separated list of course keys for which to compute grades."
)
| agpl-3.0 |
cyclecomputing/boto | boto/manage/propget.py | 115 | 2498 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def get(prop, choices=None):
prompt = prop.verbose_name
if not prompt:
prompt = prop.name
if choices:
if callable(choices):
choices = choices()
else:
choices = prop.get_choices()
valid = False
while not valid:
if choices:
min = 1
max = len(choices)
for i in range(min, max+1):
value = choices[i-1]
if isinstance(value, tuple):
value = value[0]
print '[%d] %s' % (i, value)
value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
try:
int_value = int(value)
value = choices[int_value-1]
if isinstance(value, tuple):
value = value[1]
valid = True
except ValueError:
print '%s is not a valid choice' % value
except IndexError:
print '%s is not within the range [%d-%d]' % (value, min, max)
else:
value = raw_input('%s: ' % prompt)
try:
value = prop.validate(value)
if prop.empty(value) and prop.required:
print 'A value is required'
else:
valid = True
except:
print 'Invalid value: %s' % value
return value
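# Illustrative usage sketch (not part of the original module). `size_prop`
# and `name_prop` are hypothetical boto property objects exposing the
# verbose_name/get_choices/validate/empty/required interface used above:
#
# size = get(size_prop, choices=['small', 'medium', 'large'])  # numbered menu
# name = get(name_prop)  # free-form prompt; re-prompts until validate() passes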
| mit |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.6/Lib/email/mime/message.py | 573 | 1286 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Class representing message/* MIME documents."""
__all__ = ['MIMEMessage']
from email import message
from email.mime.nonmultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
"""Class representing message/* MIME documents."""
def __init__(self, _msg, _subtype='rfc822'):
"""Create a message/* type MIME document.
_msg is a message object and must be an instance of Message, or a
derived class of Message, otherwise a TypeError is raised.
Optional _subtype defines the subtype of the contained message. The
default is "rfc822" (this is defined by the MIME standard, even though
the term "rfc822" is technically outdated by RFC 2822).
"""
MIMENonMultipart.__init__(self, 'message', _subtype)
if not isinstance(_msg, message.Message):
raise TypeError('Argument is not an instance of Message')
# It's convenient to use this base class method. We need to do it
# this way or we'll get an exception
message.Message.attach(self, _msg)
# And be sure our default type is set correctly
self.set_default_type('message/rfc822')
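# Illustrative usage sketch (assumes only the standard library email package):
#
# from email.mime.text import MIMEText
# inner = MIMEText('original body')
# inner['Subject'] = 'inner message'
# outer = MIMEMessage(inner)       # wraps inner as message/rfc822
# outer.get_content_type()         # -> 'message/rfc822'
# outer.get_payload(0) is inner    # -> True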
| mit |
dlazz/ansible | lib/ansible/module_utils/network/common/utils.py | 14 | 15929 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Networking tools for network modules only
import re
import ast
import operator
import socket
from itertools import chain
from socket import inet_aton
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.basic import AnsibleFallbackNotFound
# Backwards compatibility for 3rd party modules
from ansible.module_utils.common.network import (
to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
)
try:
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError
HAS_JINJA2 = True
except ImportError:
HAS_JINJA2 = False
OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
def to_list(val):
if isinstance(val, (list, tuple, set)):
return list(val)
elif val is not None:
return [val]
else:
return list()
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = to_text(item).split('\n')
yield item
def transform_commands(module):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(),
prompt=dict(type='list'),
answer=dict(type='list'),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(module.params['commands'])
def sort_list(val):
if isinstance(val, list):
return sorted(val)
return val
class Entity(object):
"""Transforms a dict to with an argument spec
This class will take a dict and apply an Ansible argument spec to the
values. The resulting dict will contain all of the keys from the argument
spec with appropriate values set.
Example::
argument_spec = dict(
command=dict(key=True),
display=dict(default='text', choices=['text', 'json']),
validate=dict(type='bool')
)
transform = Entity(module, argument_spec)
value = dict(command='foo')
result = transform(value)
print result
{'command': 'foo', 'display': 'text', 'validate': None}
Supported argument spec:
* key - specifies how to map a single value to a dict
* read_from - read and apply the argument_spec from the module
* required - a value is required
* type - type of value (uses AnsibleModule type checker)
* fallback - implements fallback function
* choices - set of valid options
* default - default value
"""
def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
args = [] if args is None else args
self._attributes = attrs or {}
self._module = module
for arg in args:
self._attributes[arg] = dict()
if from_argspec:
self._attributes[arg]['read_from'] = arg
if keys and arg in keys:
self._attributes[arg]['key'] = True
self.attr_names = frozenset(self._attributes.keys())
_has_key = False
for name, attr in iteritems(self._attributes):
if attr.get('read_from'):
if attr['read_from'] not in self._module.argument_spec:
module.fail_json(msg='argument %s does not exist' % attr['read_from'])
spec = self._module.argument_spec.get(attr['read_from'])
for key, value in iteritems(spec):
if key not in attr:
attr[key] = value
if attr.get('key'):
if _has_key:
module.fail_json(msg='only one key value can be specified')
_has_key = True
attr['required'] = True
def serialize(self):
return self._attributes
def to_dict(self, value):
obj = {}
for name, attr in iteritems(self._attributes):
if attr.get('key'):
obj[name] = value
else:
obj[name] = attr.get('default')
return obj
def __call__(self, value, strict=True):
if not isinstance(value, dict):
value = self.to_dict(value)
if strict:
unknown = set(value).difference(self.attr_names)
if unknown:
self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown))
for name, attr in iteritems(self._attributes):
if value.get(name) is None:
value[name] = attr.get('default')
if attr.get('fallback') and not value.get(name):
fallback = attr.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
if attr.get('required') and value.get(name) is None:
self._module.fail_json(msg='missing required attribute %s' % name)
if 'choices' in attr:
if value[name] not in attr['choices']:
self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name]))
if value[name] is not None:
value_type = attr.get('type', 'str')
type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(value[name])
elif value.get(name):
value[name] = self._module.params[name]
return value
class EntityCollection(Entity):
"""Extends ```Entity``` to handle a list of dicts """
def __call__(self, iterable, strict=True):
if iterable is None:
iterable = [super(EntityCollection, self).__call__(self._module.params, strict)]
if not isinstance(iterable, (list, tuple)):
self._module.fail_json(msg='value must be an iterable')
return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
# these two are for backwards compatibility and can be removed once all of the
# modules that use them are updated
class ComplexDict(Entity):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
class ComplexList(EntityCollection):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
def dict_diff(base, comparable):
""" Generate a dict object of differences
This function will compare two dict objects and return the difference
between them as a dict object. For scalar values, the key will reflect
the updated value. If the key does not exist in `comparable`, then no
key will be returned. For lists, the value in comparable will wholly replace
the value in base for the key. For dicts, the returned value will only
return keys that are different.
:param base: dict object to base the diff on
:param comparable: dict object to compare against base
:returns: new dict object with differences
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(comparable, dict):
raise AssertionError("`comparable` must be of type <dict>")
updates = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
item = comparable.get(key)
if item is not None:
updates[key] = dict_diff(value, comparable[key])
else:
comparable_value = comparable.get(key)
if comparable_value is not None:
if sort_list(base[key]) != sort_list(comparable_value):
updates[key] = comparable_value
for key in set(comparable.keys()).difference(base.keys()):
updates[key] = comparable.get(key)
return updates
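# Worked example of dict_diff (illustrative values):
#   base       = {'a': 1, 'b': [1, 2], 'c': {'x': 1, 'y': 2}}
#   comparable = {'a': 1, 'b': [1, 3], 'c': {'x': 1, 'y': 9}, 'd': 4}
#   dict_diff(base, comparable)
#   -> {'b': [1, 3], 'c': {'y': 9}, 'd': 4}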
def dict_merge(base, other):
""" Return a new dict object that combines base and other
This will create a new dict object that is a combination of the key/value
pairs from base and other. When both keys exist, the value will be
selected from other. If the value is a list object, the two lists will
be combined and duplicate entries removed.
:param base: dict object to serve as base
:param other: dict object to combine with base
:returns: new combined dict object
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(other, dict):
raise AssertionError("`other` must be of type <dict>")
combined = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
if key in other:
item = other.get(key)
if item is not None:
if isinstance(other[key], Mapping):
combined[key] = dict_merge(value, other[key])
else:
combined[key] = other[key]
else:
combined[key] = item
else:
combined[key] = value
elif isinstance(value, list):
if key in other:
item = other.get(key)
if item is not None:
try:
combined[key] = list(set(chain(value, item)))
except TypeError:
value.extend([i for i in item if i not in value])
combined[key] = value
else:
combined[key] = item
else:
combined[key] = value
else:
if key in other:
other_value = other.get(key)
if other_value is not None:
if sort_list(base[key]) != sort_list(other_value):
combined[key] = other_value
else:
combined[key] = value
else:
combined[key] = other_value
else:
combined[key] = value
for key in set(other.keys()).difference(base.keys()):
combined[key] = other.get(key)
return combined
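# Worked example of dict_merge (illustrative values; note that merged lists
# pass through set(), so their element order is not guaranteed):
#   base  = {'a': 1, 'b': [1, 2], 'c': {'x': 1}}
#   other = {'a': 2, 'b': [2, 3], 'c': {'y': 2}, 'd': 4}
#   dict_merge(base, other)
#   -> {'a': 2, 'b': [1, 2, 3], 'c': {'x': 1, 'y': 2}, 'd': 4}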
def conditional(expr, val, cast=None):
match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I)
if match:
op, arg = match.groups()
else:
op = 'eq'
if ' ' in str(expr):
raise AssertionError('invalid expression: cannot contain spaces')
arg = expr
if cast is None and val is not None:
arg = type(val)(arg)
elif callable(cast):
arg = cast(arg)
val = cast(val)
op = next((oper for alias, oper in ALIASES if op == alias), op)
if not hasattr(operator, op) and op not in OPERATORS:
raise ValueError('unknown operator: %s' % op)
func = getattr(operator, op)
return func(val, arg)
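# Worked examples of conditional() (illustrative):
#   conditional('eq(5)', 5)    -> True   # explicit operator form
#   conditional('min(3)', 10)  -> True   # alias: 'min' resolves to 'ge'
#   conditional('8', 8)        -> True   # bare value defaults to 'eq'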
def ternary(value, true_val, false_val):
''' value ? true_val : false_val '''
if value:
return true_val
else:
return false_val
def remove_default_spec(spec):
for item in spec:
if 'default' in spec[item]:
del spec[item]['default']
def validate_ip_address(address):
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
def validate_ip_v6_address(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
def validate_prefix(prefix):
if prefix and not 0 <= int(prefix) <= 32:
return False
return True
def load_provider(spec, args):
provider = args.get('provider', {})
for key, value in iteritems(spec):
if key not in provider:
if key in args:
provider[key] = args[key]
elif 'fallback' in value:
provider[key] = _fallback(value['fallback'])
elif 'default' in value:
provider[key] = value['default']
else:
provider[key] = None
args['provider'] = provider
return provider
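# Worked example of load_provider (illustrative spec and args):
#   spec = {'host': {}, 'port': {'default': 22}}
#   args = {'host': '10.0.0.1'}
#   load_provider(spec, args)
#   -> args['provider'] == {'host': '10.0.0.1', 'port': 22}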
def _fallback(fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except AnsibleFallbackNotFound:
pass
class Template:
def __init__(self):
if not HAS_JINJA2:
raise ImportError("jinja2 is required but does not appear to be installed. "
"It can be installed using `pip install jinja2`")
self.env = Environment(undefined=StrictUndefined)
self.env.filters.update({'ternary': ternary})
def __call__(self, value, variables=None, fail_on_undefined=True):
variables = variables or {}
if not self.contains_vars(value):
return value
try:
value = self.env.from_string(value).render(variables)
except UndefinedError:
if not fail_on_undefined:
return None
raise
if value:
try:
return ast.literal_eval(value)
except Exception:
return str(value)
else:
return None
def contains_vars(self, data):
if isinstance(data, string_types):
for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string):
if marker in data:
return True
return False
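# Illustrative usage sketch of Template (requires jinja2, per the guard at the
# top of this module):
#
# t = Template()
# t('{{ a + b }}', {'a': 1, 'b': 2})               # -> 3 (literal_eval of '3')
# t('plain text', {})                              # -> 'plain text' (no vars)
# t('{{ missing }}', {}, fail_on_undefined=False)  # -> None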
| gpl-3.0 |
root-mirror/root | interpreter/llvm/src/tools/clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py | 36 | 7783 | # -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import libear
from . import make_args, silent_check_call, silent_call, create_empty_file
import unittest
import os.path
import json
class CompilationDatabaseTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, args):
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + args
silent_check_call(
['intercept-build', '--cdb', result] + make)
return result
@staticmethod
def count_entries(filename):
with open(filename, 'r') as handler:
content = json.load(handler)
return len(content)
def test_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, ['build_regular'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
def test_successful_build_with_wrapper(self):
with libear.TemporaryDirectory() as tmpdir:
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + ['build_regular']
silent_check_call(['intercept-build', '--cdb', result,
'--override-compiler'] + make)
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
@unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu make return -11')
def test_successful_build_parallel(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, ['-j', '4', 'build_regular'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
@unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu env remove clang from path')
def test_successful_build_on_empty_env(self):
with libear.TemporaryDirectory() as tmpdir:
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + ['CC=clang', 'build_regular']
silent_check_call(['intercept-build', '--cdb', result,
'env', '-'] + make)
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
def test_successful_build_all_in_one(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, ['build_all_in_one'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
def test_not_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + ['build_broken']
silent_call(
['intercept-build', '--cdb', result] + make)
self.assertTrue(os.path.isfile(result))
self.assertEqual(2, self.count_entries(result))
class ExitCodeTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, target):
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + [target]
return silent_call(
['intercept-build', '--cdb', result] + make)
def test_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
exitcode = self.run_intercept(tmpdir, 'build_clean')
self.assertFalse(exitcode)
def test_not_successful_build(self):
with libear.TemporaryDirectory() as tmpdir:
exitcode = self.run_intercept(tmpdir, 'build_broken')
self.assertTrue(exitcode)
class ResumeFeatureTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, target, args):
result = os.path.join(tmpdir, 'cdb.json')
make = make_args(tmpdir) + [target]
silent_check_call(
['intercept-build', '--cdb', result] + args + make)
return result
@staticmethod
def count_entries(filename):
with open(filename, 'r') as handler:
content = json.load(handler)
return len(content)
def test_overwrite_existing_cdb(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, 'build_clean', [])
self.assertTrue(os.path.isfile(result))
result = self.run_intercept(tmpdir, 'build_regular', [])
self.assertTrue(os.path.isfile(result))
self.assertEqual(2, self.count_entries(result))
def test_append_to_existing_cdb(self):
with libear.TemporaryDirectory() as tmpdir:
result = self.run_intercept(tmpdir, 'build_clean', [])
self.assertTrue(os.path.isfile(result))
result = self.run_intercept(tmpdir, 'build_regular', ['--append'])
self.assertTrue(os.path.isfile(result))
self.assertEqual(5, self.count_entries(result))
class ResultFormatingTest(unittest.TestCase):
@staticmethod
def run_intercept(tmpdir, command):
result = os.path.join(tmpdir, 'cdb.json')
silent_check_call(
['intercept-build', '--cdb', result] + command,
cwd=tmpdir)
with open(result, 'r') as handler:
content = json.load(handler)
return content
def assert_creates_number_of_entries(self, command, count):
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.c')
create_empty_file(filename)
command.append(filename)
cmd = ['sh', '-c', ' '.join(command)]
cdb = self.run_intercept(tmpdir, cmd)
self.assertEqual(count, len(cdb))
def test_filter_preprocessor_only_calls(self):
self.assert_creates_number_of_entries(['cc', '-c'], 1)
self.assert_creates_number_of_entries(['cc', '-c', '-E'], 0)
self.assert_creates_number_of_entries(['cc', '-c', '-M'], 0)
self.assert_creates_number_of_entries(['cc', '-c', '-MM'], 0)
def assert_command_creates_entry(self, command, expected):
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, command[-1])
create_empty_file(filename)
cmd = ['sh', '-c', ' '.join(command)]
cdb = self.run_intercept(tmpdir, cmd)
self.assertEqual(' '.join(expected), cdb[0]['command'])
def test_filter_preprocessor_flags(self):
self.assert_command_creates_entry(
['cc', '-c', '-MD', 'test.c'],
['cc', '-c', 'test.c'])
self.assert_command_creates_entry(
['cc', '-c', '-MMD', 'test.c'],
['cc', '-c', 'test.c'])
self.assert_command_creates_entry(
['cc', '-c', '-MD', '-MF', 'test.d', 'test.c'],
['cc', '-c', 'test.c'])
def test_pass_language_flag(self):
self.assert_command_creates_entry(
['cc', '-c', '-x', 'c', 'test.c'],
['cc', '-c', '-x', 'c', 'test.c'])
self.assert_command_creates_entry(
['cc', '-c', 'test.c'],
['cc', '-c', 'test.c'])
def test_pass_arch_flags(self):
self.assert_command_creates_entry(
['clang', '-c', 'test.c'],
['cc', '-c', 'test.c'])
self.assert_command_creates_entry(
['clang', '-c', '-arch', 'i386', 'test.c'],
['cc', '-c', '-arch', 'i386', 'test.c'])
self.assert_command_creates_entry(
['clang', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'],
['cc', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'])
| lgpl-2.1 |
tgroh/incubator-beam | sdks/python/apache_beam/io/gcp/datastore/v1/helper_test.py | 8 | 9777 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for datastore helper."""
import errno
import random
import sys
import unittest
from socket import error as SocketError
from mock import MagicMock
# pylint: disable=ungrouped-imports
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from apache_beam.testing.test_utils import patch_retry
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.cloud.proto.datastore.v1.entity_pb2 import Key
from google.rpc import code_pb2
from googledatastore.connection import RPCError
from googledatastore import helper as datastore_helper
except ImportError:
datastore_helper = None
# pylint: enable=wrong-import-order, wrong-import-position
# pylint: enable=ungrouped-imports
@unittest.skipIf(datastore_helper is None, 'GCP dependencies are not installed')
class HelperTest(unittest.TestCase):
def setUp(self):
self._mock_datastore = MagicMock()
self._query = query_pb2.Query()
self._query.kind.add().name = 'dummy_kind'
patch_retry(self, helper)
self._retriable_errors = [
RPCError("dummy", code_pb2.INTERNAL, "failed"),
SocketError(errno.ECONNRESET, "Connection Reset"),
SocketError(errno.ETIMEDOUT, "Timed out")
]
self._non_retriable_errors = [
RPCError("dummy", code_pb2.UNAUTHENTICATED, "failed"),
SocketError(errno.EADDRNOTAVAIL, "Address not available")
]
def permanent_retriable_datastore_failure(self, req):
raise RPCError("dummy", code_pb2.UNAVAILABLE, "failed")
def transient_retriable_datastore_failure(self, req):
if self._transient_fail_count:
self._transient_fail_count -= 1
raise random.choice(self._retriable_errors)
else:
return datastore_pb2.RunQueryResponse()
def non_retriable_datastore_failure(self, req):
raise random.choice(self._non_retriable_errors)
def test_query_iterator(self):
self._mock_datastore.run_query.side_effect = (
self.permanent_retriable_datastore_failure)
query_iterator = helper.QueryIterator("project", None, self._query,
self._mock_datastore)
self.assertRaises(RPCError, iter(query_iterator).next)
self.assertEqual(6, len(self._mock_datastore.run_query.call_args_list))
def test_query_iterator_with_transient_failures(self):
self._mock_datastore.run_query.side_effect = (
self.transient_retriable_datastore_failure)
query_iterator = helper.QueryIterator("project", None, self._query,
self._mock_datastore)
fail_count = 5
self._transient_fail_count = fail_count
for _ in query_iterator:
pass
self.assertEqual(fail_count + 1,
len(self._mock_datastore.run_query.call_args_list))
def test_query_iterator_with_non_retriable_failures(self):
self._mock_datastore.run_query.side_effect = (
self.non_retriable_datastore_failure)
query_iterator = helper.QueryIterator("project", None, self._query,
self._mock_datastore)
self.assertRaises(tuple(map(type, self._non_retriable_errors)),
iter(query_iterator).next)
self.assertEqual(1, len(self._mock_datastore.run_query.call_args_list))
def test_query_iterator_with_single_batch(self):
num_entities = 100
batch_size = 500
self.check_query_iterator(num_entities, batch_size, self._query)
def test_query_iterator_with_multiple_batches(self):
num_entities = 1098
batch_size = 500
self.check_query_iterator(num_entities, batch_size, self._query)
def test_query_iterator_with_exact_batch_multiple(self):
num_entities = 1000
batch_size = 500
self.check_query_iterator(num_entities, batch_size, self._query)
def test_query_iterator_with_query_limit(self):
num_entities = 1098
batch_size = 500
self._query.limit.value = 1004
self.check_query_iterator(num_entities, batch_size, self._query)
def test_query_iterator_with_large_query_limit(self):
num_entities = 1098
batch_size = 500
self._query.limit.value = 10000
self.check_query_iterator(num_entities, batch_size, self._query)
def check_query_iterator(self, num_entities, batch_size, query):
"""A helper method to test the QueryIterator.
Args:
num_entities: number of entities contained in the fake datastore.
batch_size: the number of entities returned by the fake datastore in one request.
query: the query to be executed
"""
entities = fake_datastore.create_entities(num_entities)
self._mock_datastore.run_query.side_effect = \
fake_datastore.create_run_query(entities, batch_size)
query_iterator = helper.QueryIterator("project", None, self._query,
self._mock_datastore)
i = 0
for entity in query_iterator:
self.assertEqual(entity, entities[i].entity)
i += 1
limit = query.limit.value if query.HasField('limit') else sys.maxsize
self.assertEqual(i, min(num_entities, limit))
def test_is_key_valid(self):
key = entity_pb2.Key()
# Complete with name, no ancestor
datastore_helper.add_key_path(key, 'kind', 'name')
self.assertTrue(helper.is_key_valid(key))
key = entity_pb2.Key()
# Complete with id, no ancestor
datastore_helper.add_key_path(key, 'kind', 12)
self.assertTrue(helper.is_key_valid(key))
key = entity_pb2.Key()
# Incomplete, no ancestor
datastore_helper.add_key_path(key, 'kind')
self.assertFalse(helper.is_key_valid(key))
key = entity_pb2.Key()
# Complete with name and ancestor
datastore_helper.add_key_path(key, 'kind', 'name', 'kind2', 'name2')
self.assertTrue(helper.is_key_valid(key))
key = entity_pb2.Key()
# Complete with id and ancestor
datastore_helper.add_key_path(key, 'kind', 'name', 'kind2', 123)
self.assertTrue(helper.is_key_valid(key))
key = entity_pb2.Key()
# Incomplete with ancestor
datastore_helper.add_key_path(key, 'kind', 'name', 'kind2')
self.assertFalse(helper.is_key_valid(key))
key = entity_pb2.Key()
self.assertFalse(helper.is_key_valid(key))
def test_compare_path_with_different_kind(self):
p1 = Key.PathElement()
p1.kind = 'dummy1'
p2 = Key.PathElement()
p2.kind = 'dummy2'
self.assertLess(helper.compare_path(p1, p2), 0)
def test_compare_path_with_different_id(self):
p1 = Key.PathElement()
p1.kind = 'dummy'
p1.id = 10
p2 = Key.PathElement()
p2.kind = 'dummy'
p2.id = 15
self.assertLess(helper.compare_path(p1, p2), 0)
def test_compare_path_with_different_name(self):
p1 = Key.PathElement()
p1.kind = 'dummy'
p1.name = "dummy1"
p2 = Key.PathElement()
p2.kind = 'dummy'
p2.name = 'dummy2'
self.assertLess(helper.compare_path(p1, p2), 0)
def test_compare_path_of_different_type(self):
p1 = Key.PathElement()
p1.kind = 'dummy'
p1.id = 10
p2 = Key.PathElement()
p2.kind = 'dummy'
p2.name = 'dummy'
self.assertLess(helper.compare_path(p1, p2), 0)
def test_key_comparator_with_different_partition(self):
k1 = Key()
k1.partition_id.namespace_id = 'dummy1'
k2 = Key()
k2.partition_id.namespace_id = 'dummy2'
self.assertRaises(ValueError, helper.key_comparator, k1, k2)
def test_key_comparator_with_single_path(self):
k1 = Key()
k2 = Key()
p1 = k1.path.add()
p2 = k2.path.add()
p1.kind = p2.kind = 'dummy'
self.assertEqual(helper.key_comparator(k1, k2), 0)
def test_key_comparator_with_multiple_paths_1(self):
k1 = Key()
k2 = Key()
p11 = k1.path.add()
p12 = k1.path.add()
p21 = k2.path.add()
p11.kind = p12.kind = p21.kind = 'dummy'
self.assertGreater(helper.key_comparator(k1, k2), 0)
def test_key_comparator_with_multiple_paths_2(self):
k1 = Key()
k2 = Key()
p11 = k1.path.add()
p21 = k2.path.add()
p22 = k2.path.add()
p11.kind = p21.kind = p22.kind = 'dummy'
self.assertLess(helper.key_comparator(k1, k2), 0)
def test_key_comparator_with_multiple_paths_3(self):
k1 = Key()
k2 = Key()
p11 = k1.path.add()
p12 = k1.path.add()
p21 = k2.path.add()
p22 = k2.path.add()
p11.kind = p12.kind = p21.kind = p22.kind = 'dummy'
self.assertEqual(helper.key_comparator(k1, k2), 0)
def test_key_comparator_with_multiple_paths_4(self):
k1 = Key()
k2 = Key()
p11 = k1.path.add()
p12 = k2.path.add()
p21 = k2.path.add()
p11.kind = p12.kind = 'dummy'
# make path2 greater than path1
p21.kind = 'dummy1'
self.assertLess(helper.key_comparator(k1, k2), 0)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
rabipanda/tensorflow | tensorflow/python/training/evaluation.py | 39 | 8616 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
def _get_or_create_eval_step():
"""Gets or creates the eval step `Tensor`.
Returns:
A `Tensor` representing a counter for the evaluation step.
Raises:
ValueError: If multiple `Tensors` have been added to the
`tf.GraphKeys.EVAL_STEP` collection.
"""
graph = ops.get_default_graph()
eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
if len(eval_steps) == 1:
return eval_steps[0]
elif len(eval_steps) > 1:
raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
else:
counter = variable_scope.get_variable(
'eval_step',
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
return counter
def _get_latest_eval_step_value(update_ops):
"""Gets the eval step `Tensor` value after running `update_ops`.
Args:
update_ops: A list of `Tensors` or a dictionary of names to `Tensors`,
which are run before reading the eval step value.
Returns:
A `Tensor` representing the value for the evaluation step.
"""
if isinstance(update_ops, dict):
update_ops = list(update_ops.values())
with ops.control_dependencies(update_ops):
return array_ops.identity(_get_or_create_eval_step().read_value())
class _StopAfterNEvalsHook(session_run_hook.SessionRunHook):
"""Run hook used by the evaluation routines to run the `eval_ops` N times."""
def __init__(self, num_evals, log_progress=True):
"""Constructs the run hook.
Args:
num_evals: The number of evaluations to run for. If set to None, the hook
will iterate the dataset until all inputs are exhausted.
log_progress: Whether to log evaluation progress, defaults to True.
"""
# The number of evals to run for.
self._num_evals = num_evals
self._evals_completed = None
self._log_progress = log_progress
# Reduce logging frequency if there are 20 or more evaluations.
self._log_frequency = (1 if (num_evals is None or num_evals < 20)
else math.floor(num_evals / 10.))
def _set_evals_completed_tensor(self, updated_eval_step):
self._evals_completed = updated_eval_step
def before_run(self, run_context):
return session_run_hook.SessionRunArgs({
'evals_completed': self._evals_completed
})
def after_run(self, run_context, run_values):
evals_completed = run_values.results['evals_completed']
if self._log_progress:
if self._num_evals is None:
logging.info('Evaluation [%d]', evals_completed)
else:
if ((evals_completed % self._log_frequency) == 0 or
(self._num_evals == evals_completed)):
logging.info('Evaluation [%d/%d]', evals_completed, self._num_evals)
if self._num_evals is not None and evals_completed >= self._num_evals:
run_context.request_stop()
def _evaluate_once(checkpoint_path,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
hooks=None,
config=None):
"""Evaluates the model at the given checkpoint path.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_path: The path to a checkpoint to use for evaluation.
master: The BNS address of the TensorFlow master.
scaffold: An tf.train.Scaffold instance for initializing variables and
restoring variables. Note that `scaffold.init_fn` is used by the function
to restore the checkpoint. If you supply a custom init_fn, then it must
also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`, which is run until the session is requested to stop,
commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
evaluation loop.
config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = _get_or_create_eval_step()
# Prepare the run hooks.
hooks = list(hooks or [])
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1, use_locking=True)
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
eval_step_value = _get_latest_eval_step_value(eval_ops)
for h in hooks:
if isinstance(h, _StopAfterNEvalsHook):
h._set_evals_completed_tensor(eval_step_value) # pylint: disable=protected-access
logging.info('Starting evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
# Prepare the session creator.
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
final_ops_hook = basic_session_run_hooks.FinalOpsHook(
final_ops, final_ops_feed_dict)
hooks.append(final_ops_hook)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
return final_ops_hook.final_ops_values
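# Illustrative call sketch (not part of the module; uppercase names are
# placeholders the caller must supply):
#
# hooks = [_StopAfterNEvalsHook(num_evals=NUM_BATCHES)]
# metrics = _evaluate_once(
#     checkpoint_path=CHECKPOINT_PATH,
#     eval_ops=UPDATE_METRIC_OPS,            # run NUM_BATCHES times
#     final_ops={'accuracy': ACCURACY_TENSOR},
#     hooks=hooks)
# # `metrics` holds the fetched final_ops values, e.g. {'accuracy': 0.93}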
| apache-2.0 |
bunnyitvn/webptn | tests/regressiontests/admin_changelist/admin.py | 47 | 2998 | from __future__ import absolute_import
from django.contrib import admin
from django.core.paginator import Paginator
from .models import (Event, Child, Parent, Genre, Band, Musician, Group,
Quartet, Membership, ChordsMusician, ChordsBand, Invitation, Swallow)
site = admin.AdminSite(name="admin")
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(queryset, 5, orphans=2,
allow_empty_first_page=allow_empty_first_page)
class EventAdmin(admin.ModelAdmin):
list_display = ['event_date_func']
def event_date_func(self, event):
return event.date
site.register(Event, EventAdmin)
class ParentAdmin(admin.ModelAdmin):
list_filter = ['child__name']
search_fields = ['child__name']
class ChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
list_filter = ['parent', 'age']
def queryset(self, request):
return super(ChildAdmin, self).queryset(request).select_related("parent__name")
class CustomPaginationAdmin(ChildAdmin):
paginator = CustomPaginator
class FilteredChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
def queryset(self, request):
return super(FilteredChildAdmin, self).queryset(request).filter(
name__contains='filtered')
class BandAdmin(admin.ModelAdmin):
list_filter = ['genres']
class GroupAdmin(admin.ModelAdmin):
list_filter = ['members']
class QuartetAdmin(admin.ModelAdmin):
list_filter = ['members']
class ChordsBandAdmin(admin.ModelAdmin):
list_filter = ['members']
class DynamicListDisplayChildAdmin(admin.ModelAdmin):
list_display = ('parent', 'name', 'age')
def get_list_display(self, request):
my_list_display = super(DynamicListDisplayChildAdmin, self).get_list_display(request)
if request.user.username == 'noparents':
my_list_display = list(my_list_display)
my_list_display.remove('parent')
return my_list_display
class DynamicListDisplayLinksChildAdmin(admin.ModelAdmin):
list_display = ('parent', 'name', 'age')
list_display_links = ['parent', 'name']
def get_list_display_links(self, request, list_display):
return ['age']
site.register(Child, DynamicListDisplayChildAdmin)
class SwallowAdmin(admin.ModelAdmin):
actions = None # prevent ['action_checkbox'] + list(list_display)
list_display = ('origin', 'load', 'speed')
site.register(Swallow, SwallowAdmin)
class DynamicListFilterChildAdmin(admin.ModelAdmin):
list_filter = ('parent', 'name', 'age')
def get_list_filter(self, request):
my_list_filter = super(DynamicListFilterChildAdmin, self).get_list_filter(request)
if request.user.username == 'noparents':
my_list_filter = list(my_list_filter)
my_list_filter.remove('parent')
return my_list_filter
| bsd-3-clause |
henrytao-me/openerp.positionq | openerp/addons/edi/models/edi.py | 33 | 32142 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import simplejson as json
import logging
import re
import time
import urllib2
import openerp
import openerp.release as release
import openerp.netsvc as netsvc
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
EDI_GENERATOR = 'OpenERP ' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
match = EXTERNAL_ID_PATTERN.match(ext_id)
assert match, \
_("'%s' is an invalid external ID") % (ext_id)
return {'module': match.group(1),
'db_uuid': match.group(2),
'id': match.group(3),
'full': match.group(0)}
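# Illustrative sketch (not part of the original module): split_external_id()
# decomposes a fully-qualified EDI external id of the form
# 'module:db_uuid.identifier'. The values below are hypothetical.
#
#   split_external_id('base:5af5684d.partner_x')
#   # -> {'module': 'base', 'db_uuid': '5af5684d',
#   #     'id': 'partner_x', 'full': 'base:5af5684d.partner_x'}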
def safe_unique_id(database_id, model, record_id):
"""Generate a unique string to represent a (database_uuid,model,record_id) pair
without being too long, and with a very low probability of collisions.
"""
msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
digest = hashlib.sha1(msg).digest()
# fold the sha1 20 bytes digest to 9 bytes
digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
# b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
digest = base64.urlsafe_b64encode(digest)
return '%s-%s' % (model.replace('.','_'), digest)
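# Illustrative sketch (not part of the original module): safe_unique_id()
# yields ids such as 'res_partner-AbCdEfGhIjKl' -- the model name with dots
# replaced by underscores, plus 12 url-safe base64 characters. The exact
# suffix shown is hypothetical, since the digest depends on time.time().
#
#   print safe_unique_id('some-db-uuid', 'res.partner', 42)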
def last_update_for(record):
"""Returns the last update timestamp for the given record,
if available, otherwise False
"""
if record._model._log_access:
record_log = record.perm_read()[0]
return record_log.get('write_date') or record_log.get('create_date') or False
return False
class edi(osv.AbstractModel):
_name = 'edi.edi'
_description = 'EDI Subsystem'
def new_edi_token(self, cr, uid, record):
"""Return a new, random unique token to identify this model record,
and to be used as token when exporting it as an EDI document.
:param browse_record record: model record for which a token is needed
"""
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
return edi_token
def serialize(self, edi_documents):
"""Serialize the given EDI document structures (Python dicts holding EDI data),
using JSON serialization.
:param [dict] edi_documents: list of EDI document structures to serialize
:return: UTF-8 encoded string containing the serialized document
"""
serialized_list = json.dumps(edi_documents)
return serialized_list
def generate_edi(self, cr, uid, records, context=None):
"""Generates a final EDI document containing the EDI serialization
of the given records, which should all be instances of a Model
that has the :meth:`~.edi` mixin. The document is not saved in the
database.
:param list(browse_record) records: records to export as EDI
:return: UTF-8 encoded string containing the serialized records
"""
edi_list = []
for record in records:
record_model_obj = self.pool.get(record._name)
edi_list += record_model_obj.edi_export(cr, uid, [record], context=context)
return self.serialize(edi_list)
def load_edi(self, cr, uid, edi_documents, context=None):
"""Import the given EDI document structures into the system, using
:meth:`~.import_edi`.
:param edi_documents: list of Python dicts containing the deserialized
version of EDI documents
:return: list of (model, id, action) tuple containing the model and database ID
of all records that were imported in the system, plus a suggested
action definition dict for displaying each document.
"""
ir_module = self.pool.get('ir.module.module')
res = []
for edi_document in edi_documents:
module = edi_document.get('__import_module') or edi_document.get('__module')
assert module, 'a `__module` or `__import_module` attribute is required in each EDI document.'
if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
raise osv.except_osv(_('Missing Application.'),
_("The document you are trying to import requires the OpenERP `%s` application. "
"You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
model = edi_document.get('__import_model') or edi_document.get('__model')
assert model, 'a `__model` or `__import_model` attribute is required in each EDI document.'
model_obj = self.pool.get(model)
assert model_obj, 'model `%s` cannot be found, despite module `%s` being available - '\
'this EDI document seems invalid or unsupported.' % (model,module)
record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
res.append((model, record_id, record_action))
return res
def deserialize(self, edi_documents_string):
"""Return deserialized version of the given EDI Document string.
:param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
JSON-serialized EDI document(s)
:return: Python object representing the EDI document(s) (usually a list of dicts)
"""
return json.loads(edi_documents_string)
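    # Illustrative sketch (not part of the original module): serialize() and
    # deserialize() are thin JSON wrappers, so a round-trip preserves the
    # document structure. The sample document below is hypothetical.
    #
    #   docs = [{'__model': 'res.partner', '__id': 'base:some-uuid.partner_x'}]
    #   assert edi_obj.deserialize(edi_obj.serialize(docs)) == docs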
def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
"""Import a JSON serialized EDI Document string into the system, first retrieving it
from the given ``edi_url`` if provided.
        :param str|unicode edi_document: UTF-8 string or unicode containing JSON-serialized
EDI Document to import. Must not be provided if
``edi_url`` is given.
:param str|unicode edi_url: URL where the EDI document (same format as ``edi``)
may be retrieved, without authentication.
"""
if edi_url:
            assert not edi_document, 'edi_document must not be provided if edi_url is given.'
edi_document = urllib2.urlopen(edi_url).read()
assert edi_document, 'EDI Document is empty!'
edi_documents = self.deserialize(edi_document)
return self.load_edi(cr, uid, edi_documents, context=context)
class EDIMixin(object):
"""Mixin class for Model objects that want be exposed as EDI documents.
Classes that inherit from this mixin class should override the
``edi_import()`` and ``edi_export()`` methods to implement their
specific behavior, based on the primitives provided by this mixin."""
def _edi_requires_attributes(self, attributes, edi):
model_name = edi.get('__imported_model') or edi.get('__model') or self._name
for attribute in attributes:
assert edi.get(attribute),\
'Attribute `%s` is required in %s EDI documents.' % (attribute, model_name)
# private method, not RPC-exposed as it creates ir.model.data entries as
# SUPERUSER based on its parameters
def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
context=None):
"""Generate/Retrieve unique external ID for ``record``.
Each EDI record and each relationship attribute in it is identified by a
unique external ID, which includes the database's UUID, as a way to
refer to any record within any OpenERP instance, without conflict.
For OpenERP records that have an existing "External ID" (i.e. an entry in
ir.model.data), the EDI unique identifier for this record will be made of
"%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
        UUID MUST NOT contain colon characters (this is guaranteed by the
UUID algorithm).
For records that have no existing ir.model.data entry, a new one will be
created during the EDI export. It is recommended that the generated external ID
contains a readable reference to the record model, plus a unique value that
hides the database ID. If ``existing_id`` is provided (because it came from
an import), it will be used instead of generating a new one.
If ``existing_module`` is provided (because it came from
an import), it will be used instead of using local values.
:param browse_record record: any browse_record needing an EDI external ID
:param string existing_id: optional existing external ID value, usually coming
from a just-imported EDI record, to be used instead
of generating a new one
:param string existing_module: optional existing module name, usually in the
format ``module:db_uuid`` and coming from a
just-imported EDI record, to be used instead
of local values
:return: the full unique External ID to use for record
"""
ir_model_data = self.pool.get('ir.model.data')
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
ext_id = record.get_external_id()[record.id]
if not ext_id:
ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
# ID is unique cross-db thanks to db_uuid (already included in existing_module)
module = existing_module or "%s:%s" % (record._original_module, db_uuid)
_logger.debug("%s: Generating new external ID `%s.%s` for %r.", self._name,
module, ext_id, record)
ir_model_data.create(cr, openerp.SUPERUSER_ID,
{'name': ext_id,
'model': record._name,
'module': module,
'res_id': record.id})
else:
module, ext_id = ext_id.split('.')
            if ':' not in module:
                # this record was not previously EDI-imported
                if module != record._original_module:
# this could happen for data records defined in a module that depends
# on the module that owns the model, e.g. purchase defines
# product.pricelist records.
_logger.debug('Mismatching module: expected %s, got %s, for %s.',
module, record._original_module, record)
# ID is unique cross-db thanks to db_uuid
module = "%s:%s" % (module, db_uuid)
return '%s.%s' % (module, ext_id)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
        the record with ID ``id``.
:param int id: database ID of record to display
:return: action definition dict
"""
return {'type': 'ir.actions.act_window',
'view_mode': 'form,tree',
'view_type': 'form',
'res_model': self._name,
'res_id': id}
def edi_metadata(self, cr, uid, records, context=None):
"""Return a list containing the boilerplate EDI structures for
exporting ``records`` as EDI, including
the metadata fields
The metadata fields always include::
{
'__model': 'some.model', # record model
'__module': 'module', # require module
'__id': 'module:db-uuid:model.id', # unique global external ID for the record
'__last_update': '2011-01-01 10:00:00', # last update date in UTC!
'__version': 1, # EDI spec version
'__generator' : 'OpenERP', # EDI generator
'__generator_version' : [6,1,0], # server version, to check compatibility.
            '__attachments': [...],                   # optional list of attachments (name, content, file_name)
}
:param list(browse_record) records: records to export
:return: list of dicts containing boilerplate EDI metadata for each record,
at the corresponding index from ``records``.
"""
ir_attachment = self.pool.get('ir.attachment')
results = []
for record in records:
ext_id = self._edi_external_id(cr, uid, record, context=context)
edi_dict = {
'__id': ext_id,
'__last_update': last_update_for(record),
'__model' : record._name,
'__module' : record._original_module,
'__version': EDI_PROTOCOL_VERSION,
'__generator': EDI_GENERATOR,
'__generator_version': EDI_GENERATOR_VERSION,
}
attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
if attachment_ids:
attachments = []
for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
attachments.append({
'name' : attachment.name,
'content': attachment.datas, # already base64 encoded!
'file_name': attachment.datas_fname,
})
edi_dict.update(__attachments=attachments)
results.append(edi_dict)
return results
def edi_m2o(self, cr, uid, record, context=None):
"""Return a m2o EDI representation for the given record.
The EDI format for a many2one is::
['unique_external_id', 'Document Name']
"""
edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
relation_model = record._model
name = relation_model.name_get(cr, uid, [record.id], context=context)
name = name and name[0][1] or False
return [edi_ext_id, name]
def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
"""Return a list representing a O2M EDI relationship containing
all the given records, according to the given ``edi_struct``.
        This is basically the same as exporting all the records using
:meth:`~.edi_export` with the given ``edi_struct``, and wrapping
the results in a list.
Example::
[ # O2M fields would be a list of dicts, with their
{ '__id': 'module:db-uuid.id', # own __id.
'__last_update': 'iso date', # update date
'name': 'some name',
#...
},
# ...
],
"""
result = []
for record in records:
result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
return result
def edi_m2m(self, cr, uid, records, context=None):
"""Return a list representing a M2M EDI relationship directed towards
all the given records.
        This is basically the same as exporting all the records using
:meth:`~.edi_m2o` and wrapping the results in a list.
Example::
# M2M fields are exported as a list of pairs, like a list of M2O values
[
['module:db-uuid.id1', 'Task 01: bla bla'],
['module:db-uuid.id2', 'Task 02: bla bla']
]
"""
return [self.edi_m2o(cr, uid, r, context=context) for r in records]
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Returns a list of dicts representing EDI documents containing the
records, and matching the given ``edi_struct``, if provided.
:param edi_struct: if provided, edi_struct should be a dictionary
with a skeleton of the fields to export.
Basic fields can have any key as value, but o2m
values should have a sample skeleton dict as value,
to act like a recursive export.
For example, for a res.partner record::
edi_struct: {
'name': True,
'company_id': True,
'address': {
'name': True,
'street': True,
}
}
Any field not specified in the edi_struct will not
be included in the exported data. Fields with no
value (False) will be omitted in the EDI struct.
If edi_struct is omitted, no fields will be exported
"""
if edi_struct is None:
edi_struct = {}
fields_to_export = edi_struct.keys()
results = []
for record in records:
edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
for field in fields_to_export:
column = self._all_columns[field].column
value = getattr(record, field)
if not value and value not in ('', 0):
continue
elif column._type == 'many2one':
value = self.edi_m2o(cr, uid, value, context=context)
elif column._type == 'many2many':
value = self.edi_m2m(cr, uid, value, context=context)
elif column._type == 'one2many':
value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field, {}), context=context)
edi_dict[field] = value
results.append(edi_dict)
return results
def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
model = self.pool.get(model_name)
search_results = model.name_search(cr, uid, name, operator='=', context=context)
if len(search_results) == 1:
return model.browse(cr, uid, search_results[0][0], context=context)
return False
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
"""Utility method to generate the first PDF-type report declared for the
current model with ``usage`` attribute set to ``default``.
This must be called explicitly by models that need it, usually
at the beginning of ``edi_export``, before the call to ``super()``."""
ir_actions_report = self.pool.get('ir.actions.report.xml')
matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
('report_type','=','pdf'),
('usage','=','default')])
if matching_reports:
report = ir_actions_report.browse(cr, uid, matching_reports[0])
report_service = 'report.' + report.report_name
service = netsvc.LocalService(report_service)
(result, format) = service.create(cr, uid, [record.id], {'model': self._name}, context=context)
eval_context = {'time': time, 'object': record}
if not report.attachment or not eval(report.attachment, eval_context):
# no auto-saving of report as attachment, need to do it manually
result = base64.b64encode(result)
file_name = record.name_get()[0][1]
file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
file_name += ".pdf"
self.pool.get('ir.attachment').create(cr, uid,
{
'name': file_name,
'datas': result,
'datas_fname': file_name,
'res_model': self._name,
'res_id': record.id,
'type': 'binary'
},
context=context)
def _edi_import_attachments(self, cr, uid, record_id, edi, context=None):
ir_attachment = self.pool.get('ir.attachment')
for attachment in edi.get('__attachments', []):
# check attachment data is non-empty and valid
file_data = None
try:
file_data = base64.b64decode(attachment.get('content'))
except TypeError:
pass
assert file_data, 'Incorrect/Missing attachment file content.'
assert attachment.get('name'), 'Incorrect/Missing attachment name.'
            assert attachment.get('file_name'), 'Incorrect/Missing attachment file name.'
ir_attachment.create(cr, uid, {'name': attachment['name'],
'datas_fname': attachment['file_name'],
'res_model': self._name,
'res_id': record_id,
# should be pure 7bit ASCII
'datas': str(attachment['content']),
}, context=context)
def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
"""Returns browse_record representing object identified by the model and external_id,
or None if no record was found with this external id.
:param external_id: fully qualified external id, in the EDI form
``module:db_uuid:identifier``.
:param model: model name the record belongs to.
"""
ir_model_data = self.pool.get('ir.model.data')
# external_id is expected to have the form: ``module:db_uuid:model.random_name``
ext_id_members = split_external_id(external_id)
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
module = ext_id_members['module']
ext_id = ext_id_members['id']
modules = []
ext_db_uuid = ext_id_members['db_uuid']
if ext_db_uuid:
modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
if ext_db_uuid is None or ext_db_uuid == db_uuid:
# local records may also be registered without the db_uuid
modules.append(module)
data_ids = ir_model_data.search(cr, uid, [('model','=',model),
('name','=',ext_id),
('module','in',modules)])
if data_ids:
model = self.pool.get(model)
data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
if model.exists(cr, uid, [data.res_id]):
return model.browse(cr, uid, data.res_id, context=context)
# stale external-id, cleanup to allow re-import, as the corresponding record is gone
ir_model_data.unlink(cr, 1, [data_ids[0]])
def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
"""Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
given model, returning the corresponding database ID:
* First, checks if the ``external_id`` is already known, in which case the corresponding
database ID is directly returned, without doing anything else;
* If the ``external_id`` is unknown, attempts to locate an existing record
with the same ``value`` via name_search(). If found, the given external_id will
be assigned to this local record (in addition to any existing one)
* If previous steps gave no result, create a new record with the given
value in the target model, assign it the given external_id, and return
the new database ID
:param str value: display name of the record to import
:param str external_id: fully-qualified external ID of the record
:return: database id of newly-imported or pre-existing record
"""
_logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
need_new_ext_id = False
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get.",
self._name, external_id, value)
target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
need_new_ext_id = True
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it.",
self._name, external_id, value)
# also need_new_ext_id here, but already been set above
model = self.pool.get(model)
res_id, _ = model.name_create(cr, uid, value, context=context)
target = model.browse(cr, uid, res_id, context=context)
else:
_logger.debug("%s: Importing EDI relationship [%r,%r] - record already exists with ID %s, using it",
self._name, external_id, value, target.id)
if need_new_ext_id:
ext_id_members = split_external_id(external_id)
# module name is never used bare when creating ir.model.data entries, in order
# to avoid being taken as part of the module's data, and cleanup up at next update
module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
# create a new ir.model.data entry for this value
self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
return target.id
def edi_import(self, cr, uid, edi, context=None):
"""Imports a dict representing an EDI document into the system.
:param dict edi: EDI document to import
:return: the database ID of the imported record
"""
assert self._name == edi.get('__import_model') or \
('__import_model' not in edi and self._name == edi.get('__model')), \
"EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)." % \
(edi.get('__model'), self._name)
        # First check whether the record is already known in the database, in which case it is ignored
ext_id_members = split_external_id(edi['__id'])
existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
if existing:
_logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
return existing.id
record_values = {}
o2m_todo = {} # o2m values are processed after their parent already exists
for field_name, field_value in edi.iteritems():
# skip metadata and empty fields
if field_name.startswith('__') or field_value is None or field_value is False:
continue
field_info = self._all_columns.get(field_name)
if not field_info:
_logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document.', field_name, self._name)
continue
field = field_info.column
# skip function/related fields
if isinstance(field, fields.function):
                _logger.warning("Unexpected function field value found in '%s' EDI document: '%s'." % (self._name, field_name))
continue
relation_model = field._obj
if field._type == 'many2one':
record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
field_value[1], field_value[0],
context=context)
elif field._type == 'many2many':
record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
m2m_value[0], context=context)
for m2m_value in field_value]
elif field._type == 'one2many':
                # must wait until the parent record is imported, as the parent relationship
# is often required in o2m child records
o2m_todo[field_name] = field_value
else:
record_values[field_name] = field_value
module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
xml_id=ext_id_members['id'], context=context)
record_display, = self.name_get(cr, uid, [record_id], context=context)
# process o2m values, connecting them to their parent on-the-fly
for o2m_field, o2m_value in o2m_todo.iteritems():
field = self._all_columns[o2m_field].column
dest_model = self.pool.get(field._obj)
for o2m_line in o2m_value:
# link to parent record: expects an (ext_id, name) pair
o2m_line[field._fields_id] = (ext_id_members['full'], record_display[1])
dest_model.edi_import(cr, uid, o2m_line, context=context)
# process the attachments, if any
self._edi_import_attachments(cr, uid, record_id, edi, context=context)
return record_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ShenggaoZhu/midict | setup.py | 1 | 3137 | # -*- coding: utf-8 -*-
import codecs
import os
import re
import sys
from setuptools import setup
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def read(*parts):
return codecs.open(os.path.join(os.path.dirname(__file__), *parts),
encoding='utf8').read()
try:
bytes
except NameError:
bytes = str
class UltraMagicString(object):
'''
Taken from
http://stackoverflow.com/questions/1162338/whats-the-right-way-to-use-unicode-metadata-in-setup-py
'''
def __init__(self, value):
if not isinstance(value, bytes):
value = value.encode('utf8')
self.value = value
def __bytes__(self):
return self.value
def __unicode__(self):
return self.value.decode('UTF-8')
if sys.version_info[0] < 3:
__str__ = __bytes__
else:
__str__ = __unicode__
def __add__(self, other):
return UltraMagicString(self.value + bytes(other))
def split(self, *args, **kw):
return str(self).split(*args, **kw)
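# Illustrative sketch (not part of the original file): UltraMagicString lets
# the same metadata value behave as bytes under Python 2 and as text under
# Python 3, which keeps setuptools happy with non-ASCII author names.
#
#   author = UltraMagicString(u'Shenggao Zhu')
#   str(author)   # bytes on Python 2, unicode text on Python 3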
long_description = UltraMagicString('\n\n'.join((
read('README.rst'),
# read('CHANGES.rst'),
)))
package_name = 'midict'
setup(
name=package_name,
version=find_version(package_name, '__init__.py'),
url='https://github.com/ShenggaoZhu/midict',
# download_url = 'https://codeload.github.com/ShenggaoZhu/midict/zip/v0.1.1',
license='MIT',
description=
'MIDict (Multi-Index Dict) can be indexed by any "keys" or "values", suitable as a '
'bidirectional/inverse dict or a multi-key/multi-value dict (a drop-in replacement '
'for dict in Python 2 & 3).',
long_description=long_description,
author=UltraMagicString('Shenggao Zhu'),
author_email='zshgao@gmail.com',
packages=[package_name],
include_package_data=True,
zip_safe=True,
keywords = 'dict, dictionary, mapping, bidirectional, bijective, two-way, double, inverse, reverse, '
'multiple, index, multiple indices, multiple values, multiple keys, MIMapping, MIDict, FrozenMIDict, '
'AttrDict, IndexDict, multi-indexing syntax',
install_requires=[],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
) | mit |
four2five/0.19.2 | contrib/hod/hodlib/Common/xmlrpc.py | 182 | 2374 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import xmlrpclib, time, random, signal
from hodlib.Common.util import hodInterrupt, HodInterruptException
class hodXRClient(xmlrpclib.ServerProxy):
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0, installSignalHandlers=1, retryRequests=True, timeOut=15):
xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, verbose,
allow_none)
self.__retryRequests = retryRequests
self.__timeOut = timeOut
if (installSignalHandlers!=0):
self.__set_alarm()
def __set_alarm(self):
def alarm_handler(sigNum, sigHandler):
raise Exception("XML-RPC socket timeout.")
signal.signal(signal.SIGALRM, alarm_handler)
def __request(self, methodname, params):
response = None
retryWaitTime = 5 + random.randint(0, 5)
for i in range(0, 30):
signal.alarm(self.__timeOut)
try:
response = self._ServerProxy__request(methodname, params)
signal.alarm(0)
break
except Exception:
if self.__retryRequests:
if hodInterrupt.isSet():
raise HodInterruptException()
time.sleep(retryWaitTime)
else:
raise Exception("hodXRClientTimeout")
return response
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
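# Illustrative sketch (not part of the original file): constructing a retrying
# XML-RPC client. The URL and remote method name are hypothetical placeholders.
#
#   client = hodXRClient('http://localhost:8000', timeOut=30)
#   client.someRemoteMethod('arg')  # retried up to 30 times on socket errors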
| apache-2.0 |
futurecore/revelation | revelation/test/test_execute_branch.py | 1 | 1838 | from revelation.instruction import Instruction
from revelation.isa import decode
from revelation.machine import RESET_ADDR
from revelation.test.machine import new_state, StateChecker
import opcode_factory
import pytest
@pytest.mark.parametrize('is16bit,cond,imm,offset',
                         [# BEQ (branch taken here, since AZ=1).
(False, 0b0000, 63, (63 << 1)),
(True, 0b0000, 127, (127 << 1)),
(False, 0b0000, pow(2, 24) - 1, (-1 << 1)),
(True, 0b0000, pow(2, 8) - 1, (-1 << 1)),
# B (unconditional).
(False, 0b1110, 63, (63 << 1)),
(True, 0b1110, 127, (127 << 1)),
(False, 0b1110, pow(2, 24) - 1, (-1 << 1)),
(True, 0b1110, pow(2, 8) - 1, (-1 << 1)),
])
def test_execute_bcond(is16bit, cond, imm, offset):
state = new_state(AZ=1, pc=90)
factory = opcode_factory.bcond16 if is16bit else opcode_factory.bcond32
instr = factory(condition=cond, imm=imm)
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_state = StateChecker(pc=(90 + offset), AZ=1)
expected_state.check(state)
@pytest.mark.parametrize('is16bit', [True, False])
def test_branch_link(is16bit):
state = new_state()
cond = 0b1111 # Condition code for branch-and-link
factory = opcode_factory.bcond16 if is16bit else opcode_factory.bcond32
instr = factory(condition=cond, imm=0b00011000)
name, executefn = decode(instr)
executefn(state, Instruction(instr, None))
expected_LR = (2 if is16bit else 4) + RESET_ADDR
expected = StateChecker(rfLR=expected_LR, pc=(RESET_ADDR + (0b00011000 << 1)))
expected.check(state)
| bsd-3-clause |
jessicalucci/NovaOrc | plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 14 | 10373 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script is used to configure openvswitch flows on XenServer hosts.
"""
import os
import simplejson as json
import sys
# This is written to Python 2.4, since that is what is available on XenServer
import netaddr
import novalib
OVS_OFCTL = '/usr/bin/ovs-ofctl'
class OvsFlow(object):
def __init__(self, bridge, params):
self.bridge = bridge
self.params = params
def add(self, rule):
novalib.execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params)
def clear_flows(self, ofport):
novalib.execute(OVS_OFCTL, 'del-flows',
self.bridge, "in_port=%s" % ofport)
def main(command, vif_raw, net_type):
if command not in ('online', 'offline'):
return
vif_name, dom_id, vif_index = vif_raw.split('-')
vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
bridge = novalib.execute_get_output('/usr/bin/ovs-vsctl',
'iface-to-br', vif)
xsls = novalib.execute_get_output('/usr/bin/xenstore-ls',
'/local/domain/%s/vm-data/networking' % dom_id)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
xsread = novalib.execute_get_output('/usr/bin/xenstore-read',
'/local/domain/%s/vm-data/networking/%s' %
(dom_id, mac))
data = json.loads(xsread)
if data["label"] == "public":
this_vif = "vif%s.0" % dom_id
phys_dev = "eth0"
else:
this_vif = "vif%s.1" % dom_id
phys_dev = "eth1"
if vif == this_vif:
vif_ofport = novalib.execute_get_output('/usr/bin/ovs-vsctl',
'get', 'Interface', vif, 'ofport')
phys_ofport = novalib.execute_get_output('/usr/bin/ovs-vsctl',
'get', 'Interface', phys_dev, 'ofport')
params = dict(VIF_NAME=vif,
MAC=data['mac'],
OF_PORT=vif_ofport,
PHYS_PORT=phys_ofport)
ovs = OvsFlow(bridge, params)
if command == 'offline':
# I haven't found a way to clear only IPv4 or IPv6 rules.
ovs.clear_flows(vif_ofport)
if command == 'online':
if net_type in ('ipv4', 'all') and 'ips' in data:
for ip4 in data['ips']:
ovs.params.update({'IPV4_ADDR': ip4['ip']})
apply_ovs_ipv4_flows(ovs, bridge, params)
if net_type in ('ipv6', 'all') and 'ip6s' in data:
for ip6 in data['ip6s']:
mac_eui64 = netaddr.EUI(data['mac']).eui64()
link_local = str(mac_eui64.ipv6_link_local())
ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local})
ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']})
apply_ovs_ipv6_flows(ovs, bridge, params)
def apply_ovs_ipv4_flows(ovs, bridge, params):
# When ARP traffic arrives from a vif, push it to virtual port
# 9999 for further processing
ovs.add("priority=4,arp,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"nw_src=%(IPV4_ADDR)s,arp_sha=%(MAC)s,actions=resubmit:9999")
ovs.add("priority=4,arp,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"nw_src=0.0.0.0,arp_sha=%(MAC)s,actions=resubmit:9999")
# When IP traffic arrives from a vif, push it to virtual port 9999
# for further processing
ovs.add("priority=4,ip,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"nw_src=%(IPV4_ADDR)s,actions=resubmit:9999")
# Drop IP bcast/mcast
ovs.add("priority=6,ip,in_port=%(OF_PORT)s,dl_dst=ff:ff:ff:ff:ff:ff,"
"actions=drop")
ovs.add("priority=5,ip,in_port=%(OF_PORT)s,nw_dst=224.0.0.0/4,"
"actions=drop")
ovs.add("priority=5,ip,in_port=%(OF_PORT)s,nw_dst=240.0.0.0/4,"
"actions=drop")
# Pass ARP requests coming from any VMs on the local HV (port
# 9999) or coming from external sources (PHYS_PORT) to the VM and
# physical NIC. We output this to the physical NIC as well, since
# with instances of shared ip groups, the active host for the
# destination IP might be elsewhere...
ovs.add("priority=3,arp,in_port=9999,nw_dst=%(IPV4_ADDR)s,"
"actions=output:%(OF_PORT)s,output:%(PHYS_PORT)s")
# Pass ARP traffic originating from external sources the VM with
# the matching IP address
ovs.add("priority=3,arp,in_port=%(PHYS_PORT)s,nw_dst=%(IPV4_ADDR)s,"
"actions=output:%(OF_PORT)s")
# Pass ARP traffic from one VM (src mac already validated) to
# another VM on the same HV
ovs.add("priority=3,arp,in_port=9999,dl_dst=%(MAC)s,"
"actions=output:%(OF_PORT)s")
# Pass ARP replies coming from the external environment to the
# target VM
ovs.add("priority=3,arp,in_port=%(PHYS_PORT)s,dl_dst=%(MAC)s,"
"actions=output:%(OF_PORT)s")
# ALL IP traffic: Pass IP data coming from any VMs on the local HV
# (port 9999) or coming from external sources (PHYS_PORT) to the
# VM and physical NIC. We output this to the physical NIC as
# well, since with instances of shared ip groups, the active host
# for the destination IP might be elsewhere...
ovs.add("priority=3,ip,in_port=9999,dl_dst=%(MAC)s,"
"nw_dst=%(IPV4_ADDR)s,actions=output:%(OF_PORT)s,"
"output:%(PHYS_PORT)s")
# Pass IP traffic from the external environment to the VM
ovs.add("priority=3,ip,in_port=%(PHYS_PORT)s,dl_dst=%(MAC)s,"
"nw_dst=%(IPV4_ADDR)s,actions=output:%(OF_PORT)s")
# Send any local traffic to the physical NIC's OVS port for
# physical network learning
ovs.add("priority=2,in_port=9999,actions=output:%(PHYS_PORT)s")
def apply_ovs_ipv6_flows(ovs, bridge, params):
# allow valid IPv6 ND outbound (are both global and local IPs needed?)
# Neighbor Solicitation
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
"actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
"actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal")
# Neighbor Advertisement
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,"
"nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,"
"nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal")
ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal")
# drop all other neighbor discovery (req b/c we permit all icmp6 below)
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")
# do not allow sending specifc ICMPv6 types
# Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
# Redirect Gateway
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop")
# Mobile Prefix Solicitation
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop")
# Mobile Prefix Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop")
# Multicast Router Advertisement
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop")
# Multicast Router Solicitation
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop")
# Multicast Router Termination
ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop")
# allow valid IPv6 outbound, by type
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal")
ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
"ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal")
# all else will be dropped ...
if __name__ == "__main__":
if len(sys.argv) != 4:
print ("usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " %
os.path.basename(sys.argv[0]))
sys.exit(1)
else:
command, vif_raw, net_type = sys.argv[1:4]
main(command, vif_raw, net_type)
| apache-2.0 |
dungtn/hackspace-2016 | source/convolutional_mlp.py | 1 | 12792 | """This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
 - LeNet5 did not use fully-connected convolutions at the second layer (this implementation does)
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
from __future__ import print_function
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv2d
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from numpy import float32
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
input_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
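# Illustrative helper (not part of the original tutorial): the output side
# length of one conv+pool stage follows from the 'valid' convolution and the
# 2x2 max-pooling used above. For the 64x64 inputs in evaluate_lenet5 below:
# (64 - 5 + 1) // 2 = 30, then (30 - 5 + 1) // 2 = 13, which fixes the
# image_shape and n_in values used there.
def _conv_pool_output_size(input_size, filter_size=5, pool_size=2):
    """Side length of a square feature map after one conv+pool stage."""
    return (input_size - filter_size + 1) // pool_size
assert _conv_pool_output_size(_conv_pool_output_size(64)) == 13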
import RunConv
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
dataset='data.pkl',
nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
rng = numpy.random.RandomState(23455)
datasets = RunConv.load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches //= batch_size
n_valid_batches //= batch_size
n_test_batches //= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
    # Reshape matrix of rasterized images of shape (batch_size, 4 * 64 * 64)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer.
    # (64, 64) is the size of the input images, with 4 channels.
    layer0_input = x.reshape((batch_size, 4, 64, 64))
    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (64-5+1, 64-5+1) = (60, 60)
    # maxpooling reduces this further to (60/2, 60/2) = (30, 30)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 30, 30)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 4, 64, 64),
        # filter_shape[1] must equal the number of input feature maps (4 here),
        # as asserted in LeNetConvPoolLayer.__init__
        filter_shape=(nkerns[0], 4, 5, 5),
        poolsize=(2, 2)
    )
    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (30-5+1, 30-5+1) = (26, 26)
    # maxpooling reduces this further to (26/2, 26/2) = (13, 13)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 13, 13)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 30, 30),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )
    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e. matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 13 * 13),
    # or (500, 50 * 13 * 13) = (500, 8450) with the default values.
    layer2_input = layer1.output.flatten(2)
    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[1] * 13 * 13,
        n_out=500,
        activation=T.tanh
    )
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=2)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print('training @ iter = ', iter)
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in range(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
if __name__ == '__main__':
evaluate_lenet5()
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
| gpl-3.0 |
stonegithubs/odoo | addons/hr_contract/base_action_rule.py | 389 | 2646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_action_rule.base_action_rule import get_datetime
from openerp.osv import fields, osv
class base_action_rule(osv.Model):
""" Add resource and calendar for time-based conditions """
_name = 'base.action.rule'
_inherit = ['base.action.rule']
_columns = {
'trg_date_resource_field_id': fields.many2one(
'ir.model.fields', 'Use employee work schedule',
help='Use the user\'s working schedule.',
),
}
def _check_delay(self, cr, uid, action, record, record_dt, context=None):
""" Override the check of delay to try to use a user-related calendar.
If no calendar is found, fallback on the default behavior. """
if action.trg_date_calendar_id and action.trg_date_range_type == 'day' and action.trg_date_resource_field_id:
user = record[action.trg_date_resource_field_id.name]
if user.employee_ids and user.employee_ids[0].contract_id \
and user.employee_ids[0].contract_id.working_hours:
calendar = user.employee_ids[0].contract_id.working_hours
start_dt = get_datetime(record_dt)
resource_id = user.employee_ids[0].resource_id.id
action_dt = self.pool['resource.calendar'].schedule_days_get_date(
cr, uid, calendar.id, action.trg_date_range,
day_date=start_dt, compute_leaves=True, resource_id=resource_id,
context=context
)
return action_dt
return super(base_action_rule, self)._check_delay(cr, uid, action, record, record_dt, context=context)
| agpl-3.0 |
JAOSP/aosp_platform_external_chromium_org | build/android/pylib/fake_dns.py | 31 | 2221 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import android_commands
import constants
import logging
import os
import subprocess
import time
class FakeDns(object):
"""Wrapper class for the fake_dns tool."""
_FAKE_DNS_PATH = constants.TEST_EXECUTABLE_DIR + '/fake_dns'
def __init__(self, adb, build_type):
"""
Args:
adb: the AndroidCommands to use.
build_type: 'Release' or 'Debug'.
"""
self._adb = adb
self._build_type = build_type
self._fake_dns = None
self._original_dns = None
def _PushAndStartFakeDns(self):
"""Starts the fake_dns server that replies all name queries 127.0.0.1.
Returns:
subprocess instance connected to the fake_dns process on the device.
"""
self._adb.PushIfNeeded(
os.path.join(constants.DIR_SOURCE_ROOT, 'out', self._build_type,
'fake_dns'),
FakeDns._FAKE_DNS_PATH)
return subprocess.Popen(
['adb', '-s', self._adb._adb.GetSerialNumber(),
'shell', '%s -D' % FakeDns._FAKE_DNS_PATH])
def SetUp(self):
"""Configures the system to point to a DNS server that replies 127.0.0.1.
This can be used in combination with the forwarder to forward all web
traffic to a replay server.
The TearDown() method will perform all cleanup.
"""
self._adb.RunShellCommand('ip route add 8.8.8.0/24 via 127.0.0.1 dev lo')
self._fake_dns = self._PushAndStartFakeDns()
self._original_dns = self._adb.RunShellCommand('getprop net.dns1')[0]
self._adb.RunShellCommand('setprop net.dns1 127.0.0.1')
time.sleep(2) # Time for server to start and the setprop to take effect.
def TearDown(self):
"""Shuts down the fake_dns."""
if self._fake_dns:
if not self._original_dns or self._original_dns == '127.0.0.1':
logging.warning('Bad original DNS, falling back to Google DNS.')
self._original_dns = '8.8.8.8'
self._adb.RunShellCommand('setprop net.dns1 %s' % self._original_dns)
self._fake_dns.kill()
self._adb.RunShellCommand('ip route del 8.8.8.0/24 via 127.0.0.1 dev lo')
| bsd-3-clause |
evernym/plenum | plenum/test/checkpoints/test_ordering_after_catchup.py | 2 | 2191 | from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check
from plenum.test.node_catchup.helper import waitNodeDataEquality, \
ensure_all_nodes_have_same_data
from plenum.common.util import randomString
from plenum.test.test_node import checkNodesConnected
from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node
CHK_FREQ = 6
LOG_SIZE = 3 * CHK_FREQ
nodeCount = 4
def add_new_node(looper, pool_nodes, sdk_pool_handle, sdk_wallet_steward,
tdir, tconf, all_plugins_path):
name = randomString(6)
node_name = "Node-" + name
new_steward_name = "Steward-" + name
_, new_node = sdk_add_new_steward_and_node(
looper, sdk_pool_handle, sdk_wallet_steward,
new_steward_name, node_name, tdir, tconf,
allPluginsPath=all_plugins_path)
pool_nodes.append(new_node)
looper.run(checkNodesConnected(pool_nodes))
waitNodeDataEquality(looper, new_node, *pool_nodes[:-1], exclude_from_check=['check_last_ordered_3pc_backup'])
# The new node did not participate in ordering of the batch with
# the new steward NYM transaction and the batch with the new NODE
# transaction. The new node got these transactions via catch-up.
return new_node
def test_ordering_after_more_than_f_nodes_caught_up(
chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward,
tdir, tconf, allPluginsPath):
"""
Verifies that more than LOG_SIZE batches can be ordered in one view
after more than f nodes caught up in this view when some 3PC-batches
had already been ordered in this view.
"""
initial_view_no = txnPoolNodeSet[0].viewNo
for _ in range(2):
add_new_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward,
tdir, tconf, allPluginsPath)
checkViewNoForNodes(txnPoolNodeSet, initial_view_no)
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_steward, 20)
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc_backup'])
checkViewNoForNodes(txnPoolNodeSet, initial_view_no)
| apache-2.0 |
onestarshang/flask_super_config | venv/lib/python2.7/site-packages/requests/__init__.py | 412 | 1861 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.7.0'
__build__ = 0x020700
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| gpl-2.0 |
placrosse/ImpalaToGo | thirdparty/thrift-0.9.0/lib/py/src/protocol/TCompactProtocol.py | 97 | 10943 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from TProtocol import *
from struct import pack, unpack
__all__ = ['TCompactProtocol', 'TCompactProtocolFactory']
CLEAR = 0
FIELD_WRITE = 1
VALUE_WRITE = 2
CONTAINER_WRITE = 3
BOOL_WRITE = 4
FIELD_READ = 5
CONTAINER_READ = 6
VALUE_READ = 7
BOOL_READ = 8
def make_helper(v_from, container):
def helper(func):
def nested(self, *args, **kwargs):
assert self.state in (v_from, container), (self.state, v_from, container)
return func(self, *args, **kwargs)
return nested
return helper
writer = make_helper(VALUE_WRITE, CONTAINER_WRITE)
reader = make_helper(VALUE_READ, CONTAINER_READ)
def makeZigZag(n, bits):
return (n << 1) ^ (n >> (bits - 1))
def fromZigZag(n):
return (n >> 1) ^ -(n & 1)
def writeVarint(trans, n):
out = []
while True:
if n & ~0x7f == 0:
out.append(n)
break
else:
out.append((n & 0xff) | 0x80)
n = n >> 7
trans.write(''.join(map(chr, out)))
def readVarint(trans):
result = 0
shift = 0
while True:
x = trans.readAll(1)
byte = ord(x)
result |= (byte & 0x7f) << shift
if byte >> 7 == 0:
return result
shift += 7
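# Worked example (informal): zigzag encoding maps small signed integers to
# small unsigned ones so that they varint-encode compactly:
#   makeZigZag(0, 32) == 0, makeZigZag(-1, 32) == 1, makeZigZag(1, 32) == 2
#   fromZigZag(makeZigZag(-3, 32)) == -3
# writeVarint then emits 7 bits per byte, least-significant group first, with
# the high bit set on every byte except the last; e.g. 300 (0b100101100) is
# written as the two bytes 0xAC 0x02.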
class CompactType:
STOP = 0x00
TRUE = 0x01
FALSE = 0x02
BYTE = 0x03
I16 = 0x04
I32 = 0x05
I64 = 0x06
DOUBLE = 0x07
BINARY = 0x08
LIST = 0x09
SET = 0x0A
MAP = 0x0B
STRUCT = 0x0C
CTYPES = {TType.STOP: CompactType.STOP,
TType.BOOL: CompactType.TRUE, # used for collection
TType.BYTE: CompactType.BYTE,
TType.I16: CompactType.I16,
TType.I32: CompactType.I32,
TType.I64: CompactType.I64,
TType.DOUBLE: CompactType.DOUBLE,
TType.STRING: CompactType.BINARY,
TType.STRUCT: CompactType.STRUCT,
TType.LIST: CompactType.LIST,
TType.SET: CompactType.SET,
TType.MAP: CompactType.MAP
}
TTYPES = {}
for k, v in CTYPES.items():
TTYPES[v] = k
TTYPES[CompactType.FALSE] = TType.BOOL
del k
del v
class TCompactProtocol(TProtocolBase):
"""Compact implementation of the Thrift protocol driver."""
PROTOCOL_ID = 0x82
VERSION = 1
VERSION_MASK = 0x1f
TYPE_MASK = 0xe0
TYPE_SHIFT_AMOUNT = 5
def __init__(self, trans):
TProtocolBase.__init__(self, trans)
self.state = CLEAR
self.__last_fid = 0
self.__bool_fid = None
self.__bool_value = None
self.__structs = []
self.__containers = []
def __writeVarint(self, n):
writeVarint(self.trans, n)
def writeMessageBegin(self, name, type, seqid):
assert self.state == CLEAR
self.__writeUByte(self.PROTOCOL_ID)
self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT))
self.__writeVarint(seqid)
self.__writeString(name)
self.state = VALUE_WRITE
def writeMessageEnd(self):
assert self.state == VALUE_WRITE
self.state = CLEAR
def writeStructBegin(self, name):
assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_WRITE
self.__last_fid = 0
def writeStructEnd(self):
assert self.state == FIELD_WRITE
self.state, self.__last_fid = self.__structs.pop()
def writeFieldStop(self):
self.__writeByte(0)
def __writeFieldHeader(self, type, fid):
delta = fid - self.__last_fid
if 0 < delta <= 15:
self.__writeUByte(delta << 4 | type)
else:
self.__writeByte(type)
self.__writeI16(fid)
self.__last_fid = fid
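# Worked example (informal): writing field id 3 immediately after field id 1
# emits the single byte 0x25 -- delta 2 in the high nibble and
# CompactType.I32 (0x05) in the low nibble. A delta outside 1..15 falls back
# to a full type byte followed by the zigzag-varint field id.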
def writeFieldBegin(self, name, type, fid):
assert self.state == FIELD_WRITE, self.state
if type == TType.BOOL:
self.state = BOOL_WRITE
self.__bool_fid = fid
else:
self.state = VALUE_WRITE
self.__writeFieldHeader(CTYPES[type], fid)
def writeFieldEnd(self):
assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state
self.state = FIELD_WRITE
def __writeUByte(self, byte):
self.trans.write(pack('!B', byte))
def __writeByte(self, byte):
self.trans.write(pack('!b', byte))
def __writeI16(self, i16):
self.__writeVarint(makeZigZag(i16, 16))
def __writeSize(self, i32):
self.__writeVarint(i32)
def writeCollectionBegin(self, etype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size <= 14:
self.__writeUByte(size << 4 | CTYPES[etype])
else:
self.__writeUByte(0xf0 | CTYPES[etype])
self.__writeSize(size)
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
writeSetBegin = writeCollectionBegin
writeListBegin = writeCollectionBegin
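# Worked example (informal): a list of three i32 elements begins with the
# single byte 0x35 -- size 3 in the high nibble, CompactType.I32 in the low
# nibble. Sizes above 14 emit 0xf0 | ctype followed by a varint size.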
def writeMapBegin(self, ktype, vtype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size == 0:
self.__writeByte(0)
else:
self.__writeSize(size)
self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype])
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
def writeCollectionEnd(self):
assert self.state == CONTAINER_WRITE, self.state
self.state = self.__containers.pop()
writeMapEnd = writeCollectionEnd
writeSetEnd = writeCollectionEnd
writeListEnd = writeCollectionEnd
def writeBool(self, bool):
if self.state == BOOL_WRITE:
if bool:
ctype = CompactType.TRUE
else:
ctype = CompactType.FALSE
self.__writeFieldHeader(ctype, self.__bool_fid)
elif self.state == CONTAINER_WRITE:
if bool:
self.__writeByte(CompactType.TRUE)
else:
self.__writeByte(CompactType.FALSE)
else:
raise AssertionError("Invalid state in compact protocol")
writeByte = writer(__writeByte)
writeI16 = writer(__writeI16)
@writer
def writeI32(self, i32):
self.__writeVarint(makeZigZag(i32, 32))
@writer
def writeI64(self, i64):
self.__writeVarint(makeZigZag(i64, 64))
@writer
def writeDouble(self, dub):
self.trans.write(pack('!d', dub))
def __writeString(self, s):
self.__writeSize(len(s))
self.trans.write(s)
writeString = writer(__writeString)
def readFieldBegin(self):
assert self.state == FIELD_READ, self.state
type = self.__readUByte()
if type & 0x0f == TType.STOP:
return (None, 0, 0)
delta = type >> 4
if delta == 0:
fid = self.__readI16()
else:
fid = self.__last_fid + delta
self.__last_fid = fid
type = type & 0x0f
if type == CompactType.TRUE:
self.state = BOOL_READ
self.__bool_value = True
elif type == CompactType.FALSE:
self.state = BOOL_READ
self.__bool_value = False
else:
self.state = VALUE_READ
return (None, self.__getTType(type), fid)
def readFieldEnd(self):
assert self.state in (VALUE_READ, BOOL_READ), self.state
self.state = FIELD_READ
def __readUByte(self):
result, = unpack('!B', self.trans.readAll(1))
return result
def __readByte(self):
result, = unpack('!b', self.trans.readAll(1))
return result
def __readVarint(self):
return readVarint(self.trans)
def __readZigZag(self):
return fromZigZag(self.__readVarint())
def __readSize(self):
result = self.__readVarint()
if result < 0:
raise TException("Length < 0")
return result
def readMessageBegin(self):
assert self.state == CLEAR
proto_id = self.__readUByte()
if proto_id != self.PROTOCOL_ID:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad protocol id in the message: %d' % proto_id)
ver_type = self.__readUByte()
type = (ver_type & self.TYPE_MASK) >> self.TYPE_SHIFT_AMOUNT
version = ver_type & self.VERSION_MASK
if version != self.VERSION:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad version: %d (expect %d)' % (version, self.VERSION))
seqid = self.__readVarint()
name = self.__readString()
return (name, type, seqid)
def readMessageEnd(self):
assert self.state == CLEAR
assert len(self.__structs) == 0
def readStructBegin(self):
assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_READ
self.__last_fid = 0
def readStructEnd(self):
assert self.state == FIELD_READ
self.state, self.__last_fid = self.__structs.pop()
def readCollectionBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size_type = self.__readUByte()
size = size_type >> 4
type = self.__getTType(size_type)
if size == 15:
size = self.__readSize()
self.__containers.append(self.state)
self.state = CONTAINER_READ
return type, size
readSetBegin = readCollectionBegin
readListBegin = readCollectionBegin
def readMapBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size = self.__readSize()
types = 0
if size > 0:
types = self.__readUByte()
vtype = self.__getTType(types)
ktype = self.__getTType(types >> 4)
self.__containers.append(self.state)
self.state = CONTAINER_READ
return (ktype, vtype, size)
def readCollectionEnd(self):
assert self.state == CONTAINER_READ, self.state
self.state = self.__containers.pop()
readSetEnd = readCollectionEnd
readListEnd = readCollectionEnd
readMapEnd = readCollectionEnd
def readBool(self):
if self.state == BOOL_READ:
return self.__bool_value == CompactType.TRUE
elif self.state == CONTAINER_READ:
return self.__readByte() == CompactType.TRUE
else:
raise AssertionError("Invalid state in compact protocol: %d" %
self.state)
readByte = reader(__readByte)
__readI16 = __readZigZag
readI16 = reader(__readZigZag)
readI32 = reader(__readZigZag)
readI64 = reader(__readZigZag)
@reader
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('!d', buff)
return val
def __readString(self):
len = self.__readSize()
return self.trans.readAll(len)
readString = reader(__readString)
def __getTType(self, byte):
return TTYPES[byte & 0x0f]
class TCompactProtocolFactory:
def __init__(self):
pass
def getProtocol(self, trans):
return TCompactProtocol(trans)
| apache-2.0 |
atmark-techno/atmark-dist | user/mosquitto/mosquitto-1.3.4/test/broker/08-ssl-bridge.py | 19 | 2068 | #!/usr/bin/env python
import os
import subprocess
import socket
import ssl
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
client_id = socket.gethostname()+".bridge_test"
connect_packet = mosq_test.gen_connect(client_id, keepalive=keepalive, clean_session=False, proto_ver=128+3)
connack_packet = mosq_test.gen_connack(rc=0)
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "bridge/#", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish("bridge/ssl/test", qos=0, payload="message")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssock = ssl.wrap_socket(sock, ca_certs="../ssl/all-ca.crt", keyfile="../ssl/server.key", certfile="../ssl/server.crt", server_side=True, ssl_version=ssl.PROTOCOL_TLSv1)
ssock.settimeout(20)
ssock.bind(('', 1888))
ssock.listen(5)
broker = subprocess.Popen(['../../src/mosquitto', '-v', '-c', '08-ssl-bridge.conf'], stderr=subprocess.PIPE)
try:
time.sleep(0.5)
(bridge, address) = ssock.accept()
bridge.settimeout(20)
if mosq_test.expect_packet(bridge, "connect", connect_packet):
bridge.send(connack_packet)
if mosq_test.expect_packet(bridge, "subscribe", subscribe_packet):
bridge.send(suback_packet)
pub = subprocess.Popen(['./08-ssl-bridge-helper.py'], stdout=subprocess.PIPE)
pub.wait()
if mosq_test.expect_packet(bridge, "publish", publish_packet):
rc = 0
bridge.close()
finally:
try:
bridge.close()
except NameError:
pass
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
ssock.close()
exit(rc)
| gpl-2.0 |
viur-framework/server | tasks.py | 1 | 13731 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from server.update import checkUpdate
from server.config import conf, sharedConf
from server import errors, request
from google.appengine.api import users
from google.appengine.api import taskqueue
from google.appengine.ext.deferred import PermanentTaskFailure
from server import db
from functools import wraps
import json
import logging
import os, sys
_periodicTasks = {}
_callableTasks = {}
_deferedTasks = {}
_startupTasks = []
_periodicTaskID = 1L #Used to determine bound functions
class CallableTaskBase:
"""
Base class for user-callable tasks.
Must be subclassed.
"""
key = None # Unique identifier for this task
name = None # Human-Readable name
descr = None # Human-Readable description
kindName = "server-task"
def canCall( self ):
"""
Checks whether the current user can execute this task
:returns: bool
"""
return( False )
def dataSkel(self):
"""
If additional data is needed, return a skeleton-instance here.
These values are then passed to *execute*.
"""
return( None )
def execute(self):
"""
The actual code that should be run goes here.
"""
raise NotImplemented()
class TaskHandler:
"""
Task Handler.
Handles calling of Tasks (queued and periodic), and performs update checks
Do not Modify. Do not Subclass.
"""
adminInfo = None
retryCountWarningThreshold = 25
def __init__(self, moduleName, modulePath ):
pass
def findBoundTask( self, task, obj=None, depth=0 ):
"""
Tries to locate the instance this function belongs to.
If it succeeds in finding it, it returns the function and its instance (-> its "self").
Otherwise, None is returned.
:param task: A callable decorated with @PeriodicTask
:type task: callable
:param obj: Object, which will be scanned in the current iteration. None means start at conf["viur.mainApp"].
:type obj: object
:param depth: Current iteration depth.
:type depth: int
"""
if depth>3 or not "periodicTaskID" in dir( task ): #Limit the maximum recursion depth
return( None )
obj = obj or conf["viur.mainApp"]
for attr in dir( obj ):
if attr.startswith("_"):
continue
try:
v = getattr( obj, attr )
except AttributeError:
continue
if callable( v ) and "periodicTaskID" in dir( v ) and str(v.periodicTaskID)==str(task.periodicTaskID):
return( v, obj )
if not isinstance( v, basestring ) and not callable( v ):
res = self.findBoundTask( task, v, depth+1 )
if res:
return( res )
return( None )
def deferred(self, *args, **kwargs ):
"""
This catches one deferred call and routes it to its destination
"""
from server import session
from server import utils
global _deferedTasks
req = request.current.get().request
if 'X-AppEngine-TaskName' not in req.headers:
logging.critical('Detected an attempted XSRF attack. The header "X-AppEngine-Taskname" was not set.')
raise errors.Forbidden()
in_prod = ( not req.environ.get("SERVER_SOFTWARE").startswith("Devel") )
if in_prod and req.environ.get("REMOTE_ADDR") != "0.1.0.2":
logging.critical('Detected an attempted XSRF attack. This request did not originate from Task Queue.')
raise errors.Forbidden()
# Check if the retry count exceeds our warning threshold
retryCount = req.headers.get("X-Appengine-Taskretrycount", None)
if retryCount:
if int(retryCount) == self.retryCountWarningThreshold:
utils.sendEMailToAdmins("Deferred task retry count exceeded warning threshold",
"Task %s will now be retried for the %sth time." % (
req.headers.get("X-Appengine-Taskname", ""),
retryCount))
cmd, data = json.loads( req.body )
try:
funcPath, args, kwargs, env = data
except ValueError: #We got an old call without a frozen environment
env = None
funcPath, args, kwargs = data
if env:
if "user" in env and env["user"]:
session.current["user"] = env["user"]
if "lang" in env and env["lang"]:
request.current.get().language = env["lang"]
if "custom" in env and conf["viur.tasks.customEnvironmentHandler"]:
# Check if we need to restore additional environmental data
assert isinstance(conf["viur.tasks.customEnvironmentHandler"], tuple) \
and len(conf["viur.tasks.customEnvironmentHandler"])==2 \
and callable(conf["viur.tasks.customEnvironmentHandler"][1]), \
"Your customEnvironmentHandler must be a tuple of two callable if set!"
conf["viur.tasks.customEnvironmentHandler"][1](env["custom"])
if cmd=="rel":
caller = conf["viur.mainApp"]
pathlist = [x for x in funcPath.split("/") if x]
for currpath in pathlist:
if currpath not in dir(caller):
logging.error("ViUR missed a deferred task! Could not resolve the path %s. Failed segment was %s", funcPath, currpath)
return
caller = getattr(caller, currpath)
try:
caller(*args, **kwargs)
except PermanentTaskFailure:
pass
except Exception as e:
logging.exception(e)
raise errors.RequestTimeout() #Task-API should retry
elif cmd=="unb":
if not funcPath in _deferedTasks:
logging.error("Ive missed a defered task! %s(%s,%s)" % (funcPath,str(args), str(kwargs)))
try:
_deferedTasks[ funcPath](*args, **kwargs)
except PermanentTaskFailure:
pass
except Exception as e:
logging.exception(e)
raise errors.RequestTimeout() #Task-API should retry
deferred.exposed=True
def index(self, *args, **kwargs):
global _callableTasks, _periodicTasks
logging.debug("Starting maintenance-run")
checkUpdate() #Let the update-module verify the database layout first
logging.debug("Updatecheck complete")
for task,intervall in _periodicTasks.items(): #Call all periodic tasks
if intervall: #Ensure this task doesn't get called too often
try:
lastCall = db.Get( db.Key.from_path( "viur-task-interval", task.periodicTaskName ) )
if lastCall["date"] > datetime.now()-timedelta( minutes=intervall ):
logging.debug("Skipping task %s - Has already run recently." % task.periodicTaskName )
continue
except db.EntityNotFoundError:
pass
res = self.findBoundTask( task )
if res: #Its bound, call it this way :)
res[0]()
else:
task() #It seems it wasn't bound - call it as a static method
logging.debug("Successfully called task %s" % task.periodicTaskName )
if intervall:
# Update its last-call timestamp
entry = db.Entity( "viur-task-interval", name=task.periodicTaskName )
entry["date"] = datetime.now()
db.Put( entry )
logging.debug("Periodic tasks complete")
for currentTask in db.Query("viur-queued-tasks").iter(): #Look for queued tasks
db.Delete( currentTask.key() )
if currentTask["taskid"] in _callableTasks:
task = _callableTasks[ currentTask["taskid"] ]()
tmpDict = {}
for k in currentTask.keys():
if k == "taskid":
continue
tmpDict[ k ] = json.loads( currentTask[ k ] )
try:
task.execute( **tmpDict )
except Exception as e:
logging.error("Error executing Task")
logging.exception( e )
logging.debug("Scheduled tasks complete")
index.exposed=True
def list(self, *args, **kwargs ):
"""Lists all user-callabe tasks which are callable by this user"""
global _callableTasks
class extList( list ):
pass
res = extList( [{"key": x.key, "name":_(x.name), "descr":_(x.descr) } for x in _callableTasks.values() if x().canCall()] )
res.cursor = None
res.baseSkel = {}
return( self.render.list( res ) )
list.exposed=True
def execute(self, taskID, *args, **kwargs ):
"""Queues a specific task for the next maintenance run"""
global _callableTasks
from server import securitykey
if taskID in _callableTasks:
task = _callableTasks[ taskID ]()
else:
return
if not task.canCall():
raise errors.Unauthorized()
skel = task.dataSkel()
if "skey" in kwargs:
skey = kwargs["skey"]
else:
skey = ""
if len(kwargs)==0 or skey=="" or not skel.fromClient(kwargs) or ("bounce" in kwargs and kwargs["bounce"]=="1"):
return self.render.add( skel )
if not securitykey.validate(skey):
raise errors.PreconditionFailed()
task.execute( **skel.getValues() )
return self.render.addItemSuccess( skel )
execute.exposed = True
TaskHandler.admin = True
TaskHandler.vi = True
TaskHandler.html = True
## Decorators ##
def noRetry( f ):
"""Prevents a deferred Function from beeing called a second time"""
@wraps( f )
def wrappedFunc( *args, **kwargs ):
try:
f( *args, **kwargs )
except Exception as e:
logging.exception(e)
raise PermanentTaskFailure()
return( wrappedFunc )
def callDeferred( func ):
"""
This is a decorator which always calls the function deferred.
Unlike Google's implementation, this one works (with bound functions).
"""
if "viur_doc_build" in dir(sys):
return(func)
__undefinedFlag_ = object()
def mkDefered( func, self=__undefinedFlag_, *args, **kwargs ):
from server.utils import getCurrentUser
try:
req = request.current.get()
except: #This will fail for warmup requests
req = None
if req is not None and "HTTP_X_APPENGINE_TASKRETRYCOUNT".lower() in [x.lower() for x in os.environ.keys()] and not "DEFERED_TASK_CALLED" in dir( req ): #This is the deferred call
req.DEFERED_TASK_CALLED = True #Defer recursive calls to a deferred function again.
if self is __undefinedFlag_:
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
else:
try:
funcPath = "%s/%s" % (self.modulePath, func.func_name )
command = "rel"
except:
funcPath = "%s.%s" % ( func.__name__, func.__module__ )
if self!=__undefinedFlag_:
args = (self,)+args #Reappend self to args, as this function is (hopefully) unbound
command = "unb"
taskargs = dict((x, kwargs.pop(("_%s" % x), None)) for x in ("countdown", "eta", "name", "target", "retry_options"))
taskargs["url"] = "/_tasks/deferred"
transactional = kwargs.pop("_transactional", False)
taskargs["headers"] = {"Content-Type": "application/octet-stream"}
queue = kwargs.pop("_queue", "default")
# Try to preserve the important data from the current environment
env = {"user": None}
usr = getCurrentUser()
if usr:
env["user"] = {"key": usr["key"],
"name": usr["name"],
"access": usr["access"]}
try:
env["lang"] = request.current.get().language
except AttributeError: #This isn't originating from a normal request
pass
if conf["viur.tasks.customEnvironmentHandler"]:
# Check if this project relies on additional environmental variables and serialize them too
assert isinstance(conf["viur.tasks.customEnvironmentHandler"], tuple) \
and len(conf["viur.tasks.customEnvironmentHandler"])==2 \
and callable(conf["viur.tasks.customEnvironmentHandler"][0]), \
"Your customEnvironmentHandler must be a tuple of two callable if set!"
env["custom"] = conf["viur.tasks.customEnvironmentHandler"][0]()
pickled = json.dumps((command, (funcPath, args, kwargs, env)))
task = taskqueue.Task(payload=pickled, **taskargs)
return task.add(queue, transactional=transactional)
global _deferedTasks
_deferedTasks[ "%s.%s" % ( func.__name__, func.__module__ ) ] = func
return( lambda *args, **kwargs: mkDefered( func, *args, **kwargs) )
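# Usage sketch (sendReminder and its arguments are hypothetical): a callable
# decorated with @callDeferred is enqueued on the App Engine task queue
# instead of running inline; task options are passed via underscore-prefixed
# keyword arguments.
#
# @callDeferred
# def sendReminder(userKey):
#     pass # executed later via /_tasks/deferred
#
# sendReminder("user-123", _queue="default", _countdown=60)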
def PeriodicTask( intervall ):
"""
Decorator to call a function periodically during maintenance.
intervall defines a lower bound for the call frequency of this task;
it will not be called more often than once every intervall minutes.
(Note that the actual delay between two subsequent calls may be much larger.)
:param intervall: Call at most every intervall minutes. 0 means call as often as possible.
:type intervall: int
"""
def mkDecorator( fn ):
global _periodicTasks, _periodicTaskID
_periodicTasks[ fn ] = intervall
fn.periodicTaskID = _periodicTaskID
fn.periodicTaskName = "%s.%s" % ( fn.__module__, fn.__name__ )
_periodicTaskID += 1
return( fn )
return( mkDecorator )
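# Usage sketch (cleanupExpiredSessions is hypothetical): run during
# maintenance, but at most once per hour.
#
# @PeriodicTask(60)
# def cleanupExpiredSessions():
#     pass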
def CallableTask( fn ):
"""Marks a Class as representing a user-callable Task.
It *should* extend CallableTaskBase and *must* provide
its API
"""
global _callableTasks
_callableTasks[ fn.key ] = fn
return( fn )
def StartupTask( fn ):
"""
Functions decorated with this are called shortly after instance startup.
It's *not* guaranteed that they actually run on the instance that just started up!
Wrapped functions must not take any arguments.
"""
global _startupTasks
_startupTasks.append( fn )
return( fn )
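# Usage sketch (warmCaches is hypothetical): registered callables take no
# arguments and are invoked once via runStartupTasks().
#
# @StartupTask
# def warmCaches():
#     pass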
@callDeferred
def runStartupTasks():
"""
Runs all queued startupTasks.
Do not call directly!
"""
global _startupTasks
for st in _startupTasks:
st()
## Tasks ##
@CallableTask
class DisableApplicationTask( CallableTaskBase ):
"""
Allows enabling or disabling the application.
"""
key = "viur-disable-server"
name = "Enable or disable the application"
descr = "This will enable or disable the application."
kindName = "server-task"
def canCall( self ):
"""
Checks whether the current user can execute this task
:returns: bool
"""
return( users.is_current_user_admin() )
def dataSkel( self ):
from server.bones import booleanBone, stringBone
from server.skeleton import Skeleton
skel = Skeleton( self.kindName )
skel.active = booleanBone( descr="Application active", required=True )
skel.descr = stringBone( descr="Reason for disabling", required=False )
return( skel )
def execute(self, active, descr, *args, **kwargs):
if not active:
if descr:
sharedConf["viur.disabled"] = descr
else:
sharedConf["viur.disabled"] = True
else:
sharedConf["viur.disabled"] = False
| lgpl-3.0 |
sajeeshcs/nested_quota_final | nova/cells/opts.py | 13 | 2439 | # Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Global cells config options
"""
from oslo_config import cfg
cells_opts = [
cfg.BoolOpt('enable',
default=False,
help='Enable cell functionality'),
cfg.StrOpt('topic',
default='cells',
help='The topic cells nodes listen on'),
cfg.StrOpt('manager',
default='nova.cells.manager.CellsManager',
help='Manager for cells'),
cfg.StrOpt('name',
default='nova',
help='Name of this cell'),
cfg.ListOpt('capabilities',
default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
help='Key/Multi-value list with the capabilities of the cell'),
cfg.IntOpt('call_timeout',
default=60,
help='Seconds to wait for response from a call to a cell.'),
cfg.FloatOpt('reserve_percent',
default=10.0,
help='Percentage of cell capacity to hold in reserve. '
'Affects both memory and disk utilization'),
cfg.StrOpt('cell_type',
default='compute',
help='Type of cell: api or compute'),
cfg.IntOpt("mute_child_interval",
default=300,
help='Number of seconds after which a lack of capability and '
'capacity updates signals the child cell is to be '
'treated as a mute.'),
cfg.IntOpt('bandwidth_update_interval',
default=600,
help='Seconds between bandwidth updates for cells.'),
]
CONF = cfg.CONF
CONF.register_opts(cells_opts, group='cells')
def get_cell_type():
"""Return the cell type, 'api', 'compute', or None (if cells is disabled).
"""
if not CONF.cells.enable:
return
return CONF.cells.cell_type
| apache-2.0 |
CroissanceCommune/autonomie | autonomie/models/commercial.py | 1 | 2143 | # -*- coding: utf-8 -*-
# * Copyright (C) 2012-2013 Croissance Commune
# * Authors:
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
# * Pettier Gabriel;
# * TJEBBES Gaston <g.t@majerti.fr>
#
# This file is part of Autonomie : Progiciel de gestion de CAE.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
#
"""
Models related to the commercial module
"""
from sqlalchemy import (
Column,
Integer,
Text,
ForeignKey,
BigInteger,
)
from sqlalchemy.orm import (
relationship,
backref,
)
from autonomie_base.models.base import (
DBBASE,
default_table_args,
)
class TurnoverProjection(DBBASE):
"""
Turnover projection
:param company_id: The company this projection is related to
:param month: The month number this projection is made for
:param year: The year this projection is made for
"""
__tablename__ = 'turnover_projection'
__table_args__ = default_table_args
id = Column(Integer, primary_key=True)
company_id = Column(Integer, ForeignKey("company.id", ondelete="cascade"))
month = Column(Integer)
year = Column(Integer)
comment = Column(Text, default="")
value = Column(BigInteger)
company = relationship(
"Company",
backref=backref(
"turnoverprojections",
order_by="TurnoverProjection.month",
cascade="all, delete-orphan",
info={
'export': {'exclude': True},
},
)
)
| gpl-3.0 |
specify/specify7 | specifyweb/workbench/upload/tests/testmustmatch.py | 1 | 6636 | import json
from jsonschema import validate # type: ignore
from typing import List, Dict, Any, NamedTuple, Union
from .base import UploadTestsBase, get_table
from ..upload_result import Uploaded, Matched, NoMatch, NullRecord, ParseFailures, FailedBusinessRule
from ..upload import do_upload, do_upload_csv
from ..upload_table import UploadTable, MustMatchTable
from ..treerecord import TreeRecord, MustMatchTreeRecord
from ..upload_plan_schema import schema, parse_plan, parse_column_options
class MustMatchTests(UploadTestsBase):
def setUp(self) -> None:
super().setUp()
get_table('Collectingevent').objects.create(
stationfieldnumber='1',
discipline=self.discipline,
)
def upload_some_geography(self) -> None:
plan_json = dict(
baseTableName = 'Geography',
uploadable = { 'treeRecord': dict(
ranks = {
'Continent': 'Continent',
'Country': 'Country',
'State': 'State',
'County': 'County',
}
)}
)
validate(plan_json, schema)
scoped_plan = parse_plan(self.collection, plan_json).apply_scoping(self.collection)
data = [
dict(name="Douglas Co. KS", Continent="North America", Country="USA", State="Kansas", County="Douglas"),
dict(name="Greene Co. MO", Continent="North America", Country="USA", State="Missouri", County="Greene")
]
results = do_upload(self.collection, data, scoped_plan, self.agent.id)
for r in results:
assert isinstance(r.record_result, Uploaded)
def plan(self, must_match: bool) -> Dict:
reltype = 'mustMatchTable' if must_match else 'uploadTable'
return dict(
baseTableName = 'Collectionobject',
uploadable = { 'uploadTable': dict(
wbcols = {
'catalognumber' : "catno",
},
static = {},
toMany = {},
toOne = {
'collectingevent': { reltype: dict(
wbcols = {
'stationfieldnumber' : 'sfn',
},
static = {},
toOne = {},
toMany = {}
)}
}
)}
)
def test_mustmatchtree(self) -> None:
self.upload_some_geography()
json = dict(
baseTableName = 'Locality',
uploadable = { 'uploadTable': dict(
wbcols = {
'localityname' : "name",
},
static = {},
toMany = {},
toOne = {
'geography': { 'mustMatchTreeRecord': dict(
ranks = {
'Continent': 'Continent',
'Country': 'Country',
'State': 'State',
'County': 'County',
}
)}
}
)}
)
validate(json, schema)
plan = parse_plan(self.collection, json)
assert isinstance(plan, UploadTable)
assert isinstance(plan.toOne['geography'], TreeRecord)
assert isinstance(plan.toOne['geography'], MustMatchTreeRecord)
scoped_plan = plan.apply_scoping(self.collection)
data = [
dict(name="Douglas Co. KS", Continent="North America", Country="USA", State="Kansas", County="Douglas"),
dict(name="Emerald City", Continent="North America", Country="USA", State="Kansas", County="Oz"),
]
results = do_upload(self.collection, data, scoped_plan, self.agent.id)
self.assertIsInstance(results[0].record_result, Uploaded)
self.assertNotIsInstance(results[1].record_result, Uploaded)
self.assertIsInstance(results[1].toOne['geography'].record_result, NoMatch)
def test_mustmatch_parsing(self) -> None:
json = self.plan(must_match=True)
validate(json, schema)
plan = parse_plan(self.collection, json)
assert isinstance(plan, UploadTable)
assert isinstance(plan.toOne['collectingevent'], UploadTable)
self.assertIsInstance(plan.toOne['collectingevent'], MustMatchTable)
def test_mustmatch_uploading(self) -> None:
plan = parse_plan(self.collection, self.plan(must_match=True)).apply_scoping(self.collection)
data = [
dict(catno='0', sfn='1'),
dict(catno='1', sfn='2'),
dict(catno='2', sfn='1'),
dict(catno='3', sfn='2'),
]
starting_ce_count = get_table('Collectingevent').objects.count()
starting_co_count = get_table('Collectionobject').objects.count()
results = do_upload(self.collection, data, plan, self.agent.id)
for r, expected in zip(results, [Matched, NoMatch, Matched, NoMatch]):
self.assertIsInstance(r.toOne['collectingevent'].record_result, expected)
cos = get_table('Collectionobject').objects.count()
self.assertEqual(starting_co_count + 2, cos, "Two collection objects were created")
self.assertEqual(starting_ce_count, get_table('Collectingevent').objects.count(),
"there are an equal number of collecting events before and after the upload")
def test_mustmatch_with_null(self) -> None:
plan = parse_plan(self.collection, self.plan(must_match=True)).apply_scoping(self.collection)
data = [
dict(catno='0', sfn='1'),
dict(catno='1', sfn='2'),
dict(catno='2', sfn=''),
dict(catno='3', sfn='1'),
dict(catno='4', sfn='2'),
]
ce_count_before_upload = get_table('Collectingevent').objects.count()
results = do_upload(self.collection, data, plan, self.agent.id)
ces = set()
for r, expected in zip(results, [Matched, NoMatch, NullRecord, Matched, NoMatch]):
self.assertIsInstance(r.toOne['collectingevent'].record_result, expected)
if not r.contains_failure():
ce = get_table('Collectionobject').objects.get(id=r.record_result.get_id()).collectingevent_id
if expected is NullRecord:
self.assertIsNone(ce)
else:
ces.add(ce)
self.assertEqual(1, len(ces))
self.assertEqual(ce_count_before_upload, get_table('Collectingevent').objects.count())
| gpl-2.0 |
labsanmartin/Bika-LIMS | bika/lims/exportimport/instruments/alere/pima/__init__.py | 4 | 3686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Alere Pima
"""
from datetime import datetime
from bika.lims.utils import to_unicode
from bika.lims import bikaMessageFactory as _
from bika.lims.exportimport.instruments.resultsimport import \
AnalysisResultsImporter, InstrumentCSVResultsFileParser
class AlerePimaSLKParser(InstrumentCSVResultsFileParser):
#This class was written with bead files in mind, but the other
# file types are quite similar.
def __init__(self, slk):
InstrumentCSVResultsFileParser.__init__(self, slk)
self._columns = {} #The different data column names
self._linedata = {} #The line with the data
self._rownum = None
self._isFirst = True #Used to know if this is the first data line
def parse(self):
infile = self.getInputFile()
self.log("Parsing file ${file_name}", mapping={"file_name":infile.filename})
for line in infile.readlines():
line = line.split(';')
#The end of file
if line[0] == 'E\n':
self.log(
"End of file reached successfully: ${total_objects} objects, "
"${total_analyses} analyses, ${total_results} results",
mapping={"total_objects": self.getObjectsTotalCount(),
"total_analyses": self.getAnalysesTotalCount(),
"total_results":self.getResultsTotalCount()}
)
self.builddict(self._isFirst)
return True
#The header
elif line[0] != 'C' and line[0] != 'E':
self._header[line[0]] = line[1]
#Obtain the columns name
elif line[0] == 'C' and line[2] == 'Y1':
self._columns[line[1]] = line[3].split('"')[1]
#self._columns.append(line[3].split('"')[1])
#Is a data line
else:
if line[2] != self._rownum:
self.builddict(self._isFirst)
self._linedata = {}
self._rownum = line[2]
data = line[3].split('"')
if len(data) >1:
self._linedata[line[1]] = data[1]
else:
self._linedata[line[1]] = data[0][1:-1]
def builddict(self,isFirst):
#Build the dict by parsing self._columns and self._linedata
#This method should be modified to read other files
rawdict = {}
if self._isFirst:
self._isFirst = False
else:
for i in self._columns.keys():
if i in self._linedata:
rawdict[self._columns[i]] = self._linedata[i]
else:
rawdict[self._columns[i]] = None
rawdict['Remarks'] = rawdict['ErrorMessage']
rawdict['DefaultResult'] = self._columns['X6']
#I don't know which is the analysis service keyword...
self._addRawResult(rawdict['Sample'],{rawdict['Assay ID']:rawdict})
def getAttachmentFileType(self):
#This method must be overridden for other file types.
return "Alare Pima Beads"
class AlerePimaImporter(AnalysisResultsImporter):
def __init__(self, parser, context, idsearchcriteria, override,
allowed_ar_states=None, allowed_analysis_states=None,
instrument_uid=None):
AnalysisResultsImporter.__init__(self, parser, context,
idsearchcriteria, override,
allowed_ar_states,
allowed_analysis_states,
instrument_uid)
| agpl-3.0 |
Ms2ger/servo | tests/wpt/web-platform-tests/subresource-integrity/tools/list_hashes.py | 191 | 1440 | from os import path, listdir
from hashlib import sha512, sha384, sha256, md5
from base64 import b64encode
import re
DIR = path.normpath(path.join(__file__, "..", ".."))
'''
Yield each javascript and css file in the directory
'''
def js_and_css_files():
for f in listdir(DIR):
if path.isfile(f) and (f.endswith(".js") or f.endswith(".css")):
yield f
'''
Base64 encode a binary digest.
'''
def format_digest(digest):
return b64encode(digest)
'''
Generate an encoded sha512 URI.
'''
def sha512_uri(content):
return "sha512-%s" % format_digest(sha512(content).digest())
'''
Generate an encoded sha384 URI.
'''
def sha384_uri(content):
return "sha384-%s" % format_digest(sha384(content).digest())
'''
Generate an encoded sha256 URI.
'''
def sha256_uri(content):
return "sha256-%s" % format_digest(sha256(content).digest())
'''
Generate an encoded md5 digest URI.
'''
def md5_uri(content):
return "md5-%s" % format_digest(md5(content).digest())
def main():
for file in js_and_css_files():
print "Listing hash values for %s" % file
with open(file, "r") as content_file:
content = content_file.read()
print "\tSHA512 integrity: %s" % sha512_uri(content)
print "\tSHA384 integrity: %s" % sha384_uri(content)
print "\tSHA256 integrity: %s" % sha256_uri(content)
print "\tMD5 integrity: %s" % md5_uri(content)
if __name__ == "__main__":
main()
| mpl-2.0 |
geokrety/geokrety-api | tests/unittests/utils/payload/news.py | 2 | 1182 | # -*- coding: utf-8 -*-
from mixer.backend.flask import mixer
from geokrety_api_models import User
from .base import BasePayload
class NewsPayload(BasePayload):
_url = "/v1/news/{}"
_url_collection = "/v1/news"
_response_type = 'NewsResponse'
_response_type_collection = 'NewsCollectionResponse'
def __init__(self, *args, **kwargs):
super(NewsPayload, self).__init__('news', *args, **kwargs)
def set_title(self, title):
self._set_attribute('title', title)
return self
def set_content(self, content):
self._set_attribute('content', content)
return self
def set_username(self, username):
self._set_attribute('username', username)
return self
def set_author(self, user):
user_id = user.id if isinstance(user, User) else user
self._set_relationships('author', 'user', user_id)
return self
def set_obj(self, obj):
self.set_title(obj.title)
self.set_content(obj.content)
return self
def blend(self):
with mixer.ctx(commit=False):
self.set_obj(mixer.blend('geokrety_api_models.News'))
return self
| gpl-3.0 |
poo12138/gem5-stable | src/dev/x86/I82094AA.py | 69 | 2201 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
from X86IntPin import X86IntSinkPin
class I82094AA(BasicPioDevice):
type = 'I82094AA'
cxx_class = 'X86ISA::I82094AA'
cxx_header = "dev/x86/i82094aa.hh"
apic_id = Param.Int(1, 'APIC id for this IO APIC')
int_master = MasterPort("Port for sending interrupt messages")
int_latency = Param.Latency('1ns', \
"Latency for an interrupt to propagate through this device.")
external_int_pic = Param.I8259(NULL, "External PIC, if any")
def pin(self, line):
return X86IntSinkPin(device=self, number=line)
| bsd-3-clause |
luiseduardohdbackup/odoo | addons/mail/mail_message_subtype.py | 387 | 3094 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv
from openerp.osv import fields
class mail_message_subtype(osv.osv):
""" Class holding subtype definition for messages. Subtypes allow to tune
the follower subscription, allowing only some subtypes to be pushed
on the Wall. """
_name = 'mail.message.subtype'
_description = 'Message subtypes'
_order = 'sequence, id'
_columns = {
'name': fields.char('Message Type', required=True, translate=True,
help='Message subtype gives a more precise type on the message, '\
'especially for system notifications. For example, it can be '\
'a notification related to a new record (New), or to a stage '\
'change in a process (Stage change). Message subtypes allow to '\
'precisely tune the notifications the user want to receive on its wall.'),
'description': fields.text('Description', translate=True,
help='Description that will be added in the message posted for this '\
'subtype. If void, the name will be added instead.'),
'parent_id': fields.many2one('mail.message.subtype', string='Parent',
ondelete='set null',
help='Parent subtype, used for automatic subscription.'),
'relation_field': fields.char('Relation field',
help='Field used to link the related model to the subtype model when '\
'using automatic subscription on a related document. The field '\
'is used to compute getattr(related_document.relation_field).'),
'res_model': fields.char('Model',
help="Model the subtype applies to. If False, this subtype applies to all models."),
'default': fields.boolean('Default',
help="Activated by default when subscribing."),
'sequence': fields.integer('Sequence', help="Used to order subtypes."),
'hidden': fields.boolean('Hidden', help="Hide the subtype in the follower options")
}
_defaults = {
'default': True,
'sequence': 1,
}
| agpl-3.0 |
ixs/func | func/overlord/delegation_tools.py | 6 | 7115 | ##
## func delegation tools
## These are some helper methods that make working with delegation
## dictionary trees a little more sane in delegation and related
## functions.
##
## Copyright 2008, Red Hat, Inc.
## Steve Salevan <ssalevan@redhat.com>
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
import fnmatch
class groupby(object):
"""
Borrowing the groupby iterator class directly
from the Python API as it does not exist in Pythons < 2.4
"""
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = xrange(0)
def __iter__(self):
return self
def next(self):
while self.currkey == self.tgtkey:
self.currvalue = self.it.next() # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = self.it.next() # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
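# Usage sketch: this mirrors itertools.groupby, so the input must already be
# sorted by the key, e.g.
#   [(k, list(g)) for k, g in groupby(sorted(["a", "a", "b"]))]
#   => [('a', ['a', 'a']), ('b', ['b'])]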
def group_paths(ungrouped_list):
"""
Given a list of multi-element path lists,
groups them together into a list of single-element paths (which
exist directly under the current overlord) and a dictionary of paths
to send to next hops in the delegation chain, containing a list of lists
keyed by their common next hop.
"""
single_paths = [path[0] for path in ungrouped_list if len(path) == 1]
non_single_paths = [path for path in ungrouped_list if len(path) > 1]
path_group = dict([(key,[path[1:len(path)] for path in list(gen)])
for key, gen in groupby(non_single_paths,
key=lambda x:x[0])])
return (single_paths,path_group)
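# Worked example: group_paths([["a"], ["b", "c"], ["b", "d"]]) returns
# (["a"], {"b": [["c"], ["d"]]}) -- "a" lives directly under this overlord,
# while "c" and "d" are reached through the next hop "b".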
def get_paths_for_glob(glob, minionmap):
"""
Given a glob, returns the shortest path to each minion
matching it in the delegation dictionary tree
"""
pathlist = []
for elem in match_glob_in_tree(glob,minionmap):
result = get_shortest_path(elem,minionmap)
if result not in pathlist: #prevents duplicates
pathlist.append(result)
return pathlist
def list_all_minions(minionmap):
"""
Given a minion map, returns a flat list of all minions
contained within it
"""
minionlist = []
for minion in minionmap.keys():
if minion not in minionlist:
minionlist.append(minion)
for minion in list_all_minions(minionmap[minion]):
if minion not in minionlist:
minionlist.append(minion)
return minionlist
def flatten_list(bumpy_list):
"""
Flattens gnarly nested lists into much
nicer, flat lists
"""
flat_list = []
for item in bumpy_list:
if isinstance(item, list):
for elem in flatten_list(item):
flat_list.append(elem)
else:
flat_list.append(item)
return flat_list
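# Worked example: flatten_list([1, [2, [3]], 4]) == [1, 2, 3, 4]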
def match_glob_on_toplevel(pattern, minionmap):
"""
Searches through the top level of a dictionary
for all keys (minion FQDNs) matching the given
glob, returns matches
"""
matched = []
for k,v in minionmap.iteritems():
if fnmatch.fnmatch(k,pattern):
matched.append(k)
return matched
def match_glob_in_tree(pattern, minionmap):
"""
Searches through given tree dictionary for all
keys (minion FQDNs) matching the given glob,
returns matches
"""
matched = []
for k,v in minionmap.iteritems():
for result in match_glob_in_tree(pattern, v):
matched.append(result)
if fnmatch.fnmatch(k,pattern):
matched.append(k)
return matched
def minion_exists_under_node(minion, minionmap):
"""
A little wrapper around the match_glob_on_toplevel
method that you can use if you want to get a boolean
result denoting minion existence under your current
node
"""
return len(match_glob_on_toplevel(minion,minionmap)) > 0
def get_shortest_path(minion, minionmap):
"""
Given a minion that exists in the given tree,
this method returns the shortest path from the top
node to the minion in the form of a flat list
"""
def lensort(a,b):
if len(a) > len(b):
return 1
return -1
results = get_all_paths(minion,minionmap)
results.sort(lensort)
return results[0]
def get_all_paths(minion, minionmap):
"""
Given a minion that exists in the given tree,
this method returns all paths that exist from the top
node to the minion in the delegation dictionary tree
"""
#This is an ugly kludge of franken-code. If someone with
#more knowledge of graph theory than myself can improve this
#module, please, please do so. - ssalevan 7/2/08
seq_list = []
if minion_exists_under_node(minion, minionmap):
return [[minion]] #minion found, terminate branch
if minionmap == {}:
return [[]] #no minion found, terminate branch
for k,v in minionmap.iteritems():
branch_list = []
branch_list.append(k)
for branchlet in get_all_paths(minion, v):
branch_list.append(branchlet)
single_branch = flatten_list(branch_list)
if minion in single_branch:
seq_list.append(single_branch)
return seq_list
if __name__ == "__main__":
mymap = {'anthony':{'longpath1':{'longpath2':{'longpath3':{}}}},
'phil':{'steve':{'longpath3':{}}},
'tony':{'mike':{'anthony':{}}},
'just_a_minion':{}
}
print "- Testing an element that exists in multiple lists of varying length:"
for elem in match_glob_in_tree('*path3',mymap):
print "Element: %s, all paths: %s" % (elem, get_all_paths(elem,mymap))
print "best path: %s" % get_shortest_path(elem, mymap)
print "- Testing an element that is simply a minion and has no sub-nodes:"
for elem in match_glob_in_tree('*minion',mymap):
print "Element: %s, best path: %s" % (elem, get_shortest_path(elem,mymap))
print "- OK, now the whole thing:"
for elem in match_glob_in_tree('*',mymap):
print "Element: %s, best path: %s" % (elem, get_shortest_path(elem,mymap))
print "- And finally, with all duplicates removed:"
for elem in get_paths_for_glob('*',mymap):
print "Valid Path: %s" % elem
print "- And grouped together:"
print group_paths(get_paths_for_glob('*',mymap))
| gpl-2.0 |
netsamir/dotfiles | files/vim/bundle/YouCompleteMe/third_party/ycmd/cpp/ycm/tests/gmock/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax, nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| unlicense |
lude-ma/python-ivi | ivi/agilent/agilentMSOX3024A.py | 1 | 1693 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentMSOX3024A(agilent3000A):
"Agilent InfiniiVision MSOX3024A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO-X 3024A')
super(agilentMSOX3024A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 200e6
self._init_channels()
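# Usage sketch (the VISA resource string below is hypothetical; the driver
# itself does not mandate one):
#   scope = agilentMSOX3024A('TCPIP0::192.0.2.10::INSTR')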
| mit |
kaedroho/django | django/contrib/gis/geos/prototypes/errcheck.py | 98 | 2654 | """
Error checking functions for GEOS ctypes prototype functions.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOSFuncFactory
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
free = GEOSFuncFactory('GEOSFree')
free.argtypes = [c_void_p]
def last_arg_byref(args):
"Return the last C argument's value by reference."
return args[-1]._obj.value
def check_dbl(result, func, cargs):
"Check the status code and returns the double value passed in by reference."
# Checking the status code
if result != 1:
return None
# Double passed in by reference, return its value.
return last_arg_byref(cargs)
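# Wiring sketch: a GEOS routine that returns a status code and writes a double
# through its last pointer argument would attach this as its ctypes errcheck,
# e.g. (name hypothetical here) geos_area.errcheck = check_dbl.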
def check_geom(result, func, cargs):
"Error checking on routines that return Geometries."
if not result:
raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
return result
def check_minus_one(result, func, cargs):
"Error checking on routines that should not return -1."
if result == -1:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
def check_predicate(result, func, cargs):
"Error checking for unary/binary predicate functions."
if result == 1:
return True
elif result == 0:
return False
else:
raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
"""
Error checking for routines that return explicitly sized strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
# A c_size_t object is passed in by reference for the second
# argument on these routines, and it's needed to determine the
# correct size.
s = string_at(result, last_arg_byref(cargs))
# Freeing the memory allocated within GEOS
free(result)
return s
def check_string(result, func, cargs):
"""
Error checking for routines that return strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
# Getting the string value at the pointer address.
s = string_at(result)
# Freeing the memory allocated within GEOS
free(result)
return s
| bsd-3-clause |
infowantstobeseen/pyglet-darwincore | pyglet/image/codecs/__init__.py | 8 | 8615 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Collection of image encoders and decoders.
Modules must subclass ImageDecoder and ImageEncoder for each method of
decoding/encoding they support.
Modules must also implement the two functions::
def get_decoders():
# Return a list of ImageDecoder instances or []
return []
def get_encoders():
# Return a list of ImageEncoder instances or []
return []
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os.path
import sys
_decoders = [] # List of registered ImageDecoders
_decoder_extensions = {} # Map str -> list of matching ImageDecoders
_decoder_animation_extensions = {}
# Map str -> list of matching ImageDecoders
_encoders = [] # List of registered ImageEncoders
_encoder_extensions = {} # Map str -> list of matching ImageEncoders
class ImageDecodeException(Exception):
exception_priority = 10
class ImageEncodeException(Exception):
pass
class ImageDecoder(object):
def get_file_extensions(self):
'''Return a list of accepted file extensions, e.g. ['.png', '.bmp']
Lower-case only.
'''
return []
def get_animation_file_extensions(self):
'''Return a list of accepted file extensions, e.g. ['.gif', '.flc']
Lower-case only.
'''
return []
def decode(self, file, filename):
'''Decode the given file object and return an instance of `Image`.
Throws ImageDecodeException if there is an error. filename
can be a file type hint.
'''
raise NotImplementedError()
def decode_animation(self, file, filename):
'''Decode the given file object and return an instance of `Animation`.
Throws ImageDecodeException if there is an error. filename
can be a file type hint.
'''
raise ImageDecodeException('This decoder cannot decode animations.')
class ImageEncoder(object):
def get_file_extensions(self):
'''Return a list of accepted file extensions, e.g. ['.png', '.bmp']
Lower-case only.
'''
return []
def encode(self, image, file, filename, options={}):
'''Encode the given image to the given file. filename
provides a hint to the file format desired. options are
encoder-specific, and unknown options should be ignored or
issue warnings.
'''
raise NotImplementedError()
def get_encoders(filename=None):
'''Get an ordered list of encoders to attempt. filename can be used
as a hint for the filetype.
'''
encoders = []
if filename:
extension = os.path.splitext(filename)[1].lower()
encoders += _encoder_extensions.get(extension, [])
encoders += [e for e in _encoders if e not in encoders]
return encoders
def get_decoders(filename=None):
'''Get an ordered list of decoders to attempt. filename can be used
as a hint for the filetype.
'''
decoders = []
if filename:
extension = os.path.splitext(filename)[1].lower()
decoders += _decoder_extensions.get(extension, [])
decoders += [e for e in _decoders if e not in decoders]
return decoders
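# Usage sketch (hypothetical filename): callers typically try each decoder in
# order and fall through on failure, roughly:
#   for decoder in get_decoders('sprite.png'):
#       try:
#           return decoder.decode(file, 'sprite.png')
#       except ImageDecodeException:
#           continue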
def get_animation_decoders(filename=None):
'''Get an ordered list of decoders to attempt. filename can be used
as a hint for the filetype.
'''
decoders = []
if filename:
extension = os.path.splitext(filename)[1].lower()
decoders += _decoder_animation_extensions.get(extension, [])
decoders += [e for e in _decoders if e not in decoders]
return decoders
def add_decoders(module):
'''Add a decoder module. The module must define `get_decoders`. Once
added, the appropriate decoders defined in the codec will be returned by
pyglet.image.codecs.get_decoders.
'''
for decoder in module.get_decoders():
_decoders.append(decoder)
for extension in decoder.get_file_extensions():
if extension not in _decoder_extensions:
_decoder_extensions[extension] = []
_decoder_extensions[extension].append(decoder)
for extension in decoder.get_animation_file_extensions():
if extension not in _decoder_animation_extensions:
_decoder_animation_extensions[extension] = []
_decoder_animation_extensions[extension].append(decoder)
def add_encoders(module):
'''Add an encoder module. The module must define `get_encoders`. Once
added, the appropriate encoders defined in the codec will be returned by
pyglet.image.codecs.get_encoders.
'''
for encoder in module.get_encoders():
_encoders.append(encoder)
for extension in encoder.get_file_extensions():
if extension not in _encoder_extensions:
_encoder_extensions[extension] = []
_encoder_extensions[extension].append(encoder)
def add_default_image_codecs():
# Add the codecs we know about. These should be listed in order of
# preference. This is called automatically by pyglet.image.
# Compressed texture in DDS format
try:
from pyglet.image.codecs import dds
add_encoders(dds)
add_decoders(dds)
except ImportError:
pass
# Mac OS X default: Quicktime for Carbon, Quartz for Cocoa.
# TODO: Make ctypes Quartz the default for both Carbon & Cocoa.
if sys.platform == 'darwin':
try:
from pyglet import options as pyglet_options
if pyglet_options['darwin_cocoa']:
import pyglet.image.codecs.quartz
add_encoders(quartz)
add_decoders(quartz)
else:
import pyglet.image.codecs.quicktime
add_encoders(quicktime)
add_decoders(quicktime)
except ImportError:
pass
# Windows XP default: GDI+
if sys.platform in ('win32', 'cygwin'):
try:
import pyglet.image.codecs.gdiplus
add_encoders(gdiplus)
add_decoders(gdiplus)
except ImportError:
pass
# Linux default: GdkPixbuf 2.0
if sys.platform.startswith('linux'):
try:
import pyglet.image.codecs.gdkpixbuf2
add_encoders(gdkpixbuf2)
add_decoders(gdkpixbuf2)
except ImportError:
pass
# Fallback: PIL
try:
import pyglet.image.codecs.pil
add_encoders(pil)
add_decoders(pil)
except ImportError:
pass
# Fallback: PNG loader (slow)
try:
import pyglet.image.codecs.png
add_encoders(png)
add_decoders(png)
except ImportError:
pass
# Fallback: BMP loader (slow)
try:
import pyglet.image.codecs.bmp
add_encoders(bmp)
add_decoders(bmp)
except ImportError:
pass
| bsd-3-clause |
ayoubg/gem5-graphics | gem5/src/arch/power/PowerTLB.py | 66 | 1802 | # -*- mode:python -*-
# Copyright (c) 2009 The University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Timothy M. Jones
from m5.SimObject import SimObject
from m5.params import *
class PowerTLB(SimObject):
type = 'PowerTLB'
cxx_class = 'PowerISA::TLB'
cxx_header = 'arch/power/tlb.hh'
size = Param.Int(64, "TLB size")
| bsd-3-clause |
Ozerev/mangos-tbc | contrib/mmap/mmap_extract.py | 21 | 2401 | #!/usr/bin/python
"""
This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import os, sys, threading, time, subprocess
from multiprocessing import cpu_count
from collections import deque
mapList = deque([0,1,530,13,25,30,33,34,35,36,37,42,43,44,47,48,70,90,109,129,169,189,209,229,230,249,269,289,309,329,349,369,
389,409,429,449,450,451,469,489,509,529,531,532,533,534,540,542,543,544,545,546,547,548,550,552,553,554,555,556,557,558,559,
560,562,564,565,566,568,572,580,582,584,585,586,587,588,589,590,591,593,598])
class workerThread(threading.Thread):
def __init__(self, mapID):
threading.Thread.__init__(self)
self.mapID = mapID
def run(self):
name = "Worker for map %u" % (self.mapID)
print "++ %s" % (name)
if sys.platform == 'win32':
stInfo = subprocess.STARTUPINFO()
stInfo.dwFlags |= 0x00000001
stInfo.wShowWindow = 7
cFlags = subprocess.CREATE_NEW_CONSOLE
binName = "MoveMapGen.exe"
else:
stInfo = None
cFlags = 0
binName = "./MoveMapGen"
retcode = subprocess.call([binName, "%u" % (self.mapID),"--silent"], startupinfo=stInfo, creationflags=cFlags)
print "-- %s" % (name)
if __name__ == "__main__":
cpu = cpu_count() - 0 # You can reduce the load by putting 1 instead of 0 if you need to free 1 core/cpu
if cpu < 1:
cpu = 1
print "I will always maintain %u MoveMapGen tasks running in //\n" % (cpu)
while (len(mapList) > 0):
if (threading.active_count() <= cpu):
workerThread(mapList.popleft()).start()
time.sleep(0.1)
| gpl-2.0 |
gaeun/open-event-orga-server | migrations/versions/703eda37e75c_.py | 10 | 1987 | """empty message
Revision ID: 703eda37e75c
Revises: 86fe7df8dca6
Create Date: 2016-08-07 15:11:55.493400
"""
# revision identifiers, used by Alembic.
revision = '703eda37e75c'
down_revision = '86fe7df8dca6'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('events', sa.Column('pay_by_bank', sa.Boolean(), nullable=True))
op.add_column('events', sa.Column('pay_by_cheque', sa.Boolean(), nullable=True))
op.add_column('events', sa.Column('pay_by_paypal', sa.Boolean(), nullable=True))
op.add_column('events', sa.Column('pay_by_stripe', sa.Boolean(), nullable=True))
op.add_column('events', sa.Column('pay_onsite', sa.Boolean(), nullable=True))
op.add_column('events_version', sa.Column('pay_by_bank', sa.Boolean(), autoincrement=False, nullable=True))
op.add_column('events_version', sa.Column('pay_by_cheque', sa.Boolean(), autoincrement=False, nullable=True))
op.add_column('events_version', sa.Column('pay_by_paypal', sa.Boolean(), autoincrement=False, nullable=True))
op.add_column('events_version', sa.Column('pay_by_stripe', sa.Boolean(), autoincrement=False, nullable=True))
op.add_column('events_version', sa.Column('pay_onsite', sa.Boolean(), autoincrement=False, nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('events_version', 'pay_onsite')
op.drop_column('events_version', 'pay_by_stripe')
op.drop_column('events_version', 'pay_by_paypal')
op.drop_column('events_version', 'pay_by_cheque')
op.drop_column('events_version', 'pay_by_bank')
op.drop_column('events', 'pay_onsite')
op.drop_column('events', 'pay_by_stripe')
op.drop_column('events', 'pay_by_paypal')
op.drop_column('events', 'pay_by_cheque')
op.drop_column('events', 'pay_by_bank')
### end Alembic commands ###
| gpl-3.0 |
motion2015/edx-platform | lms/djangoapps/certificates/migrations/0017_auto__add_certificategenerationconfiguration.py | 103 | 7245 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CertificateGenerationConfiguration'
db.create_table('certificates_certificategenerationconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('certificates', ['CertificateGenerationConfiguration'])
def backwards(self, orm):
# Deleting model 'CertificateGenerationConfiguration'
db.delete_table('certificates_certificategenerationconfiguration')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.certificategenerationconfiguration': {
'Meta': {'object_name': 'CertificateGenerationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 |
jumpstarter-io/nova | nova/tests/scheduler/test_filters.py | 17 | 8685 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import inspect
import sys
from nova import filters
from nova import loadables
from nova import test
class Filter1(filters.BaseFilter):
"""Test Filter class #1."""
pass
class Filter2(filters.BaseFilter):
"""Test Filter class #2."""
pass
class FiltersTestCase(test.NoDBTestCase):
def test_filter_all(self):
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
result = base_filter.filter_all(filter_obj_list, filter_properties)
self.assertTrue(inspect.isgenerator(result))
self.assertEqual(['obj1', 'obj3'], list(result))
def test_filter_all_recursive_yields(self):
# Test filter_all() allows generators from previous filter_all()s.
# filter_all() yields results. We want to make sure that we can
# call filter_all() with generators returned from previous calls
# to filter_all().
filter_obj_list = ['obj1', 'obj2', 'obj3']
filter_properties = 'fake_filter_properties'
base_filter = filters.BaseFilter()
self.mox.StubOutWithMock(base_filter, '_filter_one')
total_iterations = 200
# The order that _filter_one is going to get called gets
# confusing because we will be recursively yielding things..
# We are going to simulate the first call to filter_all()
# returning False for 'obj2'. So, 'obj1' will get yielded
# 'total_iterations' number of times before the first filter_all()
# call gets to processing 'obj2'. We then return 'False' for it.
# After that, 'obj3' gets yielded 'total_iterations' number of
# times.
for x in xrange(total_iterations):
base_filter._filter_one('obj1', filter_properties).AndReturn(True)
base_filter._filter_one('obj2', filter_properties).AndReturn(False)
for x in xrange(total_iterations):
base_filter._filter_one('obj3', filter_properties).AndReturn(True)
self.mox.ReplayAll()
objs = iter(filter_obj_list)
for x in xrange(total_iterations):
# Pass in generators returned from previous calls.
objs = base_filter.filter_all(objs, filter_properties)
self.assertTrue(inspect.isgenerator(objs))
self.assertEqual(['obj1', 'obj3'], list(objs))
def test_get_filtered_objects(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_objs_last = ['last', 'filter3', 'objects3']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
Filter1().AndReturn(filt1_mock)
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
Filter2().AndReturn(filt2_mock)
filt2_mock.run_filter_for_index(0).AndReturn(True)
filt2_mock.filter_all(filter_objs_second,
filter_properties).AndReturn(filter_objs_last)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_classes = [Filter1, Filter2]
result = filter_handler.get_filtered_objects(filter_classes,
filter_objs_initial,
filter_properties)
self.assertEqual(filter_objs_last, result)
def test_get_filtered_objects_for_index(self):
"""Test that we don't call a filter when its
run_filter_for_index() method returns false
"""
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_objs_second = ['second', 'filter2', 'objects2']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
Filter1().AndReturn(filt1_mock)
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(filter_objs_second)
Filter2().AndReturn(filt2_mock)
# return false so filter_all will not be called
filt2_mock.run_filter_for_index(0).AndReturn(False)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_classes = [Filter1, Filter2]
filter_handler.get_filtered_objects(filter_classes,
filter_objs_initial,
filter_properties)
def test_get_filtered_objects_none_response(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
filter_properties = 'fake_filter_properties'
def _fake_base_loader_init(*args, **kwargs):
pass
self.stubs.Set(loadables.BaseLoader, '__init__',
_fake_base_loader_init)
filt1_mock = self.mox.CreateMock(Filter1)
filt2_mock = self.mox.CreateMock(Filter2)
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
use_mock_anything=True)
self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
self.mox.StubOutWithMock(filt1_mock, 'filter_all')
# Shouldn't be called.
self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
use_mock_anything=True)
self.mox.StubOutWithMock(filt2_mock, 'filter_all')
Filter1().AndReturn(filt1_mock)
filt1_mock.run_filter_for_index(0).AndReturn(True)
filt1_mock.filter_all(filter_objs_initial,
filter_properties).AndReturn(None)
self.mox.ReplayAll()
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_classes = [Filter1, Filter2]
result = filter_handler.get_filtered_objects(filter_classes,
filter_objs_initial,
filter_properties)
self.assertIsNone(result)
| apache-2.0 |
smmribeiro/intellij-community | python/helpers/pydev/pydev_tests_python/test_frame_evaluator.py | 12 | 2436 | import sys
import threading
import pytest
from pydev_tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from pydev_tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
def get_foo_frame():
frame = sys._getframe()
return frame
class CheckClass(object):
def collect_info(self):
from _pydevd_frame_eval import pydevd_frame_evaluator
thread_info = pydevd_frame_evaluator.get_thread_info_py()
self.thread_info = thread_info
@pytest.mark.parametrize('_times', range(2))
def test_thread_info(_times):
obj = CheckClass()
obj.collect_info()
assert obj.thread_info.additional_info is not None
assert not obj.thread_info.is_pydevd_thread
thread_info = obj.thread_info
obj.collect_info()
assert obj.thread_info is thread_info
obj = CheckClass()
t = threading.Thread(target=obj.collect_info)
t.is_pydev_daemon_thread = True
t.start()
t.join()
assert obj.thread_info.additional_info is None
assert obj.thread_info.is_pydevd_thread
def method():
pass
@pytest.fixture
def _custom_global_dbg():
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
from pydevd import PyDB
curr = GlobalDebuggerHolder.global_dbg
PyDB() # Will make itself current
yield
GlobalDebuggerHolder.global_dbg = curr
@pytest.mark.parametrize('_times', range(2))
def test_func_code_info(_times, _custom_global_dbg):
from _pydevd_frame_eval import pydevd_frame_evaluator
# Must be called before get_func_code_info_py to initialize the _code_extra_index.
pydevd_frame_evaluator.get_thread_info_py()
func_info = pydevd_frame_evaluator.get_func_code_info_py(method.__code__)
assert func_info.co_filename is method.__code__.co_filename
func_info2 = pydevd_frame_evaluator.get_func_code_info_py(method.__code__)
assert func_info is func_info2
some_func = eval('lambda:0')
func_info3 = pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__)
del some_func
del func_info3
some_func = eval('lambda:0')
pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__)
func_info = pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__)
assert pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__) is func_info
| apache-2.0 |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/predicates.py | 114 | 1787 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
"For GEOS binary predicate functions."
argtypes = [GEOM_PTR, GEOM_PTR]
if args:
argtypes += args
func.argtypes = argtypes
func.restype = c_char
func.errcheck = check_predicate
return func
def unary_predicate(func):
"For GEOS unary predicate functions."
func.argtypes = [GEOM_PTR]
func.restype = c_char
func.errcheck = check_predicate
return func
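# Usage sketch (hypothetical geometry pointers): the wrapped functions below
# are called with raw GEOM_PTRs and return a checked boolean, e.g.
#   geos_contains(g1_ptr, g2_ptr) -> True/False, or GEOSException on error.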
## Unary Predicates ##
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| gpl-2.0 |
nweedon/pyfbx | test/consistency_test.py | 1 | 2967 | '''
Copyright (c) 2014, NIALL FREDERICK WEEDON
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from pytest import fixture
from .unit import before
from ..pyfbx.FBXVertices import FBXVertices
from ..pyfbx.FBXNormals import FBXNormals
from ..pyfbx.FBXHeader import FBXHeader
from ..pyfbx.FBXTextures import FBXTextures
def test_vertices_consistency(before):
# Test each instance of model data. Although the reader
# will eventually parse different FBX file versions, the output
# should be the same for all of them.
for i in range(0, len(before['model_data'])):
print("Testing: " + before['files'][i])
fbxVertices = FBXVertices(before['model_data'][i])
jsonOut = fbxVertices.get()["VertexIndices"]
# Test first and last values
assert jsonOut[0] == [84, 88, -7]
assert jsonOut[len(jsonOut) - 1] == [1123, 1125, -1122]
jsonOut = fbxVertices.get()["Vertices"]
assert jsonOut[0] == [4.894176483154297, -5.2721147537231445, 33.48030090332031]
assert jsonOut[len(jsonOut) - 1] == [27.58094024658203, 0.4144550561904907, 26.248268127441406]
# 2011 and 2013 export UV's differently to 2012, so we
# can only check if the format of UVs is consistent for the
# first entry.
fbxTextures = FBXTextures(before['model_data'][i])
jsonOut = fbxTextures.get()["UVIndices"]
assert jsonOut[0] == [0, 3, 2]
# Edge and normal values are idempotent per version, but
# export slightly differently across versions. As such, consistency
# tests cannot be run against these (until I find out whether there
# is something in the headers)
| bsd-3-clause |
google-code/android-scripting | python/src/Lib/plat-irix6/cddb.py | 66 | 7218 | # This file implements a class which forms an interface to the .cddb
# directory that is maintained by SGI's cdman program.
#
# Usage is as follows:
#
# import readcd
# r = readcd.Readcd()
# c = Cddb(r.gettrackinfo())
#
# Now you can use c.artist, c.title and c.track[trackno] (where trackno
# starts at 1). When the CD is not recognized, all values will be the empty
# string.
# It is also possible to set the above mentioned variables to new values.
# You can then use c.write() to write out the changed values to the
# .cdplayerrc file.
from warnings import warnpy3k
warnpy3k("the cddb module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import string, posix, os
_cddbrc = '.cddb'
_DB_ID_NTRACKS = 5
_dbid_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ@_=+abcdefghijklmnopqrstuvwxyz'
def _dbid(v):
if v >= len(_dbid_map):
return string.zfill(v, 2)
else:
return _dbid_map[v]
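# Worked example: with the 66-character map above, _dbid(10) == 'A' and
# _dbid(35) == 'Z', while out-of-range values fall back to zero-filled
# decimal, e.g. _dbid(70) == '70'.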
def tochash(toc):
if type(toc) == type(''):
tracklist = []
for i in range(2, len(toc), 4):
tracklist.append((None,
(int(toc[i:i+2]),
int(toc[i+2:i+4]))))
else:
tracklist = toc
ntracks = len(tracklist)
hash = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
if ntracks <= _DB_ID_NTRACKS:
nidtracks = ntracks
else:
nidtracks = _DB_ID_NTRACKS - 1
min = 0
sec = 0
for track in tracklist:
start, length = track
min = min + length[0]
sec = sec + length[1]
min = min + sec / 60
sec = sec % 60
hash = hash + _dbid(min) + _dbid(sec)
for i in range(nidtracks):
start, length = tracklist[i]
hash = hash + _dbid(length[0]) + _dbid(length[1])
return hash
class Cddb:
def __init__(self, tracklist):
if os.environ.has_key('CDDB_PATH'):
path = os.environ['CDDB_PATH']
cddb_path = path.split(',')
else:
home = os.environ['HOME']
cddb_path = [home + '/' + _cddbrc]
self._get_id(tracklist)
for dir in cddb_path:
file = dir + '/' + self.id + '.rdb'
try:
f = open(file, 'r')
self.file = file
break
except IOError:
pass
ntracks = int(self.id[:2], 16)
self.artist = ''
self.title = ''
self.track = [None] + [''] * ntracks
self.trackartist = [None] + [''] * ntracks
self.notes = []
if not hasattr(self, 'file'):
return
import re
reg = re.compile(r'^([^.]*)\.([^:]*):[\t ]+(.*)')
while 1:
line = f.readline()
if not line:
break
match = reg.match(line)
if not match:
print 'syntax error in ' + file
continue
name1, name2, value = match.group(1, 2, 3)
if name1 == 'album':
if name2 == 'artist':
self.artist = value
elif name2 == 'title':
self.title = value
elif name2 == 'toc':
if not self.toc:
self.toc = value
if self.toc != value:
print 'toc\'s don\'t match'
elif name2 == 'notes':
self.notes.append(value)
elif name1[:5] == 'track':
try:
trackno = int(name1[5:])
except ValueError:
print 'syntax error in ' + file
continue
if trackno > ntracks:
print 'track number %r in file %s out of range' % (trackno, file)
continue
if name2 == 'title':
self.track[trackno] = value
elif name2 == 'artist':
self.trackartist[trackno] = value
f.close()
for i in range(2, len(self.track)):
track = self.track[i]
# if track title starts with `,', use initial part
# of previous track's title
if track and track[0] == ',':
try:
off = self.track[i - 1].index(',')
except ValueError:
pass
else:
self.track[i] = self.track[i-1][:off] \
+ track
def _get_id(self, tracklist):
# fill in self.id and self.toc.
# if the argument is a string ending in .rdb, the part
# up to the suffix is taken as the id.
if type(tracklist) == type(''):
if tracklist[-4:] == '.rdb':
self.id = tracklist[:-4]
self.toc = ''
return
t = []
for i in range(2, len(tracklist), 4):
t.append((None, \
(int(tracklist[i:i+2]), \
int(tracklist[i+2:i+4]))))
tracklist = t
ntracks = len(tracklist)
self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
if ntracks <= _DB_ID_NTRACKS:
nidtracks = ntracks
else:
nidtracks = _DB_ID_NTRACKS - 1
min = 0
sec = 0
for track in tracklist:
start, length = track
min = min + length[0]
sec = sec + length[1]
min = min + sec / 60
sec = sec % 60
self.id = self.id + _dbid(min) + _dbid(sec)
for i in range(nidtracks):
start, length = tracklist[i]
self.id = self.id + _dbid(length[0]) + _dbid(length[1])
self.toc = string.zfill(ntracks, 2)
for track in tracklist:
start, length = track
self.toc = self.toc + string.zfill(length[0], 2) + \
string.zfill(length[1], 2)
def write(self):
import posixpath
if os.environ.has_key('CDDB_WRITE_DIR'):
dir = os.environ['CDDB_WRITE_DIR']
else:
dir = os.environ['HOME'] + '/' + _cddbrc
file = dir + '/' + self.id + '.rdb'
if posixpath.exists(file):
# make backup copy
posix.rename(file, file + '~')
f = open(file, 'w')
f.write('album.title:\t' + self.title + '\n')
f.write('album.artist:\t' + self.artist + '\n')
f.write('album.toc:\t' + self.toc + '\n')
for note in self.notes:
f.write('album.notes:\t' + note + '\n')
prevpref = None
for i in range(1, len(self.track)):
if self.trackartist[i]:
f.write('track%r.artist:\t%s\n' % (i, self.trackartist[i]))
track = self.track[i]
try:
off = track.index(',')
except ValueError:
prevpref = None
else:
if prevpref and track[:off] == prevpref:
track = track[off:]
else:
prevpref = track[:off]
f.write('track%r.title:\t%s\n' % (i, track))
f.close()
| apache-2.0 |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/redirect.py | 1 | 15179 |
import ops.cmd
import dsz
import dsz.cmd
import util.ip
from ops.cmd import OpsCommandException
TCP = 'tcp'
UDP = 'udp'
IMPLANTLISTEN = 'implantlisten'
LPLISTEN = 'lplisten'
VALID_OPTIONS = ['lplisten', 'implantlisten', 'target', 'tcp', 'udp', 'portsharing', 'connections', 'limitconnections', 'sendnotify', 'packetsize']
class RedirectCommand(ops.cmd.DszCommand, ):
def __init__(self, plugin='redirect', lplisten=None, implantlisten=None, target=None, **optdict):
self._listenport = (-1)
self._bindAddr = '0.0.0.0'
self._direction = None
self._clientPort = (-1)
self._clientAddr = '0.0.0.0'
self._targetAddr = '0.0.0.0'
self._targetPort = (-1)
self._sourceAddr = '0.0.0.0'
self._sourcePort = (-1)
self._limitAddr = '0.0.0.0'
self._limitMask = '0.0.0.0'
self.optdict = optdict
if ('protocol' in optdict):
self.protocol = optdict['protocol']
del optdict['protocol']
elif ('tcp' in optdict):
self.protocol = 'tcp'
elif ('udp' in optdict):
self.protocol = 'udp'
if ((lplisten is not None) and (implantlisten is not None)):
raise OpsCommandException('You can only set one of lplisten and implantlisten')
elif (lplisten is not None):
if ((type(lplisten) == bool) and lplisten):
self.direction = 'lplisten'
else:
self.lplisten = lplisten
elif (implantlisten is not None):
if ((type(implantlisten) == bool) and implantlisten):
self.direction = 'implantlisten'
else:
self.implantlisten = implantlisten
self.target = target
delmark = []
for key in optdict:
if ((not (key in VALID_OPTIONS)) or (key in ['lplisten', 'implantlisten', 'target'])):
delmark.append(key)
for deler in delmark:
del optdict[deler]
ops.cmd.DszCommand.__init__(self, plugin=plugin, **optdict)
def validateInput(self):
if (self.target_address == '0.0.0.0'):
return False
if ((self.target_port < 0) or (self.target_port > 65535)):
return False
if ((self.listen_port < 0) or (self.listen_port > 65535)):
return False
if ((self.lplisten is None) and (self.implantlisten is None)):
return False
if (self.protocol is None):
return False
for port in [self.source_port, self.client_port]:
if ((port < (-1)) or (port > 65535)):
return False
return True
def __str__(self):
cmdstr = ''
for prefix in self.prefixes:
cmdstr += ('%s ' % prefix)
cmdstr += ('%s -%s -target %s' % (self.plugin, self.protocol, self.target))
if (self.lplisten is not None):
cmdstr += (' -lplisten %s' % self.lplisten)
elif (self.implantlisten is not None):
cmdstr += (' -implantlisten %s' % self.implantlisten)
if self.port_sharing:
cmdstr += (' -portsharing %s' % self.port_sharing)
if self.limit_connections:
cmdstr += (' -limitconnections %s' % self.limit_connections)
for optkey in self.optdict:
if (optkey in ['tcp', 'udp']):
continue
if (self.optdict[optkey] == True):
cmdstr += (' -%s' % optkey)
else:
cmdstr += (' -%s %s' % (optkey, self.optdict[optkey]))
if self.dszquiet:
x = dsz.control.Method()
dsz.control.echo.Off()
return cmdstr
def _getProtocol(self):
if self.tcp:
return 'tcp'
elif self.udp:
return 'udp'
else:
return None
def _setProtocol(self, val):
if (val == TCP):
self.tcp = True
elif (val == UDP):
self.udp = True
else:
raise OpsCommandException('Protocol must be tcp or udp')
protocol = property(_getProtocol, _setProtocol)
def _getTCP(self):
if (('tcp' in self.optdict) and self.optdict['tcp']):
return True
else:
return False
def _setTCP(self, val):
if (((val is None) or (val is False)) and ('tcp' in self.optdict)):
del self.optdict['tcp']
elif (val is True):
self.optdict['tcp'] = val
if ('udp' in self.optdict):
del self.optdict['udp']
tcp = property(_getTCP, _setTCP)
def _getUDP(self):
if (('udp' in self.optdict) and self.optdict['udp']):
return True
else:
return False
def _setUDP(self, val):
if (((val is None) or (val is False)) and ('udp' in self.optdict)):
del self.optdict['udp']
elif (val is True):
self.optdict['udp'] = val
if ('tcp' in self.optdict):
del self.optdict['tcp']
udp = property(_getUDP, _setUDP)
def _getDirection(self):
return self._direction
def _setDirection(self, val):
if (not (val in [IMPLANTLISTEN, LPLISTEN])):
raise OpsCommandException('redirect command: direction must be one of lplisten or implantlisten')
self._direction = val
direction = property(_getDirection, _setDirection)
def _getListenPort(self):
return self._listenport
def _setListenPort(self, val):
val = int(val)
if ((val < 0) or (val > 65535)):
raise OpsCommandException('Listen port must be an integer between 0-65535')
self._listenport = val
listen_port = property(_getListenPort, _setListenPort)
def _getBindAddr(self):
return self._bindAddr
def _setBindAddr(self, val):
if (val is None):
self._bindAddr = '0.0.0.0'
elif util.ip.validate(val):
self._bindAddr = val
bind_address = property(_getBindAddr, _setBindAddr)
def _getLplisten(self):
if (self.direction == LPLISTEN):
retval = str(self.listen_port)
if (self.bind_address != '0.0.0.0'):
retval += (' %s' % self.bind_address)
return retval
else:
return None
def _setLplisten(self, value):
if (value is None):
self.direction = IMPLANTLISTEN
return  # clearing lplisten flips to implant listening; don't fall through
self.direction = LPLISTEN
if (type(value) == str):
options = value.split(' ')
if (len(options) == 2):
(self.listen_port, self.bind_address) = (options[0], options[1])
elif (len(options) == 1):
self.listen_port = options[0]
elif (type(value) == int):
self.listen_port = value
lplisten = property(_getLplisten, _setLplisten)
def _getImplantlisten(self):
if (self.direction == IMPLANTLISTEN):
retval = str(self.listen_port)
if (self.bind_address != '0.0.0.0'):
retval += (' %s' % self.bind_address)
return retval
else:
return None
def _setImplantlisten(self, value):
if (value is None):
self.direction = LPLISTEN
return  # clearing implantlisten flips to LP listening; don't fall through
self.direction = IMPLANTLISTEN
if (type(value) == str):
options = value.split(' ')
if (len(options) == 2):
(self.listen_port, self.bind_address) = (options[0], options[1])
elif (len(options) == 1):
self.listen_port = options[0]
elif (type(value) == int):
self.listen_port = value
implantlisten = property(_getImplantlisten, _setImplantlisten)
def _getTargetAddr(self):
return self._targetAddr
def _setTargetAddr(self, value):
value = value.strip()
if util.ip.validate(value):
self._targetAddr = value
else:
raise OpsCommandException('Invalid target IP address')
target_address = property(_getTargetAddr, _setTargetAddr)
def _getTargetPort(self):
return self._targetPort
def _setTargetPort(self, value):
try:
value = int(value)
except ValueError:
raise OpsCommandException('Invalid target port, must be an integer between 0-65535')
self._targetPort = value
target_port = property(_getTargetPort, _setTargetPort)
def _getSourceAddr(self):
return self._sourceAddr.strip()
def _setSourceAddr(self, value):
value = value.strip()
if util.ip.validate(value):
self._sourceAddr = value
else:
raise OpsCommandException(('Invalid source IP address %s' % value))
source_address = property(_getSourceAddr, _setSourceAddr)
def _getSourcePort(self):
return self._sourcePort
def _setSourcePort(self, value):
try:
value = int(value)
if ((value < (-1)) or (value > 65535)):
raise OpsCommandException('Invalid source port, must be an integer between 0-65535 or -1 for unspecified')
except ValueError:
raise OpsCommandException('Invalid source port, must be an integer between 0-65535')
self._sourcePort = value
source_port = property(_getSourcePort, _setSourcePort)
def _getTarget(self):
retval = ('%s %d' % (self.target_address, self.target_port))
if (self.source_address != '0.0.0.0'):
retval += (' %s' % self.source_address)
if (self.source_port != (-1)):
retval += (' %d' % self.source_port)
return retval
def _setTarget(self, value):
if (value is None):
self.target_address = '0.0.0.0'
self.target_port = (-1)
return
parts = value.split(' ')
if (len(parts) < 2):
raise OpsCommandException('You must specify at least a target address and target port')
self.target_address = parts[0]
self.target_port = parts[1]
if (len(parts) >= 3):
self.source_address = parts[2]
if (len(parts) == 4):
self.source_port = parts[3]
target = property(_getTarget, _setTarget)
def _getClientAddr(self):
return self._clientAddr
def _setClientAddr(self, value):
value = value.strip()
if (value == '0.0.0.0'):
raise OpsCommandException('Invalid client IP address 0.0.0.0')
elif util.ip.validate(value):
self._clientAddr = value
else:
raise OpsCommandException(('Invalid client IP address %s' % value))
client_address = property(_getClientAddr, _setClientAddr)
def _getClientPort(self):
return self._clientPort
def _setClientPort(self, value):
try:
value = int(value)
if ((value < 0) or (value > 65535)):
raise OpsCommandException('Invalid client port, must be an integer between 0-65535')
except ValueError:
raise OpsCommandException('Invalid client port, must be an integer between 0-65535')
self._clientPort = value
client_port = property(_getClientPort, _setClientPort)
def _getPortsharing(self):
if (self.client_port > (-1)):
return ('%d %s' % (self.client_port, self.client_address))
else:
return None
def _setPortsharing(self, value):
if (value is None):
# reset the backing fields directly; the property setters reject these defaults
self._clientAddr = '0.0.0.0'
self._clientPort = (-1)
else:
parts = value.split(' ')
if (len(parts) != 2):
raise OpsCommandException('You must specify client source address and client source port and nothing else when using port sharing')
self.client_address = parts[1]
self.client_port = parts[0]
port_sharing = property(_getPortsharing, _setPortsharing)
def _getLimitAddr(self):
return self._limitAddr
def _setLimitAddr(self, value):
value = value.strip()
if (value == '0.0.0.0'):
raise OpsCommandException('Invalid limit IP address 0.0.0.0')
elif util.ip.validate(value):
self._limitAddr = value
else:
raise OpsCommandException(('Invalid limit IP address %s' % value))
limit_address = property(_getLimitAddr, _setLimitAddr)
def _getLimitMask(self):
return self._limitMask
def _setLimitMask(self, value):
value = value.strip()
if util.ip.validate(value):
self._limitMask = value
else:
raise OpsCommandException(('Invalid limit mask %s' % value))
limit_mask = property(_getLimitMask, _setLimitMask)
def _getLimitConnections(self):
if (self.limit_address != '0.0.0.0'):
return ('%s %s' % (self.limit_address, self.limit_mask))
else:
return None
def _setLimitConnections(self, value):
if (value is None):
self._limitAddr = '0.0.0.0'  # bypass the setter, which rejects 0.0.0.0
self.limit_mask = '0.0.0.0'
else:
parts = value.split(' ')
if (len(parts) != 2):
raise OpsCommandException('You must specify limit address and limit mask and nothing else when using connection limiting')
self.limit_mask = parts[1]
self.limit_address = parts[0]
limit_connections = property(_getLimitConnections, _setLimitConnections)
def _getConnections(self):
if ('connections' in self.optdict):
return self.optdict['connections']
else:
return 0
def _setConnections(self, value):
if (value is not None):
try:
value = int(value)
self.optdict['connections'] = value
except ValueError:
raise OpsCommandException('Max connections for a redirect command must be an integer >= 0')
else:
self.optdict['connections'] = 0
connections = property(_getConnections, _setConnections)
def _getPacketsize(self):
if ('packetsize' in self.optdict):
return self.optdict['packetsize']
else:
return 0
def _setPacketsize(self, value):
if (value is not None):
try:
value = int(value)
self.optdict['packetsize'] = value
except ValueError:
raise OpsCommandException('Packetsize for a redirect command must be an integer > 0')
elif ('packetsize' in self.optdict):
del self.optdict['packetsize']
packetsize = property(_getPacketsize, _setPacketsize)
def _getRedirNotify(self):
if (('sendnotify' in self.optdict) and self.optdict['sendnotify']):
return True
else:
return False
def _setRedirNotify(self, val):
if (((val is None) or (val is False)) and ('sendnotify' in self.optdict)):
del self.optdict['sendnotify']
elif (val is True):
self.optdict['sendnotify'] = val
redir_notify = property(_getRedirNotify, _setRedirNotify)
ops.cmd.command_classes['redirect'] = RedirectCommand
ops.cmd.aliasoptions['redirect'] = VALID_OPTIONS | unlicense |
jasonzzz/ansible | lib/ansible/plugins/lookup/subelements.py | 6 | 4311 | # (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.boolean import boolean
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, "
+ msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
# check lookup terms - check number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0].iterkeys():
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False))
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if subkey not in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
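# A minimal usage sketch (assumed playbook data, not part of this file):
#
# - hosts: localhost
#   tasks:
#     - debug: msg="{{ item.0.name }} has key {{ item.1 }}"
#       with_subelements:
#         - "{{ users }}"
#         - authorized_keys
#         - skip_missing: True
#
# Each (item0, item1) pair returned above becomes item.0/item.1 in the task.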
| gpl-3.0 |
recall704/scrapy-docs-cn | tests/test_downloadermiddleware_stats.py | 101 | 1596 | from unittest import TestCase
from scrapy.downloadermiddlewares.stats import DownloaderStats
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class MyException(Exception):
pass
class TestDownloaderStats(TestCase):
def setUp(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider('scrapytest.org')
self.mw = DownloaderStats(self.crawler.stats)
self.crawler.stats.open_spider(self.spider)
self.req = Request('http://scrapytest.org')
self.res = Response('scrapytest.org', status=400)
def assertStatsEqual(self, key, value):
self.assertEqual(
self.crawler.stats.get_value(key, spider=self.spider),
value,
str(self.crawler.stats.get_stats(self.spider))
)
def test_process_request(self):
self.mw.process_request(self.req, self.spider)
self.assertStatsEqual('downloader/request_count', 1)
def test_process_response(self):
self.mw.process_response(self.req, self.res, self.spider)
self.assertStatsEqual('downloader/response_count', 1)
def test_process_exception(self):
self.mw.process_exception(self.req, MyException(), self.spider)
self.assertStatsEqual('downloader/exception_count', 1)
self.assertStatsEqual(
'downloader/exception_type_count/tests.test_downloadermiddleware_stats.MyException',
1
)
def tearDown(self):
self.crawler.stats.close_spider(self.spider, '')
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py | 13 | 3355 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import telemetry.timeline.event as timeline_event
from telemetry.testing import test_page_test_results
from telemetry.web_perf.metrics import startup
class StartupTimelineMetricTest(unittest.TestCase):
def setUp(self):
self.events = []
def AddEvent(self, event_name, start, duration=None):
event = timeline_event.TimelineEvent('my_category', event_name,
start, duration)
self.events.append(event)
# Attributes defined outside __init__
# pylint: disable=attribute-defined-outside-init
def ComputeStartupMetrics(self):
results = test_page_test_results.TestPageTestResults(self)
# Create a mock model usable by
# StartupTimelineMetric.AddWholeTraceResults().
def IterateEvents(event_predicate):
for event in self.events:
if event_predicate(event):
yield event
class MockClass(object):
pass
model = MockClass()
model.browser_process = MockClass()
model.browser_process.parent = MockClass()
model.browser_process.parent.IterAllEvents = IterateEvents
startup.StartupTimelineMetric().AddWholeTraceResults(model, results)
return results
def testUntrackedEvents(self):
# Code coverage for untracked events
self.AddEvent('unknown_event_0', 0)
self.AddEvent('unknown_event_1', 1)
self.ComputeStartupMetrics()
def testInstantEventsBasedValue(self):
# Test case with instant events to measure the duration between the first
# occurrences of two distinct events.
START0 = 7
START1 = 8
DURATION0 = 17
DURATION1 = 18
# Generate duplicated events to make sure we consider only the first one.
self.AddEvent(startup._MAIN_ENTRY_POINT, START0)
self.AddEvent(startup._MAIN_ENTRY_POINT, START1)
self.AddEvent('loadEventEnd', START0 + DURATION0)
self.AddEvent('loadEventEnd', START1 + DURATION1)
self.AddEvent('requestStart', START0 + DURATION0 * 2)
self.AddEvent('requestStart', START1 + DURATION1 * 2)
results = self.ComputeStartupMetrics()
results.AssertHasPageSpecificScalarValue('foreground_tab_load_complete',
'ms', DURATION0)
results.AssertHasPageSpecificScalarValue('foreground_tab_request_start',
'ms', DURATION0 * 2)
def testDurationEventsBasedValues(self):
DURATION_EVENTS = set([
'messageloop_start_time',
'window_display_time',
'open_tabs_time',
'first_non_empty_paint_time',
'first_main_frame_load_time'])
# Test case to get the duration of the first occurrence of a duration event.
i = 1
for display_name in DURATION_EVENTS:
self.assertTrue(len(startup._METRICS[display_name]) == 1)
event_name = startup._METRICS[display_name][0]
duration = 13 * i
i += 1
# Generate duplicated events to make sure only the first event is
# considered.
self.AddEvent(event_name, 5, duration)
self.AddEvent(event_name, 6, duration + 2)
results = self.ComputeStartupMetrics()
i = 1
for display_name in DURATION_EVENTS:
duration = 13 * i
i += 1
results.AssertHasPageSpecificScalarValue(display_name, 'ms', duration)
| gpl-3.0 |
tigawa/proofreadingchecker | vendor/bundle/ruby/1.9.1/gems/libv8-3.16.14.3/vendor/gyp/test/variables/commands/gyptest-commands-ignore-env.py | 330 | 1466 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that environment variables are ignored when --ignore-environment is
specified.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
os.environ['GYP_DEFINES'] = 'FOO=BAR'
os.environ['GYP_GENERATORS'] = 'foo'
os.environ['GYP_GENERATOR_FLAGS'] = 'genflag=foo'
os.environ['GYP_GENERATOR_OUTPUT'] = 'somedir'
expect = test.read('commands.gyp.ignore-env.stdout').replace('\r\n', '\n')
test.run_gyp('commands.gyp',
'--debug', 'variables',
'--ignore-environment',
stdout=expect, ignore_line_numbers=True)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r', '')
expect = test.read('commands.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
print "Unexpected contents of `commands.gypd'"
test.diff(expect, contents, 'commands.gypd ')
test.fail_test()
test.pass_test()
| apache-2.0 |
rohit21122012/DCASE2013 | runs/2016/baseline2016_mfcc_21/src/evaluation.py | 56 | 43426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import sys
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
return numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1) + self.eps)
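# Rough illustration (assumed labels): for labels=['a', 'b'],
# y_true=['a', 'a', 'b'] and y_pred=['a', 'b', 'b'], the confusion matrix
# diagonal is [1, 1] and the row sums are [2, 1], so the per-class
# accuracies are [0.5, 1.0].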
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth,
labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max_offset : float >= 0
Maximum event offset
"""
max_offset = 0
for event in data:
if event['event_offset'] > max_offset:
max_offset = event['event_offset']
return max_offset
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
Event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
Event roll
"""
# Initialize
data_length = self.max_event_offset(data)
event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))
# Fill-in event_roll
for event in data:
pos = self.class_list.index(event['event_label'].rstrip())
onset = math.floor(event['event_onset'] * 1 / time_resolution)
offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
event_roll[onset:offset, pos] = 1
return event_roll
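# Rough illustration (assumed values): a single 'speech' event spanning
# 0.0-0.02 s at the default time_resolution=0.01 produces a roll with
# math.ceil(0.02 / 0.01) + 1 = 3 segments, all marked active:
#   >>> m = EventDetectionMetrics(class_list=['speech'])
#   >>> m.list_to_roll([{'event_label': 'speech',
#   ...                  'event_onset': 0.0,
#   ...                  'event_offset': 0.02}]).shape
#   (3, 1)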
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
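# Note: max(Nref, Nsys) - Ntp == S + D + I for every segment, so the
# accumulated ER agrees with the standard (S + D + I) / Nref error rate
# reported later by results().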
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
sys_leftover = numpy.nonzero(numpy.negative(sys_correct))[0]
ref_leftover = numpy.nonzero(numpy.negative(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i][
'event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
Defines how close event onsets have to be in order to be considered a match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
Defines how close event offsets have to be in order to be considered a match. In seconds.
(Default value = 0.2)
percentage_of_length : float [0-1]
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar,
percentage_of_length * annotated_length)
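# Worked example (assumed numbers): for an annotated event spanning
# 1.0-3.0 s (length 2.0 s) with t_collar=0.2 and percentage_of_length=0.5,
# the tolerance is max(0.2, 1.0) = 1.0 s, so any system offset within
# 2.0-4.0 s fulfills the condition.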
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall[
'Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
Nsubs = min(Nfp, Nfn)
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
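# Worked example (assumed counts): with Nref=10, Ntot=9 and Ntp=7, the
# rolls give Nfp=2 and Nfn=3, hence Nsubs=min(2, 3)=2 and
# AEER=(3 + 2 + 2) / 10 = 0.7, with Pre=7/9 and Rec=7/10.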
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit |
guglielmino/pushetta-api-django | pushetta/api/channels_sl.py | 1 | 6038 | # coding=utf-8
# Project: Pushetta API
# Service layer with the functionality for managing Channels
import logging
logger = logging.getLogger(__name__)
from rest_framework import generics, permissions
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from haystack.query import SearchQuerySet
from haystack.inputs import Clean
from api.permissions import IsChannelOwner
from api.serializers import ChannelSerializer, ChannelSubscriptionSerializer, PaginatedChannelSerializer
from core.models import Channel, ChannelSubscribeRequest
from core.subscriber_manager import SubscriberManager
from core.services import ask_subscribe_channel, search_public_channels, get_suggested_channels
from core.services import SubscribeResponse
class ChannelsList(generics.ListCreateAPIView):
"""
Class for handling Create/Update/List/Delete of Channels
"""
model = Channel
serializer_class = ChannelSerializer
permission_classes = [IsAuthenticated, IsChannelOwner]
def pre_save(self, obj):
obj.owner = self.request.user
def get_queryset(self):
return Channel.objects.filter(owner=self.request.user)
class ChannelSearch(generics.ListAPIView):
"""
Search for channels based on query keywords
q -- keywords used in search
"""
model = Channel
serializer_class = PaginatedChannelSerializer
permission_classes = [
permissions.AllowAny
]
def get(self, request, *args, **kwargs):
q = request.QUERY_PARAMS.get('q', '')
# sqs = SearchQuerySet().models(Channel).filter(content=Clean(q))
sqs = search_public_channels(q)
paginator = Paginator(sqs, 50)
page = request.QUERY_PARAMS.get('page')
if not page:
page = 1
try:
channels = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page
channels = paginator.page(1)
except EmptyPage:
# If page is out of range, deliver last page
channels = paginator.page(paginator.num_pages)
serializer_context = {'request': request}
serializer = PaginatedChannelSerializer(channels, context=serializer_context)
return Response(serializer.data)
class ChannelSuggestion(generics.ListAPIView):
"""
Channel suggestions based on popularity
"""
model = Channel
serializer_class = ChannelSerializer
permission_classes = [
permissions.AllowAny
]
def get(self, request, device_id=None):
suggestion = get_suggested_channels()
if device_id is not None:
# Channels the device already subscribes to are removed from the suggestions
channel_names = SubscriberManager().get_device_subscriptions(device_id)
suggestion = [sugg for sugg in suggestion if not sugg.name.lower() in channel_names]
serializer = ChannelSerializer(suggestion, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class ChannelSubscription(generics.GenericAPIView):
"""
Handling of Channels subscriptions
"""
serializer_class = ChannelSubscriptionSerializer
permission_classes = [
permissions.AllowAny
]
def post(self, request, format=None, name=None):
"""
Subscribe to the Channel identified by "name"
"""
channels = Channel.objects.filter(name=name)
if not channels:
logger.error("Subscribe to inexistent channel : " + name)
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = ChannelSubscriptionSerializer(data=request.DATA)
if serializer.is_valid():
subscriber_data = serializer.object
channel = channels[0]
subscribe_resp = ask_subscribe_channel(channel, subscriber_data['device_id'])
if subscribe_resp == SubscribeResponse.SUBSCRIBED:
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(status=(status.HTTP_202_ACCEPTED if subscribe_resp == SubscribeResponse.REQUEST_SEND
else status.HTTP_400_BAD_REQUEST))
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
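# Hypothetical usage sketch (URL pattern and payload assumed; only the
# 'device_id' field is visible in this file):
#   POST /channels/<name>/subscribers/ with {"device_id": "abc123"}
#   -> 201 Created on a direct subscription, 202 Accepted when a
#   subscribe request was queued for owner approval.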
class ChannelUnSubscription(generics.GenericAPIView):
"""
Handling of Channels subscriptions
"""
serializer_class = ChannelSubscriptionSerializer
permission_classes = [
permissions.AllowAny
]
# Note: RFC2616 (http://www.w3.org/Protocols/rfc2616/rfc2616.html) states that a DELETE request with a body is acceptable
def delete(self, request, name=None, sub_type=None, device_id=None):
"""
Unsubscribe from a channel
"""
channels = Channel.objects.filter(name=name)
if not channels:
return Response(status=status.HTTP_404_NOT_FOUND)
if name is not None and device_id is not None:
result = status.HTTP_200_OK
channel = channels[0]
subManager = SubscriberManager()
sub_token = subManager.get_subscription(channel.name, sub_type, device_id)
if sub_token is not None:
subManager.unsubscribe(channel.name, device_id, sub_type)
# Decrement the subscription count
channel.subscriptions = channel.subscriptions - 1
channel.save()
else:
# Check whether there is a pending subscribe request instead
reqs = ChannelSubscribeRequest.objects.filter(device_id=device_id).filter(channel=channel)
if reqs.count() > 0:
reqs[0].delete()
else:
result = status.HTTP_404_NOT_FOUND
return Response(status=result)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
| gpl-3.0 |
nomeata/codespeed | codespeed/migrations/0001_initial.py | 5 | 13935 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table('codespeed_project', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('repo_type', self.gf('django.db.models.fields.CharField')(default='N', max_length=1)),
('repo_path', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('repo_user', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('repo_pass', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('track', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('codespeed', ['Project'])
# Adding model 'Revision'
db.create_table('codespeed_revision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('commitid', self.gf('django.db.models.fields.CharField')(max_length=42)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='revisions', to=orm['codespeed.Project'])),
('tag', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('message', self.gf('django.db.models.fields.TextField')(blank=True)),
('author', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
))
db.send_create_signal('codespeed', ['Revision'])
# Adding unique constraint on 'Revision', fields ['commitid', 'project']
db.create_unique('codespeed_revision', ['commitid', 'project_id'])
# Adding model 'Executable'
db.create_table('codespeed_executable', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('description', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='executables', to=orm['codespeed.Project'])),
))
db.send_create_signal('codespeed', ['Executable'])
# Adding model 'Benchmark'
db.create_table('codespeed_benchmark', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('benchmark_type', self.gf('django.db.models.fields.CharField')(default='C', max_length=1)),
('description', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('units_title', self.gf('django.db.models.fields.CharField')(default='Time', max_length=30)),
('units', self.gf('django.db.models.fields.CharField')(default='seconds', max_length=20)),
('lessisbetter', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('codespeed', ['Benchmark'])
# Adding model 'Environment'
db.create_table('codespeed_environment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
('cpu', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('memory', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('os', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('kernel', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
))
db.send_create_signal('codespeed', ['Environment'])
# Adding model 'Result'
db.create_table('codespeed_result', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.FloatField')()),
('std_dev', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('val_min', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('val_max', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('revision', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['codespeed.Revision'])),
('executable', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['codespeed.Executable'])),
('benchmark', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['codespeed.Benchmark'])),
('environment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='results', to=orm['codespeed.Environment'])),
))
db.send_create_signal('codespeed', ['Result'])
# Adding unique constraint on 'Result', fields ['revision', 'executable', 'benchmark', 'environment']
db.create_unique('codespeed_result', ['revision_id', 'executable_id', 'benchmark_id', 'environment_id'])
# Adding model 'Report'
db.create_table('codespeed_report', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision', self.gf('django.db.models.fields.related.ForeignKey')(related_name='reports', to=orm['codespeed.Revision'])),
('environment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='reports', to=orm['codespeed.Environment'])),
('executable', self.gf('django.db.models.fields.related.ForeignKey')(related_name='reports', to=orm['codespeed.Executable'])),
('summary', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
('colorcode', self.gf('django.db.models.fields.CharField')(default='none', max_length=10)),
('_tablecache', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('codespeed', ['Report'])
# Adding unique constraint on 'Report', fields ['revision', 'executable', 'environment']
db.create_unique('codespeed_report', ['revision_id', 'executable_id', 'environment_id'])
def backwards(self, orm):
# Removing unique constraint on 'Report', fields ['revision', 'executable', 'environment']
db.delete_unique('codespeed_report', ['revision_id', 'executable_id', 'environment_id'])
# Removing unique constraint on 'Result', fields ['revision', 'executable', 'benchmark', 'environment']
db.delete_unique('codespeed_result', ['revision_id', 'executable_id', 'benchmark_id', 'environment_id'])
# Removing unique constraint on 'Revision', fields ['commitid', 'project']
db.delete_unique('codespeed_revision', ['commitid', 'project_id'])
# Deleting model 'Project'
db.delete_table('codespeed_project')
# Deleting model 'Revision'
db.delete_table('codespeed_revision')
# Deleting model 'Executable'
db.delete_table('codespeed_executable')
# Deleting model 'Benchmark'
db.delete_table('codespeed_benchmark')
# Deleting model 'Environment'
db.delete_table('codespeed_environment')
# Deleting model 'Result'
db.delete_table('codespeed_result')
# Deleting model 'Report'
db.delete_table('codespeed_report')
models = {
'codespeed.benchmark': {
'Meta': {'object_name': 'Benchmark'},
'benchmark_type': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lessisbetter': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'units': ('django.db.models.fields.CharField', [], {'default': "'seconds'", 'max_length': '20'}),
'units_title': ('django.db.models.fields.CharField', [], {'default': "'Time'", 'max_length': '30'})
},
'codespeed.environment': {
'Meta': {'object_name': 'Environment'},
'cpu': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kernel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'memory': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
'codespeed.executable': {
'Meta': {'object_name': 'Executable'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'executables'", 'to': "orm['codespeed.Project']"})
},
'codespeed.project': {
'Meta': {'object_name': 'Project'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'repo_pass': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'repo_user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'track': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'codespeed.report': {
'Meta': {'unique_together': "(('revision', 'executable', 'environment'),)", 'object_name': 'Report'},
'_tablecache': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'colorcode': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '10'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': "orm['codespeed.Environment']"}),
'executable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': "orm['codespeed.Executable']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports'", 'to': "orm['codespeed.Revision']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
'codespeed.result': {
'Meta': {'unique_together': "(('revision', 'executable', 'benchmark', 'environment'),)", 'object_name': 'Result'},
'benchmark': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['codespeed.Benchmark']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['codespeed.Environment']"}),
'executable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['codespeed.Executable']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['codespeed.Revision']"}),
'std_dev': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'val_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'codespeed.revision': {
'Meta': {'unique_together': "(('commitid', 'project'),)", 'object_name': 'Revision'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'commitid': ('django.db.models.fields.CharField', [], {'max_length': '42'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['codespeed.Project']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
}
}
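# (Explanatory note, not part of the generated migration.) The ``models``
# dict above is South's frozen snapshot of the app's models at the time this
# migration was written; South uses it to build the historical ``orm``
# accessor passed to forwards()/backwards(), so the migration runs against
# the models as they existed then rather than the current code.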
complete_apps = ['codespeed']
| lgpl-2.1 |
Galithil/charon | charon/user.py | 3 | 3993 | " Charon: User account handling."
import logging
import json
import urllib
import tornado.web
import couchdb
import requests
from . import constants
from . import settings
from . import utils
from .requesthandler import RequestHandler
from .saver import Saver
class UserSaver(Saver):
doctype = constants.USER
class Login(RequestHandler):
"Login handler."
def get(self):
self.render('login.html',
error=None,
next=self.get_argument('next', None))
def post(self):
self.check_xsrf_cookie()
try:
self.authenticate_user(self.get_argument('email'),
self.get_argument('password'))
url = self.get_argument('next', None)
if not url:
url = self.reverse_url('home')
self.redirect(url)
except (tornado.web.MissingArgumentError, ValueError), msg:
logging.debug("login error: %s", msg)
self.render('login.html',
error=str(msg),
next=self.get_argument('next', None))
def authenticate_user(self, email, password):
"""Authenticate the given email and password.
This is done by consulting the Userman web service.
Save or update the user in this database.
Raise ValueError if any error.
"""
if not email:
raise ValueError('no email given')
if not password:
raise ValueError('no password given')
url = "{0}/{1}".format(settings['AUTH']['AUTH_HREF'],
urllib.quote(email))
data = json.dumps(dict(password=password, service='Charon'))
headers = {'X-Userman-API-token': settings['AUTH']['API_TOKEN']}
response = requests.post(url, data=data, headers=headers)
if response.status_code != requests.codes.ok:
raise ValueError(str(response.reason))
try:
user = self.get_user(email)
except tornado.web.HTTPError:
user = response.json()
else:
user.update(response.json())
with UserSaver(doc=user, rqh=self) as saver:
# All other changes already made.
if not user.get('api_token'):
saver['api_token'] = utils.get_iuid()
self.set_secure_cookie(constants.USER_COOKIE_NAME, email,
expires_days=settings['LOGIN_EXPIRES_DAYS'])
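# A minimal client-side sketch (not part of the original handler) of the
# login flow implemented above; host, credentials and the xsrf_token value
# are hypothetical (the token comes from a prior GET /login):
#
#   requests.post('http://localhost:8888/login',
#                 data={'email': 'user@example.com',
#                       'password': 'secret',
#                       '_xsrf': xsrf_token},
#                 cookies={'_xsrf': xsrf_token})
#
# On success the handler sets the secure user cookie and redirects to the
# 'next' URL, or to the home page when none was given.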
class Logout(RequestHandler):
"Logout handler."
def post(self):
self.check_xsrf_cookie()
self.set_secure_cookie(constants.USER_COOKIE_NAME, '')
self.redirect(self.reverse_url('login'))
class User(RequestHandler):
"User account handler."
@tornado.web.authenticated
def get(self, email):
user = self.get_user(email)
current_user = self.get_current_user()
privileged = current_user == user or current_user['role'] == 'admin'
self.render('user.html',
user=user,
privileged=privileged,
logs=self.get_logs(user['_id']))
class UserApiToken(RequestHandler):
"API token handler for user account."
@tornado.web.authenticated
def post(self, email):
"Set the API token for the user."
self.check_xsrf_cookie()
user = self.get_user(email)
current_user = self.get_current_user()
privileged = current_user == user or current_user['role'] == 'admin'
if not privileged:
raise tornado.web.HTTPError(403)
with UserSaver(doc=user, rqh=self) as saver:
saver['api_token'] = utils.get_iuid()
self.redirect(self.reverse_url('user', user['email']))
class Users(RequestHandler):
"Display all users."
@tornado.web.authenticated
def get(self):
view = self.db.view('user/email')
users = [self.get_user(r.key) for r in view]
self.render('users.html', users=users)
| mit |
ZenHarbinger/snapcraft | snapcraft/internal/pluginhandler/stage_package_grammar/errors.py | 2 | 1402 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft.internal import errors
class StagePackageSyntaxError(errors.SnapcraftError):
fmt = 'Invalid syntax for stage packages: {message}'
def __init__(self, message):
super().__init__(message=message)
class OnStatementSyntaxError(StagePackageSyntaxError):
def __init__(self, on_statement, *, message=None):
components = ["{!r} is not a valid 'on' clause".format(on_statement)]
if message:
components.append(message)
super().__init__(message=': '.join(components))
class UnsatisfiedStatementError(errors.SnapcraftError):
fmt = 'Unable to satisfy {statement!r}, failure forced'
def __init__(self, statement):
super().__init__(statement=statement)
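# A small usage sketch (not part of the original module), assuming
# SnapcraftError renders ``fmt`` with the keyword arguments passed to
# __init__; the statement value is illustrative:
#
#   err = OnStatementSyntaxError('on amd64,i386',
#                                message='spaces are not allowed')
#   str(err)
#   # "'on amd64,i386' is not a valid 'on' clause: spaces are not allowed"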
| gpl-3.0 |
GeographicaGS/moocng | moocng/courses/translation.py | 1 | 1176 | from modeltranslation.translator import translator, TranslationOptions
from moocng.courses.models import Course, StaticPage, Unit, KnowledgeQuantum, Question, Option
class CourseTranslationOptions(TranslationOptions):
fields = ('name', 'description', 'requirements', 'learning_goals', 'intended_audience', 'promotion_media_content_id')
class StaticPageTranslationOptions(TranslationOptions):
fields = ('title', 'body')
class UnitTranslationOptions(TranslationOptions):
fields = ('title',)
class KnowledgeQuantumTranslationOptions(TranslationOptions):
fields = ('title', 'teacher_comments', 'supplementary_material', 'media_content_id')
class QuestionTranslationOptions(TranslationOptions):
fields = ('solution_text',)
class OptionTranslationOptions(TranslationOptions):
fields = ('solution', 'text', 'feedback')
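# (Explanatory note, not part of the original module.) django-modeltranslation
# uses these registrations to add one column per configured language for each
# listed field (e.g. name_en and name_es when settings.LANGUAGES covers
# English and Spanish), while keeping the plain ``name`` accessor
# language-aware.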
translator.register(Course, CourseTranslationOptions)
translator.register(StaticPage, StaticPageTranslationOptions)
translator.register(Unit, UnitTranslationOptions)
translator.register(KnowledgeQuantum, KnowledgeQuantumTranslationOptions)
translator.register(Question, QuestionTranslationOptions)
translator.register(Option, OptionTranslationOptions)
| apache-2.0 |
gorkinovich/DefendersOfMankind | dependencies/Ogre/Tools/Wings3DExporter/xmlout.py | 34 | 1531 |
# extremely simple XML writer
#
# This is to remove libxml2 dependency on platforms where it's
# difficult to build
#
# 2003 Attila Tajti <attis@spacehawks.hu>
class XMLDoc:
def __init__(self, version):
self.version = version
self.root_element = None
def saveFile(self, filename):
f = file(filename, "w")
f.write('<?xml version="' + self.version + '"?>\n')
self.root_element._write(f, 0)
def saveFormatFile(self, filename, fmt):
self.saveFile(filename)
def freeDoc(self):
pass
class XMLNode:
def __init__(self, name):
self.name = name
self.props = []
self.children = []
self.content = None
def docSetRootElement(self, doc):
doc.root_element = self
def newChild(self, namespace, name, content):
if namespace:
fullname = namespace + ':' + name
else:
fullname = name
child = XMLNode(fullname)
child.content = content
self.children.append(child)
return child
def setProp(self, name, value):
self.props.append((name, value))
def _write(self, f, indent):
#istr = " " * indent
istr = "\t" * indent
# put together our tag
tag = self.name
for prop in self.props:
name, value = prop
tag += ' ' + name + '="' + value + '"'
# print tag, or children between tags
if self.children:
f.write(istr + '<%s>\n' % tag)
for child in self.children:
child._write(f, indent + 1)
f.write(istr + '</%s>\n' % self.name)
else:
f.write(istr + '<%s/>\n' % tag)
def newDoc(version):
return XMLDoc(version)
def newNode(name):
return XMLNode(name)
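# A minimal usage sketch (not part of the original exporter); the output
# path is hypothetical:
def _example_usage():
	doc = newDoc("1.0")
	root = newNode("mesh")
	root.docSetRootElement(doc)
	materials = root.newChild(None, "materials", None)
	materials.setProp("count", "1")
	# writes: <?xml version="1.0"?> / <mesh> / <materials count="1"/> / </mesh>
	doc.saveFile("example.mesh.xml")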
| gpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| mit |
emfcamp/micropython | tests/basics/class_bind_self.py | 59 | 1235 | # test for correct binding of self when accessing attr of an instance
class A:
def __init__(self, arg):
self.val = arg
def __str__(self):
return 'A.__str__ ' + str(self.val)
def __call__(self, arg):
return 'A.__call__', arg
def foo(self, arg):
return 'A.foo', self.val, arg
def make_closure(x_in):
x = x_in
def closure(y):
return x, y is c
return closure
class C:
# these act like methods and bind self
def f1(self, arg):
return 'C.f1', self is c, arg
f2 = lambda self, arg: ('C.f2', self is c, arg)
f3 = make_closure('f3') # closure
def f4(self, arg): # generator
yield self is c, arg
# these act like simple variables and don't bind self
f5 = int # builtin type
f6 = abs # builtin function
f7 = A # user type
f8 = A(8) # user instance which is callable
f9 = A(9).foo # user bound method
c = C()
print(c.f1(1))
print(c.f2(2))
print(c.f3())
print(next(c.f4(4)))
print(c.f5(5))
#print(c.f6(-6)) not working in uPy
print(c.f7(7))
print(c.f8(8))
print(c.f9(9))
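# (Explanatory note, not part of the original test; describes the CPython
# semantics this test checks uPy against.) Plain functions are descriptors,
# so instance attribute access returns a bound method with self filled in
# (f1-f4). Builtin types/functions, user classes, callable instances and
# already-bound methods (f5-f9) do not rebind, so they are returned as-is
# with no implicit self argument.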
# not working in uPy
#class C(list):
# # this acts like a method and binds self
# f1 = list.extend
#c = C()
#c.f1([3, 1, 2])
#print(c)
| mit |
mdhaber/scipy | scipy/spatial/tests/test_slerp.py | 11 | 15434 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.spatial import geometric_slerp
def _generate_spherical_points(ndim=3, n_pts=2):
# generate uniform points on sphere
# see: https://stackoverflow.com/a/23785326
# tentatively extended to arbitrary dims
# for 0-sphere it will always produce antipodes
np.random.seed(123)
points = np.random.normal(size=(n_pts, ndim))
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
return points[0], points[1]
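# (Orientation note, not part of the original test module.) geometric_slerp
# computes classic spherical linear interpolation on the unit n-sphere:
#
#   slerp(p0, p1; t) = (sin((1 - t)*omega)*p0 + sin(t*omega)*p1) / sin(omega)
#
# where omega is the central angle between the unit vectors p0 and p1.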
class TestGeometricSlerp:
# Test various properties of the geometric slerp code
@pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
@pytest.mark.parametrize("n_pts", [0, 3, 17])
def test_shape_property(self, n_dims, n_pts):
# geometric_slerp output shape should match
# input dimensionality & requested number
# of interpolation points
start, end = _generate_spherical_points(n_dims, 2)
actual = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, n_pts))
assert actual.shape == (n_pts, n_dims)
@pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
@pytest.mark.parametrize("n_pts", [3, 17])
def test_include_ends(self, n_dims, n_pts):
# geometric_slerp should return a data structure
# that includes the start and end coordinates
# when t includes 0 and 1 ends
# this is convenient for plotting surfaces represented
# by interpolations for example
# the generator doesn't work so well for the unit
# sphere (it always produces antipodes), so use
# custom values there
start, end = _generate_spherical_points(n_dims, 2)
actual = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, n_pts))
assert_allclose(actual[0], start)
assert_allclose(actual[-1], end)
@pytest.mark.parametrize("start, end", [
# both arrays are not flat
(np.zeros((1, 3)), np.ones((1, 3))),
# only start array is not flat
(np.zeros((1, 3)), np.ones(3)),
# only end array is not flat
(np.zeros(1), np.ones((3, 1))),
])
def test_input_shape_flat(self, start, end):
# geometric_slerp should handle input arrays that are
# not flat appropriately
with pytest.raises(ValueError, match='one-dimensional'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
@pytest.mark.parametrize("start, end", [
# 7-D and 3-D ends
(np.zeros(7), np.ones(3)),
# 2-D and 1-D ends
(np.zeros(2), np.ones(1)),
# empty, "3D" will also get caught this way
(np.array([]), np.ones(3)),
])
def test_input_dim_mismatch(self, start, end):
# geometric_slerp must appropriately handle cases where
# an interpolation is attempted across two different
# dimensionalities
with pytest.raises(ValueError, match='dimensions'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
@pytest.mark.parametrize("start, end", [
# both empty
(np.array([]), np.array([])),
])
def test_input_at_least1d(self, start, end):
# empty inputs to geometric_slerp must
# be handled appropriately when not detected
# by mismatch
with pytest.raises(ValueError, match='at least two-dim'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
@pytest.mark.parametrize("start, end, expected", [
# North and South Poles are definitely antipodes
# but should be handled gracefully now
(np.array([0, 0, 1.0]), np.array([0, 0, -1.0]), "warning"),
# this case will issue a warning & be handled
# gracefully as well;
# North Pole was rotated very slightly
# using r = R.from_euler('x', 0.035, degrees=True)
# to achieve Euclidean distance offset from diameter by
# 9.328908379124812e-08, within the default tol
(np.array([0.00000000e+00,
-6.10865200e-04,
9.99999813e-01]), np.array([0, 0, -1.0]), "warning"),
# this case should succeed without warning because a
# sufficiently large
# rotation was applied to North Pole point to shift it
# to a Euclidean distance of 2.3036691931821451e-07
# from South Pole, which is larger than tol
(np.array([0.00000000e+00,
-9.59930941e-04,
9.99999539e-01]), np.array([0, 0, -1.0]), "success"),
])
def test_handle_antipodes(self, start, end, expected):
# antipodal points must be handled appropriately;
# there are an infinite number of possible geodesic
# interpolations between them in higher dims
if expected == "warning":
with pytest.warns(UserWarning, match='antipodes'):
res = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
else:
res = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
# antipodes or near-antipodes should still produce
# slerp paths on the surface of the sphere (but they
# may be ambiguous):
assert_allclose(np.linalg.norm(res, axis=1), 1.0)
@pytest.mark.parametrize("start, end, expected", [
# 2-D with n_pts=4 (two new interpolation points)
# this is an actual circle
(np.array([1, 0]),
np.array([0, 1]),
np.array([[1, 0],
[np.sqrt(3) / 2, 0.5], # 30 deg on unit circle
[0.5, np.sqrt(3) / 2], # 60 deg on unit circle
[0, 1]])),
# likewise for 3-D (add z = 0 plane)
# this is an ordinary sphere
(np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([[1, 0, 0],
[np.sqrt(3) / 2, 0.5, 0],
[0.5, np.sqrt(3) / 2, 0],
[0, 1, 0]])),
# for 5-D, pad more columns with constants
# zeros are easiest--non-zero values on unit
# circle are more difficult to reason about
# at higher dims
(np.array([1, 0, 0, 0, 0]),
np.array([0, 1, 0, 0, 0]),
np.array([[1, 0, 0, 0, 0],
[np.sqrt(3) / 2, 0.5, 0, 0, 0],
[0.5, np.sqrt(3) / 2, 0, 0, 0],
[0, 1, 0, 0, 0]])),
])
def test_straightforward_examples(self, start, end, expected):
# some straightforward interpolation tests, sufficiently
# simple to use the unit circle to deduce expected values;
# for larger dimensions, pad with constants so that the
# data is N-D but simpler to reason about
actual = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 4))
assert_allclose(actual, expected, atol=1e-16)
@pytest.mark.parametrize("t", [
# both interval ends clearly violate limits
np.linspace(-20, 20, 300),
# only one interval end violating limit slightly
np.linspace(-0.0001, 0.0001, 17),
])
def test_t_values_limits(self, t):
# geometric_slerp() should appropriately handle
# interpolation parameters < 0 and > 1
with pytest.raises(ValueError, match='interpolation parameter'):
_ = geometric_slerp(start=np.array([1, 0]),
end=np.array([0, 1]),
t=t)
@pytest.mark.parametrize("start, end", [
(np.array([1]),
np.array([0])),
(np.array([0]),
np.array([1])),
(np.array([-17.7]),
np.array([165.9])),
])
def test_0_sphere_handling(self, start, end):
# it does not make sense to interpolate the set of
# two points that is the 0-sphere
with pytest.raises(ValueError, match='at least two-dim'):
_ = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 4))
@pytest.mark.parametrize("tol", [
# an integer currently raises
5,
# string raises
"7",
# list and arrays also raise
[5, 6, 7], np.array(9.0),
])
def test_tol_type(self, tol):
# geometric_slerp() should raise if tol is not
# a suitable float type
with pytest.raises(ValueError, match='must be a float'):
_ = geometric_slerp(start=np.array([1, 0]),
end=np.array([0, 1]),
t=np.linspace(0, 1, 5),
tol=tol)
@pytest.mark.parametrize("tol", [
-5e-6,
-7e-10,
])
def test_tol_sign(self, tol):
# geometric_slerp() currently handles negative
# tol values, as long as they are floats
_ = geometric_slerp(start=np.array([1, 0]),
end=np.array([0, 1]),
t=np.linspace(0, 1, 5),
tol=tol)
@pytest.mark.parametrize("start, end", [
# 1-sphere (circle) with one point at origin
# and the other on the circle
(np.array([1, 0]), np.array([0, 0])),
# 2-sphere (normal sphere) with both points
# just slightly off sphere by the same amount
# in different directions
(np.array([1 + 1e-6, 0, 0]),
np.array([0, 1 - 1e-6, 0])),
# same thing in 4-D
(np.array([1 + 1e-6, 0, 0, 0]),
np.array([0, 1 - 1e-6, 0, 0])),
])
def test_unit_sphere_enforcement(self, start, end):
# geometric_slerp() should raise on input that clearly
# cannot be on an n-sphere of radius 1
with pytest.raises(ValueError, match='unit n-sphere'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 5))
@pytest.mark.parametrize("start, end", [
# 1-sphere 45 degree case
(np.array([1, 0]),
np.array([np.sqrt(2) / 2.,
np.sqrt(2) / 2.])),
# 2-sphere 135 degree case
(np.array([1, 0]),
np.array([-np.sqrt(2) / 2.,
np.sqrt(2) / 2.])),
])
@pytest.mark.parametrize("t_func", [
np.linspace, np.logspace])
def test_order_handling(self, start, end, t_func):
# geometric_slerp() should handle scenarios with
# ascending and descending t value arrays gracefully;
# results should simply be reversed
# for scrambled / unsorted parameters, the same values
# should be returned, just in scrambled order
num_t_vals = 20
np.random.seed(789)
forward_t_vals = t_func(0, 10, num_t_vals)
# normalize to max of 1
forward_t_vals /= forward_t_vals.max()
reverse_t_vals = np.flipud(forward_t_vals)
shuffled_indices = np.arange(num_t_vals)
np.random.shuffle(shuffled_indices)
scramble_t_vals = forward_t_vals.copy()[shuffled_indices]
forward_results = geometric_slerp(start=start,
end=end,
t=forward_t_vals)
reverse_results = geometric_slerp(start=start,
end=end,
t=reverse_t_vals)
scrambled_results = geometric_slerp(start=start,
end=end,
t=scramble_t_vals)
# check fidelity to input order
assert_allclose(forward_results, np.flipud(reverse_results))
assert_allclose(forward_results[shuffled_indices],
scrambled_results)
@pytest.mark.parametrize("t", [
# string:
"15, 5, 7",
# complex numbers currently produce a warning
# but not sure we need to worry about it too much:
# [3 + 1j, 5 + 2j],
])
def test_t_values_conversion(self, t):
with pytest.raises(ValueError):
_ = geometric_slerp(start=np.array([1]),
end=np.array([0]),
t=t)
def test_accept_arraylike(self):
# array-like support requested by reviewer
# in gh-10380
actual = geometric_slerp([1, 0], [0, 1], [0, 1/3, 0.5, 2/3, 1])
# expected values are based on visual inspection
# of the unit circle for the progressions along
# the circumference provided in t
expected = np.array([[1, 0],
[np.sqrt(3) / 2, 0.5],
[np.sqrt(2) / 2,
np.sqrt(2) / 2],
[0.5, np.sqrt(3) / 2],
[0, 1]], dtype=np.float64)
# Tyler's original Cython implementation of geometric_slerp
# can pass at atol=0 here, but on balance we will accept
# 1e-16 for an implementation that avoids Cython and
# makes up accuracy ground elsewhere
assert_allclose(actual, expected, atol=1e-16)
def test_scalar_t(self):
# when t is a scalar, return value is a single
# interpolated point of the appropriate dimensionality
# requested by reviewer in gh-10380
actual = geometric_slerp([1, 0], [0, 1], 0.5)
expected = np.array([np.sqrt(2) / 2,
np.sqrt(2) / 2], dtype=np.float64)
assert actual.shape == (2,)
assert_allclose(actual, expected)
@pytest.mark.parametrize('start', [
np.array([1, 0, 0]),
np.array([0, 1]),
])
def test_degenerate_input(self, start):
# handle start == end with repeated value
# like np.linspace
expected = [start] * 5
actual = geometric_slerp(start=start,
end=start,
t=np.linspace(0, 1, 5))
assert_allclose(actual, expected)
@pytest.mark.parametrize('k', np.logspace(-10, -1, 10))
def test_numerical_stability_pi(self, k):
# geometric_slerp should have excellent numerical
# stability for angles approaching pi between
# the start and end points
angle = np.pi - k
ts = np.linspace(0, 1, 100)
P = np.array([1, 0, 0, 0])
Q = np.array([np.cos(angle), np.sin(angle), 0, 0])
# the test should only be enforced for cases where
# geometric_slerp determines that the input is actually
# on the unit sphere
with np.testing.suppress_warnings() as sup:
sup.filter(UserWarning)
result = geometric_slerp(P, Q, ts, 1e-18)
norms = np.linalg.norm(result, axis=1)
error = np.max(np.abs(norms - 1))
assert error < 4e-15
| bsd-3-clause |
EricRho/home-assistant | tests/test_config.py | 3 | 4496 | """
tests.test_config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests config utils.
"""
# pylint: disable=too-many-public-methods,protected-access
import unittest
import unittest.mock as mock
import os
from homeassistant.core import DOMAIN, HomeAssistantError
import homeassistant.config as config_util
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME,
CONF_TIME_ZONE)
from common import get_test_config_dir, mock_detect_location_info
CONFIG_DIR = get_test_config_dir()
YAML_PATH = os.path.join(CONFIG_DIR, config_util.YAML_CONFIG_FILE)
def create_file(path):
""" Creates an empty file. """
with open(path, 'w'):
pass
class TestConfig(unittest.TestCase):
""" Test the config utils. """
def tearDown(self): # pylint: disable=invalid-name
""" Clean up. """
if os.path.isfile(YAML_PATH):
os.remove(YAML_PATH)
def test_create_default_config(self):
""" Test creationg of default config. """
config_util.create_default_config(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
def test_find_config_file_yaml(self):
""" Test if it finds a YAML config file. """
create_file(YAML_PATH)
self.assertEqual(YAML_PATH, config_util.find_config_file(CONFIG_DIR))
@mock.patch('builtins.print')
def test_ensure_config_exists_creates_config(self, mock_print):
""" Test that calling ensure_config_exists creates a new config file if
none exists. """
config_util.ensure_config_exists(CONFIG_DIR, False)
self.assertTrue(os.path.isfile(YAML_PATH))
self.assertTrue(mock_print.called)
def test_ensure_config_exists_uses_existing_config(self):
""" Test that calling ensure_config_exists uses existing config. """
create_file(YAML_PATH)
config_util.ensure_config_exists(CONFIG_DIR, False)
with open(YAML_PATH) as f:
content = f.read()
# Files created with create_file are empty
self.assertEqual('', content)
def test_load_yaml_config_converts_empty_files_to_dict(self):
""" Test that loading an empty file returns an empty dict. """
create_file(YAML_PATH)
self.assertIsInstance(
config_util.load_yaml_config_file(YAML_PATH), dict)
def test_load_yaml_config_raises_error_if_not_dict(self):
""" Test error raised when YAML file is not a dict. """
with open(YAML_PATH, 'w') as f:
f.write('5')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_yaml_config_raises_error_if_malformed_yaml(self):
""" Test error raised if invalid YAML. """
with open(YAML_PATH, 'w') as f:
f.write(':')
with self.assertRaises(HomeAssistantError):
config_util.load_yaml_config_file(YAML_PATH)
def test_load_config_loads_yaml_config(self):
""" Test correct YAML config loading. """
with open(YAML_PATH, 'w') as f:
f.write('hello: world')
self.assertEqual({'hello': 'world'},
config_util.load_config_file(YAML_PATH))
@mock.patch('homeassistant.util.location.detect_location_info',
mock_detect_location_info)
@mock.patch('builtins.print')
def test_create_default_config_detect_location(self, mock_print):
""" Test that detect location sets the correct config keys. """
config_util.ensure_config_exists(CONFIG_DIR)
config = config_util.load_config_file(YAML_PATH)
self.assertIn(DOMAIN, config)
ha_conf = config[DOMAIN]
expected_values = {
CONF_LATITUDE: 2.0,
CONF_LONGITUDE: 1.0,
CONF_TEMPERATURE_UNIT: 'F',
CONF_NAME: 'Home',
CONF_TIME_ZONE: 'America/Los_Angeles'
}
self.assertEqual(expected_values, ha_conf)
self.assertTrue(mock_print.called)
@mock.patch('builtins.print')
def test_create_default_config_returns_none_if_write_error(self,
mock_print):
"""
Test that writing default config to a non-existing folder returns None.
"""
self.assertIsNone(
config_util.create_default_config(
os.path.join(CONFIG_DIR, 'non_existing_dir/'), False))
self.assertTrue(mock_print.called)
| mit |
vijayanandnandam/youtube-dl | youtube_dl/extractor/ccc.py | 50 | 2839 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class CCCIE(InfoExtractor):
IE_NAME = 'media.ccc.de'
_VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/v/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://media.ccc.de/v/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor#video',
'md5': '3a1eda8f3a29515d27f5adb967d7e740',
'info_dict': {
'id': '1839',
'ext': 'mp4',
'title': 'Introduction to Processor Design',
'description': 'md5:df55f6d073d4ceae55aae6f2fd98a0ac',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20131228',
'timestamp': 1388188800,
'duration': 3710,
}
}, {
'url': 'https://media.ccc.de/v/32c3-7368-shopshifting#download',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
event_id = self._search_regex(r"data-id='(\d+)'", webpage, 'event id')
event_data = self._download_json('https://media.ccc.de/public/events/%s' % event_id, event_id)
formats = []
for recording in event_data.get('recordings', []):
recording_url = recording.get('recording_url')
if not recording_url:
continue
language = recording.get('language')
folder = recording.get('folder')
format_id = None
if language:
format_id = language
if folder:
if language:
format_id += '-' + folder
else:
format_id = folder
vcodec = 'h264' if 'h264' in folder else (
'none' if folder in ('mp3', 'opus') else None
)
formats.append({
'format_id': format_id,
'url': recording_url,
'width': int_or_none(recording.get('width')),
'height': int_or_none(recording.get('height')),
'filesize': int_or_none(recording.get('size'), invscale=1024 * 1024),
'language': language,
'vcodec': vcodec,
})
self._sort_formats(formats)
return {
'id': event_id,
'display_id': display_id,
'title': event_data['title'],
'description': event_data.get('description'),
'thumbnail': event_data.get('thumb_url'),
'timestamp': parse_iso8601(event_data.get('date')),
'duration': int_or_none(event_data.get('length')),
'tags': event_data.get('tags'),
'formats': formats,
}
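# (Illustrative note, not part of the extractor.) Given a recording with
# language 'eng' in folder 'h264-hd', the loop above yields format_id
# 'eng-h264-hd' and vcodec 'h264'; an 'opus' folder without a language
# yields format_id 'opus' and vcodec 'none'.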
| unlicense |
apache/airflow | airflow/providers/google/cloud/operators/cloud_build.py | 3 | 8638 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Operators that integrate with Google Cloud Build service."""
import json
import re
from copy import deepcopy
from typing import Any, Dict, Optional, Sequence, Union
from urllib.parse import unquote, urlparse
try:
import airflow.utils.yaml as yaml
except ImportError:
import yaml
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.cloud_build import CloudBuildHook
REGEX_REPO_PATH = re.compile(r"^/p/(?P<project_id>[^/]+)/r/(?P<repo_name>[^/]+)")
class BuildProcessor:
"""
Processes build configurations to add additional functionality to support the use of operators.
The following improvements are made:
* It is required to provide the source, and only one source type can be given,
* It is possible to provide the source as a URL string instead of a dict.
:param body: The request body.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds
:type body: dict
"""
def __init__(self, body: dict) -> None:
self.body = deepcopy(body)
def _verify_source(self) -> None:
is_storage = "storageSource" in self.body["source"]
is_repo = "repoSource" in self.body["source"]
sources_count = sum([is_storage, is_repo])
if sources_count != 1:
raise AirflowException(
"The source could not be determined. Please choose one data source from: "
"storageSource and repoSource."
)
def _reformat_source(self) -> None:
self._reformat_repo_source()
self._reformat_storage_source()
def _reformat_repo_source(self) -> None:
if "repoSource" not in self.body["source"]:
return
source = self.body["source"]["repoSource"]
if not isinstance(source, str):
return
self.body["source"]["repoSource"] = self._convert_repo_url_to_dict(source)
def _reformat_storage_source(self) -> None:
if "storageSource" not in self.body["source"]:
return
source = self.body["source"]["storageSource"]
if not isinstance(source, str):
return
self.body["source"]["storageSource"] = self._convert_storage_url_to_dict(source)
def process_body(self) -> dict:
"""
Processes the body passed in the constructor.
:return: the body.
:rtype: dict
"""
if 'source' in self.body:
self._verify_source()
self._reformat_source()
return self.body
@staticmethod
def _convert_repo_url_to_dict(source):
"""
Convert url to repository in Google Cloud Source to a format supported by the API
Example valid input:
.. code-block:: none
https://source.developers.google.com/p/airflow-project/r/airflow-repo#branch-name
"""
url_parts = urlparse(source)
match = REGEX_REPO_PATH.search(url_parts.path)
if url_parts.scheme != "https" or url_parts.hostname != "source.developers.google.com" or not match:
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"https://source.developers.google.com/p/airflow-project/r/airflow-repo#branch-name"
)
project_id = unquote(match.group("project_id"))
repo_name = unquote(match.group("repo_name"))
source_dict = {"projectId": project_id, "repoName": repo_name, "branchName": "master"}
if url_parts.fragment:
source_dict["branchName"] = url_parts.fragment
return source_dict
@staticmethod
def _convert_storage_url_to_dict(storage_url: str) -> Dict[str, Any]:
"""
Convert url to object in Google Cloud Storage to a format supported by the API
Example valid input:
.. code-block:: none
gs://bucket-name/object-name.tar.gz
"""
url_parts = urlparse(storage_url)
if url_parts.scheme != "gs" or not url_parts.hostname or not url_parts.path or url_parts.path == "/":
raise AirflowException(
"Invalid URL. You must pass the URL in the format: "
"gs://bucket-name/object-name.tar.gz#24565443"
)
source_dict = {"bucket": url_parts.hostname, "object": url_parts.path[1:]}
if url_parts.fragment:
source_dict["generation"] = url_parts.fragment
return source_dict
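# A small sketch (not part of the provider) of the normalisation performed
# by the method above; bucket, object and generation values are illustrative:
#
#   BuildProcessor._convert_storage_url_to_dict(
#       'gs://my-bucket/source.tar.gz#1630000000')
#   # -> {'bucket': 'my-bucket', 'object': 'source.tar.gz',
#   #     'generation': '1630000000'}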
class CloudBuildCreateBuildOperator(BaseOperator):
"""
Starts a build with the specified configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildCreateBuildOperator`
:param body: The build config with instructions to perform with CloudBuild.
Can be a dictionary or path to a file type like YAML or JSON.
See: https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds
:type body: dict or string
:param project_id: ID of the Google Cloud project if None then
default project_id is used.
:type project_id: str
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param api_version: API version used (for example v1 or v1beta1).
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
template_ext = ['.yml', '.yaml', '.json']
def __init__(
self,
*,
body: Union[dict, str],
project_id: Optional[str] = None,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
# Not template fields to keep original value
self.body_raw = body
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self._validate_inputs()
self.impersonation_chain = impersonation_chain
def prepare_template(self) -> None:
# if no file is specified, skip
if not isinstance(self.body_raw, str):
return
with open(self.body_raw) as file:
if any(self.body_raw.endswith(ext) for ext in ['.yaml', '.yml']):
self.body = yaml.load(file.read(), Loader=yaml.FullLoader)
if self.body_raw.endswith('.json'):
self.body = json.loads(file.read())
def _validate_inputs(self) -> None:
if not self.body:
raise AirflowException("The required parameter 'body' is missing")
def execute(self, context):
hook = CloudBuildHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
body = BuildProcessor(body=self.body).process_body()
return hook.create_build(body=body, project_id=self.project_id)
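# A minimal DAG-task sketch (not from the provider docs; the task id, bucket
# and image names are hypothetical):
#
#   create_build = CloudBuildCreateBuildOperator(
#       task_id='create_build',
#       body={
#           'source': {'storageSource': 'gs://my-bucket/source.tar.gz'},
#           'steps': [{'name': 'gcr.io/cloud-builders/docker',
#                      'args': ['build', '-t', 'gcr.io/$PROJECT_ID/my-image', '.']}],
#       },
#   )
#
# BuildProcessor then expands the storageSource URL into the dict form the
# Cloud Build API expects before the build is submitted.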
| apache-2.0 |
jkshaver/virtualenv-1.8.2 | env/lib/python2.7/site-packages/django/utils/unittest/case.py | 103 | 42486 | """Test case implementation"""
import sys
import difflib
import pprint
import re
import unittest
import warnings
from django.utils.unittest import result
from django.utils.unittest.util import\
safe_repr, safe_str, strclass,\
unorderable_list_difference
from django.utils.unittest.compatibility import wraps
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
# can't use super because Python 2.4 exceptions are old style
Exception.__init__(self)
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
@wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
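# Usage sketch (not from the original source); the condition and test body
# are illustrative:
#
#   class PlatformTests(TestCase):
#       @skipUnless(sys.platform.startswith('linux'), 'requires Linux')
#       def test_linux_only_feature(self):
#           ...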
def expectedFailure(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, basestring):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
class TestCase(unittest.TestCase):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
"""
# This attribute determines which exception will be raised when
# the instance's assertion methods fail; test methods raising this
# exception will be deemed to have 'failed' rather than 'errored'
failureException = AssertionError
# This attribute sets the maximum length of a diff in failure messages
# by assert methods using difflib. It is looked up as an instance attribute
# so can be configured by individual tests if required.
maxDiff = 80*8
# This attribute determines whether long messages (including repr of
# objects used in assert methods) will be printed on failure in *addition*
# to any explicit message passed.
longMessage = True
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except AttributeError:
raise ValueError("no such test method in %s: %s" % \
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = _TypeEqualityDict(self)
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
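    # Usage sketch (not from the original source): a TestCase subclass can
    # register a comparer for one of its own types; Point and
    # assertPointEqual are hypothetical names.
    #
    #   def setUp(self):
    #       self.addTypeEqualityFunc(Point, self.assertPointEqual)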
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("Use of a TestResult without an addSkip method is deprecated",
DeprecationWarning, 2)
result.addSuccess(self)
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
self._resultForDoCleanups = result
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
success = False
try:
self.setUp()
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
try:
testMethod()
except self.failureException:
result.addFailure(self, sys.exc_info())
except _ExpectedFailure, e:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, e.exc_info)
else:
warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
DeprecationWarning)
result.addSuccess(self)
except _UnexpectedSuccess:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
DeprecationWarning)
result.addFailure(self, sys.exc_info())
except SkipTest, e:
self._addSkip(result, str(e))
except Exception:
result.addError(self, sys.exc_info())
else:
success = True
try:
self.tearDown()
except Exception:
result.addError(self, sys.exc_info())
success = False
cleanUpSuccess = self.doCleanups()
success = success and cleanUpSuccess
if success:
result.addSuccess(self)
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
result = self._resultForDoCleanups
ok = True
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
try:
function(*args, **kwargs)
except Exception:
ok = False
result.addError(self, sys.exc_info())
return ok
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"Fail the test if the expression is true."
if expr:
msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
if callableObj is None:
return _AssertRaisesContext(excClass, self)
try:
callableObj(*args, **kwargs)
except excClass:
return
if hasattr(excClass,'__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
difference between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# Synonyms for assertion methods
# The plurals are undocumented. Keep them that way to discourage use.
# Do not add more. Do not remove.
# Going through a deprecation cycle on these would annoy many people.
assertEquals = assertEqual
assertNotEquals = assertNotEqual
assertAlmostEquals = assertAlmostEqual
assertNotAlmostEquals = assertNotAlmostEqual
assert_ = assertTrue
# These fail* assertion method names are pending deprecation and will
# be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
PendingDeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
failUnlessEqual = _deprecate(assertEqual)
failIfEqual = _deprecate(assertNotEqual)
failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
failUnless = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
def assertSequenceEqual(self, seq1, seq2,
msg=None, seq_type=None, max_diff=80*8):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
max_diff: Maximum size of the diff; larger diffs are not shown
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = repr(seq1)
seq2_repr = repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
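# Hedged sketch of _truncateMessage behaviour (maxDiff and DIFF_OMITTED are
# defined elsewhere in this module; the values below are illustrative):
#
#   self.maxDiff = 10
#   self._truncateMessage("Lists differ", "x" * 50)
#   # -> "Lists differ" + (DIFF_OMITTED % 50), since len(diff) > maxDiff
#   self.maxDiff = None
#   self._truncateMessage("Lists differ", "x" * 50)
#   # -> message plus the full diff; None disables truncation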
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses duck typing to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
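# Illustrative failure for assertSetEqual (example values, not from the source):
#
#   self.assertSetEqual(set([1, 2]), set([2, 3]))
#   # difference1 == set([1]) and difference2 == set([3]), so the message is:
#   #   Items in the first set but not the second:
#   #   1
#   #   Items in the second set but not the first:
#   #   3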
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertTrue(isinstance(d1, dict), 'First argument is not a dictionary')
self.assertTrue(isinstance(d2, dict), 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
missing = []
mismatched = []
for key, value in expected.iteritems():
if key not in actual:
missing.append(key)
elif value != actual[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(actual[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises an assertion error with a message listing which elements of
expected_seq are missing from actual_seq, and vice versa, if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
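# Hedged examples matching the docstring above (element counts matter,
# order does not):
#
#   self.assertItemsEqual([0, 1, 1], [1, 0, 1])  # passes: same multiset
#   self.assertItemsEqual([0, 0, 1], [0, 1])     # fails: extra 0 on the left
#   # Unsortable elements (e.g. complex) take the unorderable_list_difference
#   # fallback path instead of assertSequenceEqual:
#   self.assertItemsEqual([2j, set([1])], [set([1]), 2j])  # still passes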
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertTrue(isinstance(first, basestring), (
'First argument is not a string'))
self.assertTrue(isinstance(second, basestring), (
'Second argument is not a string'))
if first != second:
standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
second.splitlines(True)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
if callable_obj is None:
return _AssertRaisesContext(expected_exception, self, expected_regexp)
try:
callable_obj(*args, **kwargs)
except expected_exception, exc_value:
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
def assertRegexpMatches(self, text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise self.failureException(msg)
def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = msg or "Regexp matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regexp.pattern,
text)
raise self.failureException(msg)
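# Hedged usage sketch (illustrative); both call styles below are supported,
# since a missing callable_obj returns an _AssertRaisesContext:
#
#   self.assertRaisesRegexp(ValueError, "invalid literal",
#                           int, "not a number")
#   with self.assertRaisesRegexp(ValueError, "invalid literal"):
#       int("not a number")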
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s testFunc=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
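# Hedged module-level sketch of FunctionTestCase (names are illustrative):
#
#   def sample_test():
#       assert 1 + 1 == 2
#
#   case = FunctionTestCase(sample_test, description="adds two numbers")
#   case.run()  # runs setUp (if any), sample_test(), then tearDown (if any)
#   # case.id() -> "sample_test"; case.shortDescription() -> "adds two numbers"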
| mit |
partofthething/home-assistant | tests/components/zha/test_api.py | 6 | 18801 | """Test ZHA API."""
from binascii import unhexlify
from unittest.mock import AsyncMock, patch
import pytest
import voluptuous as vol
import zigpy.profiles.zha
import zigpy.types
import zigpy.zcl.clusters.general as general
from homeassistant.components.websocket_api import const
from homeassistant.components.zha import DOMAIN
from homeassistant.components.zha.api import (
ATTR_DURATION,
ATTR_INSTALL_CODE,
ATTR_QR_CODE,
ATTR_SOURCE_IEEE,
ID,
SERVICE_PERMIT,
TYPE,
async_load_api,
)
from homeassistant.components.zha.core.const import (
ATTR_CLUSTER_ID,
ATTR_CLUSTER_TYPE,
ATTR_ENDPOINT_ID,
ATTR_ENDPOINT_NAMES,
ATTR_IEEE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NEIGHBORS,
ATTR_QUIRK_APPLIED,
CLUSTER_TYPE_IN,
DATA_ZHA,
DATA_ZHA_GATEWAY,
GROUP_ID,
GROUP_IDS,
GROUP_NAME,
)
from homeassistant.const import ATTR_NAME
from homeassistant.core import Context
from .conftest import FIXTURE_GRP_ID, FIXTURE_GRP_NAME
IEEE_SWITCH_DEVICE = "01:2d:6f:00:0a:90:69:e7"
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
@pytest.fixture
async def device_switch(hass, zigpy_device_mock, zha_device_joined):
"""Test zha switch platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.OnOff.cluster_id, general.Basic.cluster_id],
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
},
ieee=IEEE_SWITCH_DEVICE,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_groupable(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
general.OnOff.cluster_id,
general.Basic.cluster_id,
general.Groups.cluster_id,
],
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
},
ieee=IEEE_GROUPABLE_DEVICE,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def zha_client(hass, hass_ws_client, device_switch, device_groupable):
"""Test zha switch platform."""
# load the ZHA API
async_load_api(hass)
return await hass_ws_client(hass)
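# The tests below share one websocket round-trip pattern; a hedged outline
# (field values are illustrative):
#
#   await zha_client.send_json({ID: <msg id>, TYPE: "zha/...", **params})
#   msg = await zha_client.receive_json()
#   # msg["id"] echoes the request id, msg["type"] is const.TYPE_RESULT,
#   # and msg["result"] carries the payload the assertions inspect.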
async def test_device_clusters(hass, zha_client):
"""Test getting device cluster info."""
await zha_client.send_json(
{ID: 5, TYPE: "zha/devices/clusters", ATTR_IEEE: IEEE_SWITCH_DEVICE}
)
msg = await zha_client.receive_json()
assert len(msg["result"]) == 2
cluster_infos = sorted(msg["result"], key=lambda k: k[ID])
cluster_info = cluster_infos[0]
assert cluster_info[TYPE] == CLUSTER_TYPE_IN
assert cluster_info[ID] == 0
assert cluster_info[ATTR_NAME] == "Basic"
cluster_info = cluster_infos[1]
assert cluster_info[TYPE] == CLUSTER_TYPE_IN
assert cluster_info[ID] == 6
assert cluster_info[ATTR_NAME] == "OnOff"
async def test_device_cluster_attributes(zha_client):
"""Test getting device cluster attributes."""
await zha_client.send_json(
{
ID: 5,
TYPE: "zha/devices/clusters/attributes",
ATTR_ENDPOINT_ID: 1,
ATTR_IEEE: IEEE_SWITCH_DEVICE,
ATTR_CLUSTER_ID: 6,
ATTR_CLUSTER_TYPE: CLUSTER_TYPE_IN,
}
)
msg = await zha_client.receive_json()
attributes = msg["result"]
assert len(attributes) == 4
for attribute in attributes:
assert attribute[ID] is not None
assert attribute[ATTR_NAME] is not None
async def test_device_cluster_commands(zha_client):
"""Test getting device cluster commands."""
await zha_client.send_json(
{
ID: 5,
TYPE: "zha/devices/clusters/commands",
ATTR_ENDPOINT_ID: 1,
ATTR_IEEE: IEEE_SWITCH_DEVICE,
ATTR_CLUSTER_ID: 6,
ATTR_CLUSTER_TYPE: CLUSTER_TYPE_IN,
}
)
msg = await zha_client.receive_json()
commands = msg["result"]
assert len(commands) == 6
for command in commands:
assert command[ID] is not None
assert command[ATTR_NAME] is not None
assert command[TYPE] is not None
async def test_list_devices(zha_client):
"""Test getting zha devices."""
await zha_client.send_json({ID: 5, TYPE: "zha/devices"})
msg = await zha_client.receive_json()
devices = msg["result"]
assert len(devices) == 2
msg_id = 100
for device in devices:
msg_id += 1
assert device[ATTR_IEEE] is not None
assert device[ATTR_MANUFACTURER] is not None
assert device[ATTR_MODEL] is not None
assert device[ATTR_NAME] is not None
assert device[ATTR_QUIRK_APPLIED] is not None
assert device["entities"] is not None
assert device[ATTR_NEIGHBORS] is not None
assert device[ATTR_ENDPOINT_NAMES] is not None
for entity_reference in device["entities"]:
assert entity_reference[ATTR_NAME] is not None
assert entity_reference["entity_id"] is not None
await zha_client.send_json(
{ID: msg_id, TYPE: "zha/device", ATTR_IEEE: device[ATTR_IEEE]}
)
msg = await zha_client.receive_json()
device2 = msg["result"]
assert device == device2
async def test_device_not_found(zha_client):
"""Test not found response from get device API."""
await zha_client.send_json(
{ID: 6, TYPE: "zha/device", ATTR_IEEE: "28:6d:97:00:01:04:11:8c"}
)
msg = await zha_client.receive_json()
assert msg["id"] == 6
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_NOT_FOUND
async def test_list_groups(zha_client):
"""Test getting zha zigbee groups."""
await zha_client.send_json({ID: 7, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 1
for group in groups:
assert group["group_id"] == FIXTURE_GRP_ID
assert group["name"] == FIXTURE_GRP_NAME
assert group["members"] == []
async def test_get_group(zha_client):
"""Test getting a specific zha zigbee group."""
await zha_client.send_json({ID: 8, TYPE: "zha/group", GROUP_ID: FIXTURE_GRP_ID})
msg = await zha_client.receive_json()
assert msg["id"] == 8
assert msg["type"] == const.TYPE_RESULT
group = msg["result"]
assert group is not None
assert group["group_id"] == FIXTURE_GRP_ID
assert group["name"] == FIXTURE_GRP_NAME
assert group["members"] == []
async def test_get_group_not_found(zha_client):
"""Test not found response from get group API."""
await zha_client.send_json({ID: 9, TYPE: "zha/group", GROUP_ID: 1_234_567})
msg = await zha_client.receive_json()
assert msg["id"] == 9
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_NOT_FOUND
async def test_list_groupable_devices(zha_client, device_groupable):
"""Test getting zha devices that have a group cluster."""
await zha_client.send_json({ID: 10, TYPE: "zha/devices/groupable"})
msg = await zha_client.receive_json()
assert msg["id"] == 10
assert msg["type"] == const.TYPE_RESULT
device_endpoints = msg["result"]
assert len(device_endpoints) == 1
for endpoint in device_endpoints:
assert endpoint["device"][ATTR_IEEE] == "01:2d:6f:00:0a:90:69:e8"
assert endpoint["device"][ATTR_MANUFACTURER] is not None
assert endpoint["device"][ATTR_MODEL] is not None
assert endpoint["device"][ATTR_NAME] is not None
assert endpoint["device"][ATTR_QUIRK_APPLIED] is not None
assert endpoint["device"]["entities"] is not None
assert endpoint["endpoint_id"] is not None
assert endpoint["entities"] is not None
for entity_reference in endpoint["device"]["entities"]:
assert entity_reference[ATTR_NAME] is not None
assert entity_reference["entity_id"] is not None
for entity_reference in endpoint["entities"]:
assert entity_reference["original_name"] is not None
# Make sure there are no groupable devices when the device is unavailable
# Make device unavailable
device_groupable.available = False
await zha_client.send_json({ID: 11, TYPE: "zha/devices/groupable"})
msg = await zha_client.receive_json()
assert msg["id"] == 11
assert msg["type"] == const.TYPE_RESULT
device_endpoints = msg["result"]
assert len(device_endpoints) == 0
async def test_add_group(zha_client):
"""Test adding and getting a new zha zigbee group."""
await zha_client.send_json({ID: 12, TYPE: "zha/group/add", GROUP_NAME: "new_group"})
msg = await zha_client.receive_json()
assert msg["id"] == 12
assert msg["type"] == const.TYPE_RESULT
added_group = msg["result"]
assert added_group["name"] == "new_group"
assert added_group["members"] == []
await zha_client.send_json({ID: 13, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 13
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 2
for group in groups:
assert group["name"] == FIXTURE_GRP_NAME or group["name"] == "new_group"
async def test_remove_group(zha_client):
"""Test removing a new zha zigbee group."""
await zha_client.send_json({ID: 14, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 1
await zha_client.send_json(
{ID: 15, TYPE: "zha/group/remove", GROUP_IDS: [FIXTURE_GRP_ID]}
)
msg = await zha_client.receive_json()
assert msg["id"] == 15
assert msg["type"] == const.TYPE_RESULT
groups_remaining = msg["result"]
assert len(groups_remaining) == 0
await zha_client.send_json({ID: 16, TYPE: "zha/groups"})
msg = await zha_client.receive_json()
assert msg["id"] == 16
assert msg["type"] == const.TYPE_RESULT
groups = msg["result"]
assert len(groups) == 0
@pytest.fixture
async def app_controller(hass, setup_zha):
"""Fixture for zigpy Application Controller."""
await setup_zha()
controller = hass.data[DATA_ZHA][DATA_ZHA_GATEWAY].application_controller
p1 = patch.object(controller, "permit")
p2 = patch.object(controller, "permit_with_key", new=AsyncMock())
with p1, p2:
yield controller
@pytest.mark.parametrize(
"params, duration, node",
(
({}, 60, None),
({ATTR_DURATION: 30}, 30, None),
(
{ATTR_DURATION: 33, ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:dd"},
33,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:dd"),
),
(
{ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:d1"},
60,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:d1"),
),
),
)
async def test_permit_ha12(
hass, app_controller, hass_admin_user, params, duration, node
):
"""Test permit service."""
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 1
assert app_controller.permit.await_args[1]["time_s"] == duration
assert app_controller.permit.await_args[1]["node"] == node
assert app_controller.permit_with_key.call_count == 0
IC_TEST_PARAMS = (
(
{
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4051",
},
zigpy.types.EUI64.convert(IEEE_SWITCH_DEVICE),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
(
{
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_INSTALL_CODE: "52797BF4A5084DAA8E1712B61741CA024051",
},
zigpy.types.EUI64.convert(IEEE_SWITCH_DEVICE),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
)
@pytest.mark.parametrize("params, src_ieee, code", IC_TEST_PARAMS)
async def test_permit_with_install_code(
hass, app_controller, hass_admin_user, params, src_ieee, code
):
"""Test permit service with install code."""
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 1
assert app_controller.permit_with_key.await_args[1]["time_s"] == 60
assert app_controller.permit_with_key.await_args[1]["node"] == src_ieee
assert app_controller.permit_with_key.await_args[1]["code"] == code
IC_FAIL_PARAMS = (
{
# wrong install code
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4052",
},
# incorrect service params
{ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4051"},
{ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE},
{
# incorrect service params
ATTR_INSTALL_CODE: "5279-7BF4-A508-4DAA-8E17-12B6-1741-CA02-4051",
ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024051",
},
{
# incorrect service params
ATTR_SOURCE_IEEE: IEEE_SWITCH_DEVICE,
ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024051",
},
{
# good regex match, but bad code
ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024052"
},
{
# good aqara regex match, but bad code
ATTR_QR_CODE: (
"G$M:751$S:357S00001579$D:000000000F350FFD%Z$A:04CF8CDF"
"3C3C3C3C$I:52797BF4A5084DAA8E1712B61741CA024052"
)
},
# good consciot regex match, but bad code
{ATTR_QR_CODE: "000D6FFFFED4163B|52797BF4A5084DAA8E1712B61741CA024052"},
)
@pytest.mark.parametrize("params", IC_FAIL_PARAMS)
async def test_permit_with_install_code_fail(
hass, app_controller, hass_admin_user, params
):
"""Test permit service with install code."""
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 0
IC_QR_CODE_TEST_PARAMS = (
(
{ATTR_QR_CODE: "000D6FFFFED4163B|52797BF4A5084DAA8E1712B61741CA024051"},
zigpy.types.EUI64.convert("00:0D:6F:FF:FE:D4:16:3B"),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
(
{ATTR_QR_CODE: "Z:000D6FFFFED4163B$I:52797BF4A5084DAA8E1712B61741CA024051"},
zigpy.types.EUI64.convert("00:0D:6F:FF:FE:D4:16:3B"),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
(
{
ATTR_QR_CODE: (
"G$M:751$S:357S00001579$D:000000000F350FFD%Z$A:04CF8CDF"
"3C3C3C3C$I:52797BF4A5084DAA8E1712B61741CA024051"
)
},
zigpy.types.EUI64.convert("04:CF:8C:DF:3C:3C:3C:3C"),
unhexlify("52797BF4A5084DAA8E1712B61741CA024051"),
),
)
@pytest.mark.parametrize("params, src_ieee, code", IC_QR_CODE_TEST_PARAMS)
async def test_permit_with_qr_code(
hass, app_controller, hass_admin_user, params, src_ieee, code
):
"""Test permit service with install code from qr code."""
await hass.services.async_call(
DOMAIN, SERVICE_PERMIT, params, True, Context(user_id=hass_admin_user.id)
)
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 1
assert app_controller.permit_with_key.await_args[1]["time_s"] == 60
assert app_controller.permit_with_key.await_args[1]["node"] == src_ieee
assert app_controller.permit_with_key.await_args[1]["code"] == code
@pytest.mark.parametrize("params, src_ieee, code", IC_QR_CODE_TEST_PARAMS)
async def test_ws_permit_with_qr_code(
app_controller, zha_client, params, src_ieee, code
):
"""Test permit service with install code from qr code."""
await zha_client.send_json(
{ID: 14, TYPE: f"{DOMAIN}/devices/{SERVICE_PERMIT}", **params}
)
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 1
assert app_controller.permit_with_key.await_args[1]["time_s"] == 60
assert app_controller.permit_with_key.await_args[1]["node"] == src_ieee
assert app_controller.permit_with_key.await_args[1]["code"] == code
@pytest.mark.parametrize("params", IC_FAIL_PARAMS)
async def test_ws_permit_with_install_code_fail(app_controller, zha_client, params):
"""Test permit ws service with install code."""
await zha_client.send_json(
{ID: 14, TYPE: f"{DOMAIN}/devices/{SERVICE_PERMIT}", **params}
)
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
assert msg["success"] is False
assert app_controller.permit.await_count == 0
assert app_controller.permit_with_key.call_count == 0
@pytest.mark.parametrize(
"params, duration, node",
(
({}, 60, None),
({ATTR_DURATION: 30}, 30, None),
(
{ATTR_DURATION: 33, ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:dd"},
33,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:dd"),
),
(
{ATTR_IEEE: "aa:bb:cc:dd:aa:bb:cc:d1"},
60,
zigpy.types.EUI64.convert("aa:bb:cc:dd:aa:bb:cc:d1"),
),
),
)
async def test_ws_permit_ha12(app_controller, zha_client, params, duration, node):
"""Test permit ws service."""
await zha_client.send_json(
{ID: 14, TYPE: f"{DOMAIN}/devices/{SERVICE_PERMIT}", **params}
)
msg = await zha_client.receive_json()
assert msg["id"] == 14
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert app_controller.permit.await_count == 1
assert app_controller.permit.await_args[1]["time_s"] == duration
assert app_controller.permit.await_args[1]["node"] == node
assert app_controller.permit_with_key.call_count == 0
| mit |
hynnet/openwrt-mt7620 | staging_dir/host/lib/python2.7/optparse.py | 175 | 61124 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import types
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext
except ImportError:
def gettext(message):
return message
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError, "subclasses must implement"
def format_heading(self, heading):
raise NotImplementedError, "subclasses must implement"
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
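# Hedged examples of the radix detection above (doctest-style, not executed):
#
#   _parse_num("0x1A", int)   # -> 26  (hexadecimal)
#   _parse_num("0b101", int)  # -> 5   (binary; the "0b" prefix is stripped)
#   _parse_num("017", int)    # -> 15  (a leading "0" means octal)
#   _parse_num("42", int)     # -> 42  (decimal)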
def _parse_int(val):
return _parse_num(val, int)
def _parse_long(val):
return _parse_num(val, long)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_long, _("long integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = attrs.keys()
attrs.sort()
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of __builtin__ is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import __builtin__
if ( type(self.type) is types.TypeType or
(hasattr(self.type, "__name__") and
getattr(__builtin__, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not hasattr(self.callback, '__call__'):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
type(self.callback_args) is not types.TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not types.DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
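# Hedged construction sketch, mirroring the checks above (illustrative values):
#
#   opt = Option("-f", "--file",
#                action="store", type="string", dest="filename",
#                metavar="FILE", help="write report to FILE")
#   # Had dest been omitted, _check_dest() would derive "file" from "--file".
#   # str(opt) -> "-f/--file"; opt.takes_value() -> True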
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
try:
basestring
except NameError:
def isbasestring(x):
return isinstance(x, (types.StringType, types.UnicodeType))
else:
def isbasestring(x):
return isinstance(x, basestring)
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __cmp__(self, other):
if isinstance(other, Values):
return cmp(self.__dict__, other.__dict__)
elif isinstance(other, types.DictType):
return cmp(self.__dict__, other)
else:
return -1
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %r" % mode
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
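# Hedged sketch of Values semantics (illustrative):
#
#   vals = Values({"verbose": None})
#   vals.ensure_value("verbose", 0)   # -> 0, and sets vals.verbose = 0
#   vals.ensure_value("verbose", 99)  # -> 0; the attribute is already non-None
#   vals._update_careful({"verbose": 2, "unknown": 1})
#   # vals.verbose == 2; "unknown" has no matching attribute and is ignored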
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError, "invalid conflict_resolution value %r" % handler
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) in types.StringTypes:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if not option.help is SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isbasestring(default):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is an Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print >>file, self.get_usage()
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print >>file, self.get_version()
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
# used by test suite
def _get_encoding(self, file):
encoding = getattr(file, "encoding", None)
if not encoding:
encoding = sys.getdefaultencoding()
return encoding
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
encoding = self._get_encoding(file)
file.write(self.format_help().encode(encoding, "replace"))
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
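# For illustration, with a hypothetical wordmap the rules above give:
#   _match_abbrev("--ver",  {"--verbose": 1, "--version": 1})
#       -> raises AmbiguousOptionError ("--ver" prefixes both candidates)
#   _match_abbrev("--verb", {"--verbose": 1, "--version": 1})
#       -> "--verbose" (unique prefix match)
#   _match_abbrev("--version", {"--verbose": 1, "--version": 1})
#       -> "--version" (exact matches are checked first)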
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
| gpl-2.0 |
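For orientation, here is a minimal usage sketch of the parser API defined in the file above. The option names and values are illustrative; since this code mirrors the standard-library optparse API, the same calls also run against stdlib optparse:

```python
from optparse import OptionParser, OptionGroup  # stdlib twin of the code above

parser = OptionParser(usage="%prog [options] FILE", version="%prog 1.0")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                  default=False, help="print status messages")

group = OptionGroup(parser, "Dangerous Options",
                    "Caution: use these at your own risk.")
group.add_option("-f", "--force", action="store_true", dest="force")
parser.add_option_group(group)

options, args = parser.parse_args(["-v", "--force", "input.txt"])
assert options.verbose and options.force and args == ["input.txt"]
```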
jdkernel/incrediblec_sense_2.6.35 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
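For orientation, a standalone Python 3 sketch of the pattern-matching step the script applies to `readelf -u` output; the sample line and function name below are made up:

```python
import re

start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
sample = "<my_func>: [0x0000-0x0060]"  # hypothetical readelf -u header line

m = start_pattern.match(sample)
func = m.group(1)
start, end = int(m.group(2), 16), int(m.group(3), 16)
# Each 16-byte IA-64 bundle holds 3 instruction slots, hence the formula:
slots = 3 * (end - start) // 16
print(func, slots)  # my_func 18
```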
chromaway/ngcccbase | install_https.py | 4 | 1916 | #!/usr/bin/env python
import urllib2
import httplib
import ssl
import socket
import os
import sys
try:
main_file = os.path.abspath(sys.modules['__main__'].__file__)
except AttributeError:
main_file = sys.executable
CERT_FILE = os.path.join(os.path.dirname(main_file), 'cacert.pem')
class ValidHTTPSConnection(httplib.HTTPConnection):
"This class allows communication via SSL."
default_port = httplib.HTTPS_PORT
def __init__(self, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock,
ca_certs=CERT_FILE,
cert_reqs=ssl.CERT_REQUIRED)
class ValidHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(ValidHTTPSConnection, req)
urllib2.install_opener(urllib2.build_opener(ValidHTTPSHandler))
if __name__ == "__main__":
def test_access(url):
        print "Accessing", url
page = urllib2.urlopen(url)
print page.info()
data = page.read()
print "First 100 bytes:", data[0:100]
        print "Done accessing", url
print ""
# This should work
test_access("https://blockchain.info")
test_access("http://www.google.com")
# Accessing a page with a self signed certificate should not work
# At the time of writing, the following page uses a self signed certificate
test_access("https://tidia.ita.br/")
| mit |
hopeall/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ticosax/django | tests/template_tests/filter_tests/test_timesince.py | 207 | 5422 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timesince_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimesinceTests(TimezoneTestCase):
"""
#20246 - \xa0 in output avoids line-breaks between value and unit
"""
# Default compare with datetime.now()
@setup({'timesince01': '{{ a|timesince }}'})
def test_timesince01(self):
output = self.engine.render_to_string('timesince01', {'a': datetime.now() + timedelta(minutes=-1, seconds=-10)})
self.assertEqual(output, '1\xa0minute')
@setup({'timesince02': '{{ a|timesince }}'})
def test_timesince02(self):
output = self.engine.render_to_string('timesince02', {'a': datetime.now() - timedelta(days=1, minutes=1)})
self.assertEqual(output, '1\xa0day')
@setup({'timesince03': '{{ a|timesince }}'})
def test_timesince03(self):
output = self.engine.render_to_string('timesince03', {'a': datetime.now() - timedelta(hours=1, minutes=25, seconds=10)})
self.assertEqual(output, '1\xa0hour, 25\xa0minutes')
# Compare to a given parameter
@setup({'timesince04': '{{ a|timesince:b }}'})
def test_timesince04(self):
output = self.engine.render_to_string(
'timesince04',
{'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=1)},
)
self.assertEqual(output, '1\xa0day')
@setup({'timesince05': '{{ a|timesince:b }}'})
def test_timesince05(self):
output = self.engine.render_to_string(
'timesince05',
{'a': self.now - timedelta(days=2, minutes=1), 'b': self.now - timedelta(days=2)},
)
self.assertEqual(output, '1\xa0minute')
# Check that timezone is respected
@setup({'timesince06': '{{ a|timesince:b }}'})
def test_timesince06(self):
output = self.engine.render_to_string('timesince06', {'a': self.now_tz - timedelta(hours=8), 'b': self.now_tz})
self.assertEqual(output, '8\xa0hours')
# Tests for #7443
@setup({'timesince07': '{{ earlier|timesince }}'})
def test_timesince07(self):
output = self.engine.render_to_string('timesince07', {'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '1\xa0week')
@setup({'timesince08': '{{ earlier|timesince:now }}'})
def test_timesince08(self):
output = self.engine.render_to_string('timesince08', {'now': self.now, 'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '1\xa0week')
@setup({'timesince09': '{{ later|timesince }}'})
def test_timesince09(self):
output = self.engine.render_to_string('timesince09', {'later': self.now + timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince10': '{{ later|timesince:now }}'})
def test_timesince10(self):
output = self.engine.render_to_string('timesince10', {'now': self.now, 'later': self.now + timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
# Ensures that differing timezones are calculated correctly.
@setup({'timesince11': '{{ a|timesince }}'})
def test_timesince11(self):
output = self.engine.render_to_string('timesince11', {'a': self.now})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timesince12': '{{ a|timesince }}'})
def test_timesince12(self):
output = self.engine.render_to_string('timesince12', {'a': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timesince13': '{{ a|timesince }}'})
def test_timesince13(self):
output = self.engine.render_to_string('timesince13', {'a': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince14': '{{ a|timesince:b }}'})
def test_timesince14(self):
output = self.engine.render_to_string('timesince14', {'a': self.now_tz, 'b': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince15': '{{ a|timesince:b }}'})
def test_timesince15(self):
output = self.engine.render_to_string('timesince15', {'a': self.now, 'b': self.now_tz_i})
self.assertEqual(output, '')
@setup({'timesince16': '{{ a|timesince:b }}'})
def test_timesince16(self):
output = self.engine.render_to_string('timesince16', {'a': self.now_tz_i, 'b': self.now})
self.assertEqual(output, '')
# Tests for #9065 (two date objects).
@setup({'timesince17': '{{ a|timesince:b }}'})
def test_timesince17(self):
output = self.engine.render_to_string('timesince17', {'a': self.today, 'b': self.today})
self.assertEqual(output, '0\xa0minutes')
@setup({'timesince18': '{{ a|timesince:b }}'})
def test_timesince18(self):
output = self.engine.render_to_string('timesince18', {'a': self.today, 'b': self.today + timedelta(hours=24)})
self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
def test_since_now(self):
self.assertEqual(timesince_filter(datetime.now() - timedelta(1)), '1\xa0day')
def test_explicit_date(self):
self.assertEqual(timesince_filter(datetime(2005, 12, 29), datetime(2005, 12, 30)), '1\xa0day')
| bsd-3-clause |
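Outside the template-engine harness, the same behaviour can be checked directly against the filter function (a sketch assuming a configured Django settings module). The key point these tests pin down is the U+00A0 non-breaking space joining value and unit:

```python
from datetime import datetime, timedelta
from django.template.defaultfilters import timesince_filter

d = datetime(2005, 12, 29)
print(repr(timesince_filter(d, d + timedelta(days=1))))  # '1\xa0day'
```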
pepetreshere/odoo | addons/website_slides_forum/models/slide_channel.py | 3 | 1510 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class Channel(models.Model):
_inherit = 'slide.channel'
forum_id = fields.Many2one('forum.forum', 'Course Forum')
forum_total_posts = fields.Integer('Number of active forum posts', related="forum_id.total_posts")
_sql_constraints = [
('forum_uniq', 'unique (forum_id)', "Only one course per forum!"),
]
def action_redirect_to_forum(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("website_forum.action_forum_post")
action['view_mode'] = 'tree'
action['context'] = {
'create': False
}
action['domain'] = [('forum_id', '=', self.forum_id.id)]
return action
@api.model
def create(self, vals):
channel = super(Channel, self.with_context(mail_create_nosubscribe=True)).create(vals)
if channel.forum_id:
channel.forum_id.privacy = False
return channel
def write(self, vals):
old_forum = self.forum_id
res = super(Channel, self).write(vals)
if 'forum_id' in vals:
self.forum_id.privacy = False
if old_forum != self.forum_id:
old_forum.write({
'privacy': 'private',
'authorized_group_id': self.env.ref('website_slides.group_website_slides_officer').id,
})
return res
| agpl-3.0 |
seamile/Weeds | BitSet/bitset.py | 1 | 3043 | from typing import Sequence
class BitmapSet:
    '''A set implemented on top of a bitmap.'''
def __init__(self, items: Sequence[int]) -> None:
self.bit_set = bytearray()
self.update(items)
@property
def count(self):
        '''Number of elements'''
n = 0
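        # Kernighan's trick: byte &= byte - 1 clears the lowest set bit,
        # so the inner loop runs once per set bit in the byte.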
for byte in self.bit_set:
while byte:
byte &= byte - 1
n += 1
return n
@property
def length(self) -> int:
        '''Length in bytes'''
return len(self.bit_set)
@property
def bit_size(self) -> int:
        '''Length in bits'''
return self.length * 8
@staticmethod
def decompose(num):
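        # num ^ (num - 1) isolates the lowest set bit (its bit_length()
        # is the bit's 1-based index); num &= num - 1 then clears it.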
while num:
yield (num ^ (num - 1)).bit_length()
num &= (num - 1)
def expand(self, length):
        '''Grow the backing store by the given number of bytes'''
self.bit_set.extend(b'\x00' * length)
    def ismember(self, num: int) -> bool:
        '''Check whether a number is a member'''
        idx, offset = divmod(num, 8)
        if idx >= self.length:
            # Beyond current capacity, so the bit was never set.
            return False
        return bool(self.bit_set[idx] & (1 << offset))
def member(self, idx: int, offset: int) -> int:
        '''Return the element encoded by a (byte index, bit offset) pair'''
return idx * 8 + offset
def members(self):
        '''Yield every element in ascending order'''
for idx, num in enumerate(self.bit_set):
for offset in self.decompose(num):
yield idx * 8 + offset - 1
def add(self, num: int) -> None:
        '''Add an element'''
idx, offset = divmod(num, 8)
need_length = idx + 1
if self.length < need_length:
self.expand(need_length - self.length)
self.bit_set[idx] |= 1 << offset
def pop(self, num: int) -> None:
        '''Remove an element (no error if it is absent)'''
idx, offset = divmod(num, 8)
if idx < self.length:
self.bit_set[idx] &= 0xff ^ (1 << offset)
def update(self, other: Sequence[int]) -> None:
        '''Merge every element of a sequence into this BitmapSet'''
for num in other:
self.add(num)
    def inter(self, other: Sequence[int]) -> 'BitmapSet':
        '''Intersection'''
        return BitmapSet([num for num in other if self.ismember(num)])
    def diff(self, other: Sequence[int]) -> 'BitmapSet':
        '''Difference: elements of self that are not in other'''
        other_set = BitmapSet(other)
        return BitmapSet([num for num in self.members()
                          if not other_set.ismember(num)])
    def union(self, other: Sequence[int]) -> 'BitmapSet':
        '''Union'''
        result = BitmapSet(list(self.members()))
        result.update(other)
        return result
if __name__ == "__main__":
items = [0, 1, 2, 3, 4, 7, 8, 9, 15, 16, 17, 19, 63, 64, 71, 81, 97, 100, 1023, 1024, 1025]
b = BitmapSet(items)
assert b.count == len(items)
idx, offset = divmod(max(items), 8)
assert b.length == idx + 1
assert b.bit_size >= idx * 8 + offset, f'{b.bit_size} >= {idx * 8 + offset}'
for i in range(max(items)):
assert b.ismember(i) == (i in items)
assert sorted(b.members()) == sorted(items)
o_cnt = b.count
b.add(100)
assert b.count == o_cnt, f'{b.count} != {o_cnt}'
b.add(111)
assert b.count == o_cnt + 1, f'{b.count} != {o_cnt+1}'
b.pop(111)
assert b.count == o_cnt, f'{b.count} != {o_cnt}'
b.pop(5)
assert b.count == o_cnt, f'{b.count} != {o_cnt}'
| mit |
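A short usage sketch of the set operations implemented above; each call returns a new BitmapSet, and `other` may be any sequence of ints:

```python
a = BitmapSet([1, 2, 3, 8])
print(sorted(a.inter([2, 3, 100]).members()))  # [2, 3]
print(sorted(a.diff([2, 3]).members()))        # [1, 8]
print(sorted(a.union([64, 65]).members()))     # [1, 2, 3, 8, 64, 65]
```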
coreynicholson/youtube-dl | youtube_dl/extractor/go.py | 29 | 8340 | # coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..utils import (
int_or_none,
determine_ext,
parse_age_limit,
urlencode_postdata,
ExtractorError,
)
class GoIE(AdobePassIE):
_SITE_INFO = {
'abc': {
'brand': '001',
'requestor_id': 'ABC',
},
'freeform': {
'brand': '002',
'requestor_id': 'ABCFamily',
},
'watchdisneychannel': {
'brand': '004',
'requestor_id': 'Disney',
},
'watchdisneyjunior': {
'brand': '008',
'requestor_id': 'DisneyJunior',
},
'watchdisneyxd': {
'brand': '009',
'requestor_id': 'DisneyXD',
}
}
_VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:(?:[^/]+/)*(?P<id>vdka\w+)|(?:[^/]+/)*(?P<display_id>[^/?#]+))' % '|'.join(_SITE_INFO.keys())
_TESTS = [{
'url': 'http://abc.go.com/shows/designated-survivor/video/most-recent/VDKA3807643',
'info_dict': {
'id': 'VDKA3807643',
'ext': 'mp4',
'title': 'The Traitor in the White House',
'description': 'md5:05b009d2d145a1e85d25111bd37222e8',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://watchdisneyxd.go.com/doraemon',
'info_dict': {
'title': 'Doraemon',
'id': 'SH55574025',
},
'playlist_mincount': 51,
}, {
'url': 'http://abc.go.com/shows/the-catch/episode-guide/season-01/10-the-wedding',
'only_matching': True,
}, {
'url': 'http://abc.go.com/shows/world-news-tonight/episode-guide/2017-02/17-021717-intense-stand-off-between-man-with-rifle-and-police-in-oakland',
'only_matching': True,
}]
def _extract_videos(self, brand, video_id='-1', show_id='-1'):
display_id = video_id if video_id != '-1' else show_id
return self._download_json(
'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/%s/001/-1/%s/-1/%s/-1/-1.json' % (brand, show_id, video_id),
display_id)['video']
def _real_extract(self, url):
sub_domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
site_info = self._SITE_INFO[sub_domain]
brand = site_info['brand']
if not video_id:
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
# There may be inner quotes, e.g. data-video-id="'VDKA3609139'"
# from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
r'data-video-id=["\']*(VDKA\w+)', webpage, 'video id', default=None)
if not video_id:
# show extraction works for Disney, DisneyJunior and DisneyXD
# ABC and Freeform has different layout
show_id = self._search_regex(r'data-show-id=["\']*(SH\d+)', webpage, 'show id')
videos = self._extract_videos(brand, show_id=show_id)
show_title = self._search_regex(r'data-show-title="([^"]+)"', webpage, 'show title', fatal=False)
entries = []
for video in videos:
entries.append(self.url_result(
video['url'], 'Go', video.get('id'), video.get('title')))
entries.reverse()
return self.playlist_result(entries, show_id, show_title)
video_data = self._extract_videos(brand, video_id)[0]
video_id = video_data['id']
title = video_data['title']
formats = []
for asset in video_data.get('assets', {}).get('asset', []):
asset_url = asset.get('value')
if not asset_url:
continue
format_id = asset.get('format')
ext = determine_ext(asset_url)
if ext == 'm3u8':
video_type = video_data.get('type')
data = {
'video_id': video_data['id'],
'video_type': video_type,
'brand': brand,
'device': '001',
}
if video_data.get('accesslevel') == '1':
requestor_id = site_info['requestor_id']
resource = self._get_mvpd_resource(
requestor_id, title, video_id, None)
auth = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
data.update({
'token': auth,
'token_type': 'ap',
'adobe_requestor_id': requestor_id,
})
else:
self._initialize_geo_bypass(['US'])
entitlement = self._download_json(
'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json',
video_id, data=urlencode_postdata(data))
errors = entitlement.get('errors', {}).get('errors', [])
if errors:
for error in errors:
if error.get('code') == 1002:
self.raise_geo_restricted(
error['message'], countries=['US'])
error_message = ', '.join([error['message'] for error in errors])
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
asset_url += '?' + entitlement['uplynkData']['sessionKey']
formats.extend(self._extract_m3u8_formats(
asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False))
else:
f = {
'format_id': format_id,
'url': asset_url,
'ext': ext,
}
if re.search(r'(?:/mp4/source/|_source\.mp4)', asset_url):
f.update({
'format_id': ('%s-' % format_id if format_id else '') + 'SOURCE',
'preference': 1,
})
else:
mobj = re.search(r'/(\d+)x(\d+)/', asset_url)
if mobj:
height = int(mobj.group(2))
f.update({
'format_id': ('%s-' % format_id if format_id else '') + '%dP' % height,
'width': int(mobj.group(1)),
'height': height,
})
formats.append(f)
self._sort_formats(formats)
subtitles = {}
for cc in video_data.get('closedcaption', {}).get('src', []):
cc_url = cc.get('value')
if not cc_url:
continue
ext = determine_ext(cc_url)
if ext == 'xml':
ext = 'ttml'
subtitles.setdefault(cc.get('lang'), []).append({
'url': cc_url,
'ext': ext,
})
thumbnails = []
for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []):
thumbnail_url = thumbnail.get('value')
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('longdescription') or video_data.get('description'),
'duration': int_or_none(video_data.get('duration', {}).get('value'), 1000),
'age_limit': parse_age_limit(video_data.get('tvrating', {}).get('rating')),
'episode_number': int_or_none(video_data.get('episodenumber')),
'series': video_data.get('show', {}).get('title'),
'season_number': int_or_none(video_data.get('season', {}).get('num')),
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
| unlicense |
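As a standalone illustration, the `_VALID_URL` pattern above splits a URL like this (regex copied from the class with the site list inlined). Note the `vdka` branch is lowercase-only, so the uppercase ID in the first test URL is captured as `display_id` and the real video id is then scraped from the page:

```python
import re

sites = 'abc|freeform|watchdisneychannel|watchdisneyjunior|watchdisneyxd'
valid_url = (r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/'
             r'(?:(?:[^/]+/)*(?P<id>vdka\w+)|(?:[^/]+/)*(?P<display_id>[^/?#]+))'
             % sites)

url = 'http://abc.go.com/shows/designated-survivor/video/most-recent/VDKA3807643'
m = re.match(valid_url, url)
print(m.group('sub_domain'), m.group('id'), m.group('display_id'))
# -> abc None VDKA3807643
```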
ARMmbed/yotta | yotta/remove.py | 3 | 2265 | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import logging
import os
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
# validate, , validate things, internal
from yotta.lib import validate
def addOptions(parser):
parser.add_argument('module', default=None, nargs='?', metavar='<module>',
help='Name of the module to remove. If omitted the current module '+
'or target will be removed from the global linking directory.'
)
def execCommand(args, following_args):
module_or_target = 'module'
if 'target' in args.subcommand_name:
module_or_target = 'target'
if args.module is not None:
return removeDependency(args, module_or_target)
else:
return removeGlobally(module_or_target)
def rmLinkOrDirectory(path, nonexistent_warning):
if not os.path.exists(path):
logging.warning(nonexistent_warning)
return 1
if fsutils.isLink(path):
fsutils.rmF(path)
else:
fsutils.rmRf(path)
return 0
def removeGlobally(module_or_target):
# folders, , get places to install things, internal
from yotta.lib import folders
if module_or_target == 'module':
global_dir = folders.globalInstallDirectory()
p = validate.currentDirectoryModule()
else:
global_dir = folders.globalTargetInstallDirectory()
p = validate.currentDirectoryTarget()
if p is None:
return 1
path = os.path.join(global_dir, p.getName())
return rmLinkOrDirectory(path, ('%s is not linked globally' % p.getName()))
def removeDependency(args, module_or_target):
c = validate.currentDirectoryModule()
if not c:
return 1
if module_or_target == 'module':
subdir = c.modulesPath()
err = validate.componentNameValidationError(args.module)
else:
subdir = c.targetsPath()
err = validate.targetNameValidationError(args.module)
if err:
logging.error(err)
return 1
path = os.path.join(subdir, args.module)
return rmLinkOrDirectory(path, '%s %s not found' % (('dependency', 'target')[module_or_target=='target'], args.module))
| apache-2.0 |
nolanliou/tensorflow | tensorflow/python/framework/op_def_registry.py | 196 | 1428 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import op_def_pb2
_registered_ops = {}
def register_op_list(op_list):
"""Register all the ops in an op_def_pb2.OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
if op_def.name in _registered_ops:
assert _registered_ops[op_def.name] == op_def
else:
_registered_ops[op_def.name] = op_def
def get_registered_ops():
"""Returns a dictionary mapping names to OpDefs."""
return _registered_ops
| apache-2.0 |
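A minimal sketch of registering and querying an op through this module; "MyCustomOp" is a made-up name, and the import paths assume the TF 1.x layout shown above:

```python
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry

op_list = op_def_pb2.OpList()
op_def = op_list.op.add()   # append an empty OpDef to the repeated field
op_def.name = "MyCustomOp"

op_def_registry.register_op_list(op_list)
assert "MyCustomOp" in op_def_registry.get_registered_ops()
```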
manipopopo/tensorflow | tensorflow/python/ops/distributions/normal.py | 16 | 9263 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Normal",
"NormalWithSoftplusScale",
]
@tf_export("distributions.Normal")
class Normal(distribution.Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
Z = (2 pi sigma**2)**0.5
```
where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z`
is the normalization constant.
The Normal distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Normal(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Normal distribution.
dist = tf.distributions.Normal(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tf.distributions.Normal(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tf.distributions.Normal(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Normal"):
"""Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc),
array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _log_unnormalized_prob(self, x):
return -0.5 * math_ops.square(self._z(x))
def _log_normalization(self):
return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale * array_ops.ones_like(self.loc)
return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _quantile(self, p):
return self._inv_z(special_math.ndtri(p))
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with ops.name_scope("reconstruct", values=[z]):
return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
"""Normal with softplus applied to `scale`."""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="NormalWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
super(NormalWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.scale)
s_b_squared = math_ops.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (math_ops.square(n_a.loc - n_b.loc) / (two * s_b_squared) +
half * (ratio - one - math_ops.log(ratio)))
| apache-2.0 |
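As a sanity check, the closed form computed by `_kl_normal_normal` above can be reproduced in plain NumPy (a standalone sketch, not TensorFlow code):

```python
import numpy as np

def kl_normal(mu_a, s_a, mu_b, s_b):
    # KL(N(mu_a, s_a**2) || N(mu_b, s_b**2)), mirroring the TF graph above.
    ratio = (s_a / s_b) ** 2
    return ((mu_a - mu_b) ** 2 / (2.0 * s_b ** 2)
            + 0.5 * (ratio - 1.0 - np.log(ratio)))

print(kl_normal(0.0, 1.0, 0.0, 1.0))  # 0.0 -- identical distributions
print(kl_normal(1.0, 1.0, 0.0, 2.0))  # ~0.4431
```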
charris/numpy | numpy/lib/arraysetops.py | 4 | 26426 | """
Set operations for arrays based on sorting.
Notes
-----
For floating point arrays, inaccurate results may appear due to the usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
`numpy.sort` that could provide the permutation vectors directly, thus avoiding
calls to `numpy.argsort`.
Original author: Robert Cimrman
"""
import functools
import numpy as np
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d', 'isin'
]
def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
return (ary, to_end, to_begin)
@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : array_like
If necessary, will be flattened before the differences are taken.
to_end : array_like, optional
Number(s) to append at the end of the returned differences.
to_begin : array_like, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
See Also
--------
diff, gradient
Notes
-----
When applied to masked arrays, this function drops the mask information
if the `to_begin` and/or `to_end` parameters are used.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1, 2, 3, -7])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
array([-99, 1, 2, ..., -7, 88, 99])
The returned array is always 1D.
>>> y = [[1, 2, 4], [1, 6, 24]]
>>> np.ediff1d(y)
array([ 1, 2, -3, 5, 18])
"""
# force a 1d array
ary = np.asanyarray(ary).ravel()
# enforce that the dtype of `ary` is used for the output
dtype_req = ary.dtype
# fast track default case
if to_begin is None and to_end is None:
return ary[1:] - ary[:-1]
if to_begin is None:
l_begin = 0
else:
to_begin = np.asanyarray(to_begin)
if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
raise TypeError("dtype of `to_begin` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
to_end = np.asanyarray(to_end)
if not np.can_cast(to_end, dtype_req, casting="same_kind"):
raise TypeError("dtype of `to_end` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_end = to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
l_diff = max(len(ary) - 1, 0)
result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
result = ary.__array_wrap__(result)
if l_begin > 0:
result[:l_begin] = to_begin
if l_end > 0:
result[l_begin + l_diff:] = to_end
np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
return result
def _unpack_tuple(x):
""" Unpacks one-element tuples for use as return values """
if len(x) == 1:
return x[0]
else:
return x
def _unique_dispatcher(ar, return_index=None, return_inverse=None,
return_counts=None, axis=None):
return (ar,)
@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : array_like
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
.. versionadded:: 1.9.0
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. Object arrays or structured arrays
that contain objects are not supported if the `axis` kwarg is used. The
default is None.
.. versionadded:: 1.13.0
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. versionadded:: 1.9.0
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
repeat : Repeat elements of an array.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
(move the axis to the first dimension to keep the order of the other axes)
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
    .. versionchanged:: 1.21
        If nan values are in the input array, a single nan is put
        to the end of the sorted unique values.
        Also for complex arrays all NaN values are considered equivalent
        (no matter whether the NaN is in the real or imaginary part).
        The smallest value in lexicographic order is chosen as the
        representative of the returned array - see np.sort for how
        lexicographic order is defined for complex arrays.
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1, 2, 3])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1, 0, 0], [2, 3, 4]])
Return the indices of the original array that give the unique values:
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array(['a', 'b', 'c'], dtype='<U1')
>>> indices
array([0, 1, 3])
>>> a[indices]
array(['a', 'b', 'c'], dtype='<U1')
Reconstruct the input array from the unique values and inverse:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> indices
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
array([1, 2, 6, 4, 2, 3, 2])
Reconstruct the input values from the unique values and counts:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> values, counts = np.unique(a, return_counts=True)
>>> values
array([1, 2, 3, 4, 6])
>>> counts
array([1, 3, 1, 1, 1])
>>> np.repeat(values, counts)
array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
"""
ar = np.asanyarray(ar)
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts)
return _unpack_tuple(ret)
# axis was specified and not None
try:
ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim) from None
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
ar = np.ascontiguousarray(ar)
dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
# At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
# data type with `m` fields where each field has the data type of `ar`.
# In the following, we create the array `consolidated`, which has
# shape `(n,)` with data type `dtype`.
try:
if ar.shape[1] > 0:
consolidated = ar.view(dtype)
else:
# If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
# a data type with itemsize 0, and the call `ar.view(dtype)` will
# fail. Instead, we'll use `np.empty` to explicitly create the
# array with shape `(len(ar),)`. Because `dtype` in this case has
# itemsize 0, the total size of the result is still 0 bytes.
consolidated = np.empty(len(ar), dtype=dtype)
except TypeError as e:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
raise TypeError(msg.format(dt=ar.dtype)) from e
def reshape_uniq(uniq):
n = len(uniq)
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(n, *orig_shape[1:])
uniq = np.moveaxis(uniq, 0, axis)
return uniq
output = _unique1d(consolidated, return_index,
return_inverse, return_counts)
output = (reshape_uniq(output[0]),) + output[1:]
return _unpack_tuple(output)
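# Illustrative sketch (not part of the original source): the structured-view
# consolidation used above for unique(..., axis=0), applied by hand to a
# small contiguous 2-D array.
def _example_unique_axis_trick():
    a = np.ascontiguousarray([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    dt = [('f{i}'.format(i=i), a.dtype) for i in range(a.shape[1])]
    consolidated = a.view(dt)       # one structured element per row
    uniq = np.unique(consolidated)  # rows now compare as single elements
    rows = uniq.view(a.dtype).reshape(-1, a.shape[1])
    assert np.array_equal(rows, [[1, 0, 0], [2, 3, 4]])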
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
"""
Find the unique elements of an array, ignoring shape.
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
mask = np.empty(aux.shape, dtype=np.bool_)
mask[:1] = True
if aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and np.isnan(aux[-1]):
if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent
aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
else:
aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
mask[1:aux_firstnan] = (aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
mask[aux_firstnan] = True
mask[aux_firstnan + 1:] = False
else:
mask[1:] = aux[1:] != aux[:-1]
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = np.cumsum(mask) - 1
inv_idx = np.empty(mask.shape, dtype=np.intp)
inv_idx[perm] = imask
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
ret += (np.diff(idx),)
return ret
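# Illustrative sketch (not part of the original source): the NaN block above
# collapses every trailing NaN into a single entry (NumPy >= 1.21 behaviour).
def _example_unique_nan_handling():
    u = np.unique(np.array([1.0, np.nan, 1.0, np.nan, 2.0]))
    assert u.size == 3 and np.isnan(u[-1])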
def _intersect1d_dispatcher(
ar1, ar2, assume_unique=None, return_indices=None):
return (ar1, ar2)
@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
Return the sorted, unique values that are in both of the input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
unique, incorrect results and out-of-bounds indices could result.
Default is False.
return_indices : bool
If True, the indices which correspond to the intersection of the two
arrays are returned. The first instance of a value is used if there are
multiple. Default is False.
.. versionadded:: 1.15.0
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
comm1 : ndarray
The indices of the first occurrences of the common values in `ar1`.
Only provided if `return_indices` is True.
comm2 : ndarray
The indices of the first occurrences of the common values in `ar2`.
Only provided if `return_indices` is True.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
array([1, 3])
To intersect more than two arrays, use functools.reduce:
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
To return the indices of the values common to the input arrays
along with the intersected values:
>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
>>> x_ind, y_ind
(array([0, 2, 4]), array([1, 0, 2]))
>>> xy, x[x_ind], y[y_ind]
(array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
"""
ar1 = np.asanyarray(ar1)
ar2 = np.asanyarray(ar2)
if not assume_unique:
if return_indices:
ar1, ind1 = unique(ar1, return_index=True)
ar2, ind2 = unique(ar2, return_index=True)
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
else:
ar1 = ar1.ravel()
ar2 = ar2.ravel()
aux = np.concatenate((ar1, ar2))
if return_indices:
aux_sort_indices = np.argsort(aux, kind='mergesort')
aux = aux[aux_sort_indices]
else:
aux.sort()
mask = aux[1:] == aux[:-1]
int1d = aux[:-1][mask]
if return_indices:
ar1_indices = aux_sort_indices[:-1][mask]
ar2_indices = aux_sort_indices[1:][mask] - ar1.size
if not assume_unique:
ar1_indices = ind1[ar1_indices]
ar2_indices = ind2[ar2_indices]
return int1d, ar1_indices, ar2_indices
else:
return int1d
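# Illustrative sketch (not part of the original source): the stable mergesort
# above places, for every common value, the ar1 occurrence immediately before
# the ar2 occurrence, so the recovered indices point at first occurrences.
def _example_intersect1d_indices():
    x = np.array([1, 1, 2, 3, 4])
    y = np.array([2, 1, 4, 6])
    xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
    assert np.array_equal(x[x_ind], xy) and np.array_equal(y[y_ind], xy)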
def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
return (ar1, ar2)
@array_function_dispatch(_setxor1d_dispatcher)
def setxor1d(ar1, ar2, assume_unique=False):
"""
Find the set exclusive-or of two arrays.
Return the sorted, unique values that are in only one (not both) of the
input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
setxor1d : ndarray
Sorted 1D array of unique values that are in only one of the input
arrays.
Examples
--------
>>> a = np.array([1, 2, 3, 2, 4])
>>> b = np.array([2, 3, 5, 7, 5])
>>> np.setxor1d(a,b)
array([1, 4, 5, 7])
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = np.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
return aux[flag[1:] & flag[:-1]]
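# Illustrative sketch (not part of the original source): the flag trick above
# keeps an element iff both of its run boundaries are True, i.e. it occurs
# exactly once in the concatenated sorted array.
def _example_setxor1d_flag_trick():
    aux = np.array([1, 2, 2, 3, 4, 5])  # already sorted concatenation
    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
    assert np.array_equal(aux[flag[1:] & flag[:-1]], [1, 3, 4, 5])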
def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None):
return (ar1, ar2)
@array_function_dispatch(_in1d_dispatcher)
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of a 1-D array is also present in a second array.
Returns a boolean array the same length as `ar1` that is True
where an element of `ar1` is in `ar2` and False otherwise.
We recommend using :func:`isin` instead of `in1d` for new code.
Parameters
----------
ar1 : (M,) array_like
Input array.
ar2 : array_like
The values against which to test each value of `ar1`.
assume_unique : bool, optional
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned array are inverted (that is,
False where an element of `ar1` is in `ar2` and True otherwise).
Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
to (but is faster than) ``np.invert(in1d(a, b))``.
.. versionadded:: 1.8.0
Returns
-------
in1d : (M,) ndarray, bool
The values `ar1[in1d]` are in `ar2`.
See Also
--------
isin : Version of this function that preserves the
shape of ar1.
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Notes
-----
`in1d` can be considered as an element-wise function version of the
python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
equivalent to ``np.array([item in b for item in a])``.
    However, this idea fails if `ar2` is a set, or similar (non-sequence)
    container, because in that case ``asarray(ar2)`` yields an object array
    wrapping the container itself rather than the expected array of the
    contained values.
.. versionadded:: 1.4.0
Examples
--------
>>> test = np.array([0, 1, 2, 5, 0])
>>> states = [0, 2]
>>> mask = np.in1d(test, states)
>>> mask
array([ True, False, True, False, True])
>>> test[mask]
array([0, 2, 0])
>>> mask = np.in1d(test, states, invert=True)
>>> mask
array([False, True, False, True, False])
>>> test[mask]
array([1, 5])
"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# Ensure that iteration through object arrays yields size-1 arrays
if ar2.dtype == object:
ar2 = ar2.reshape(-1, 1)
# Check if one of the arrays may contain arbitrary objects
contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
# This code is run when
# a) the first condition is true, making the code significantly faster
# b) the second condition is true (i.e. `ar1` or `ar2` may contain
# arbitrary objects), since then sorting is not guaranteed to work
if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
if invert:
mask = np.ones(len(ar1), dtype=bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
ret = np.empty(ar.shape, dtype=bool)
ret[order] = flag
if assume_unique:
return ret[:len(ar1)]
else:
return ret[rev_idx]
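# Illustrative sketch (not part of the original source): both branches above
# produce the same mask; the loop branch wins when ar2 is tiny relative to
# ar1, the sort-based branch otherwise.
def _example_in1d_strategies():
    test = np.array([0, 1, 2, 5, 0])
    states = np.array([0, 2])
    assert np.array_equal(np.in1d(test, states),
                          [True, False, True, False, True])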
def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None):
return (element, test_elements)
@array_function_dispatch(_isin_dispatcher)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
Returns a boolean array of the same shape as `element` that is True
where an element of `element` is in `test_elements` and False otherwise.
Parameters
----------
element : array_like
Input array.
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if it is an array or array_like.
See notes for behavior with non-array-like parameters.
assume_unique : bool, optional
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned array are inverted, as if
calculating `element not in test_elements`. Default is False.
``np.isin(a, b, invert=True)`` is equivalent to (but faster
than) ``np.invert(np.isin(a, b))``.
Returns
-------
isin : ndarray, bool
Has the same shape as `element`. The values `element[isin]`
are in `test_elements`.
See Also
--------
in1d : Flattened version of this function.
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Notes
-----
`isin` is an element-wise function version of the python keyword `in`.
``isin(a, b)`` is roughly equivalent to
``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
`element` and `test_elements` are converted to arrays if they are not
already. If `test_elements` is a set (or other non-sequence collection)
it will be converted to an object array with one element, rather than an
array of the values contained in `test_elements`. This is a consequence
of the `array` constructor's way of handling non-sequence collections.
Converting the set to a list usually gives the desired behavior.
.. versionadded:: 1.13.0
Examples
--------
>>> element = 2*np.arange(4).reshape((2, 2))
>>> element
array([[0, 2],
[4, 6]])
>>> test_elements = [1, 2, 4, 8]
>>> mask = np.isin(element, test_elements)
>>> mask
array([[False, True],
[ True, False]])
>>> element[mask]
array([2, 4])
The indices of the matched values can be obtained with `nonzero`:
>>> np.nonzero(mask)
(array([0, 1]), array([1, 0]))
The test can also be inverted:
>>> mask = np.isin(element, test_elements, invert=True)
>>> mask
array([[ True, False],
[False, True]])
>>> element[mask]
array([0, 6])
Because of how `array` handles sets, the following does not
work as expected:
>>> test_set = {1, 2, 4, 8}
>>> np.isin(element, test_set)
array([[False, False],
[False, False]])
Casting the set to a list gives the expected result:
>>> np.isin(element, list(test_set))
array([[False, True],
[ True, False]])
"""
element = np.asarray(element)
return in1d(element, test_elements, assume_unique=assume_unique,
invert=invert).reshape(element.shape)
def _union1d_dispatcher(ar1, ar2):
return (ar1, ar2)
@array_function_dispatch(_union1d_dispatcher)
def union1d(ar1, ar2):
"""
Find the union of two arrays.
Return the unique, sorted array of values that are in either of the two
input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays. They are flattened if they are not already 1D.
Returns
-------
union1d : ndarray
Unique, sorted union of the input arrays.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.union1d([-1, 0, 1], [-2, 0, 2])
array([-2, -1, 0, 1, 2])
To find the union of more than two arrays, use functools.reduce:
>>> from functools import reduce
>>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([1, 2, 3, 4, 6])
"""
return unique(np.concatenate((ar1, ar2), axis=None))
def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
return (ar1, ar2)
@array_function_dispatch(_setdiff1d_dispatcher)
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Find the set difference of two arrays.
Return the unique values in `ar1` that are not in `ar2`.
Parameters
----------
ar1 : array_like
Input array.
ar2 : array_like
Input comparison array.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
setdiff1d : ndarray
1D array of values in `ar1` that are not in `ar2`. The result
is sorted when `assume_unique=False`, but otherwise only sorted
if the input is sorted.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> a = np.array([1, 2, 3, 2, 4, 1])
>>> b = np.array([3, 4, 5, 6])
>>> np.setdiff1d(a, b)
array([1, 2])
"""
if assume_unique:
ar1 = np.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
| bsd-3-clause |
bottompawn/kbengine | kbe/res/scripts/common/Lib/random.py | 91 | 26084 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
  and is, therefore, thread-safe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
if a is None:
try:
# Seed with enough bytes to span the 19937 bit
# state space for the Mersenne Twister
a = int.from_bytes(_urandom(2500), 'big')
except NotImplementedError:
import time
a = int(time.time() * 256) # use fractional seconds
if version == 2:
if isinstance(a, (str, bytes, bytearray)):
if isinstance(a, str):
a = a.encode()
a += _sha512(a).digest()
a = int.from_bytes(a, 'big')
super().seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super().setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple(x % (2**32) for x in internalstate)
except ValueError as e:
raise TypeError from e
super().setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = _int(start)
if istart != start:
raise ValueError("non-integer arg 1 for randrange()")
if stop is None:
if istart > 0:
return self._randbelow(istart)
raise ValueError("empty range for randrange()")
# stop argument supplied.
istop = _int(stop)
if istop != stop:
raise ValueError("non-integer stop for randrange()")
width = istop - istart
if step == 1 and width > 0:
return istart + self._randbelow(width)
if step == 1:
raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
# Non-unit step argument supplied.
istep = _int(step)
if istep != step:
raise ValueError("non-integer step for randrange()")
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError("zero step for randrange()")
if n <= 0:
raise ValueError("empty range for randrange()")
return istart + istep*self._randbelow(n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
"Return a random int in the range [0,n). Raises ValueError if n==0."
random = self.random
getrandbits = self.getrandbits
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
if type(random) is BuiltinMethod or type(getrandbits) is Method:
k = n.bit_length() # don't use (n-1) here because n can be 1
r = getrandbits(k) # 0 <= r < 2**k
while r >= n:
r = getrandbits(k)
return r
        # There's an overridden random() method but no new getrandbits() method,
# so we can only use random() from here.
if n >= maxsize:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
while r >= limit:
r = random()
return int(r*maxsize) % n
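    # Illustrative note (not part of the stdlib source): the getrandbits
    # branch above is plain rejection sampling. For n == 5 it draws
    # k == 3 bits, i.e. a uniform value in 0..7, and redraws whenever the
    # candidate falls in the rejected tail {5, 6, 7}:
    #
    #     r = getrandbits(3)   # uniform over 0..7
    #     while r >= 5:        # reject 5, 6, 7; each survivor equally likely
    #         r = getrandbits(3)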
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
try:
i = self._randbelow(len(seq))
except ValueError:
raise IndexError('Cannot choose from an empty sequence')
return seq[i]
def shuffle(self, x, random=None):
"""Shuffle list x in place, and return None.
Optional argument random is a 0-argument function returning a
random float in [0.0, 1.0); if it is the default None, the
standard random.random will be used.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence or set.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use range as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(range(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
if isinstance(population, _Set):
population = tuple(population)
if not isinstance(population, _Sequence):
raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
randbelow = self._randbelow
n = len(population)
if not 0 <= k <= n:
raise ValueError("Sample larger than population")
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize:
# An n-length list is smaller than a k-length set
pool = list(population)
for i in range(k): # invariant: non-selected at [0,n-i)
j = randbelow(n-i)
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
selected = set()
selected_add = selected.add
for i in range(k):
j = randbelow(n)
while j in selected:
j = randbelow(n)
selected_add(j)
result[i] = population[j]
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
try:
c = 0.5 if mode is None else (mode - low) / (high - low)
except ZeroDivisionError:
return low
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
The probability distribution function is:
x ** (alpha - 1) * math.exp(-x / beta)
pdf(x) = --------------------------------------
math.gamma(alpha) * beta ** alpha
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (int.from_bytes(_urandom(7), 'big') >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates an int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
def seed(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
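# Illustrative sketch (not part of the stdlib source): the exported functions
# above all share the hidden module-level instance, so reseeding it makes the
# sequence reproducible across every exported name.
def _example_shared_state():
    seed(12345)
    first = random()
    seed(12345)
    assert random() == first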
if __name__ == '__main__':
_test()
| lgpl-3.0 |
shashidharatd/test-infra | images/bazelbuild/coalesce.py | 4 | 3183 | #!/usr/bin/env python2
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coalesces bazel test results into one file."""
import argparse
import os
import re
import xml.etree.ElementTree as ET
BAZEL_FAILURE_HEADER = '''exec ${PAGER:-/usr/bin/less} "$0" || exit 1
-----------------------------------------------------------------------------
'''
# from https://www.w3.org/TR/xml11/#charsets
# RestrictedChar ::= [#x1-#x8]|[#xB-#xC]|[#xE-#x1F]|[#x7F-#x84]|[#x86-#x9F]
RESTRICTED_XML_CHARS_RE = re.compile(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x84\x86-\x9F]')
ANSI_ESCAPE_CODES_RE = re.compile(r'\033\[[\d;]*[@-~]')
def test_packages(root):
"""Yields test package directories under root."""
for package, _, files in os.walk(root):
if 'test.xml' in files and 'test.log' in files:
yield package
def sanitize(text):
if text.startswith(BAZEL_FAILURE_HEADER):
text = text[len(BAZEL_FAILURE_HEADER):]
# ANSI escape sequences should be removed.
text = ANSI_ESCAPE_CODES_RE.sub('', text)
# And any other badness that slips through.
text = RESTRICTED_XML_CHARS_RE.sub('', text)
return text
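# Illustrative sketch (not part of the original source; hypothetical input):
# sanitize() drops the bazel pager header, ANSI colour codes, and
# XML-restricted control characters in one pass.
def _example_sanitize():
    raw = BAZEL_FAILURE_HEADER + '\x1b[31mFAIL\x1b[0m\x07'
    assert sanitize(raw) == 'FAIL'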
def result(pkg):
"""Given a directory, create a testcase element describing it."""
elem = ET.Element('testcase')
elem.set('classname', 'go_test')
pkg_parts = pkg.split('/')
elem.set('name', '//%s:%s' % ('/'.join(pkg_parts[1:-1]), pkg_parts[-1]))
elem.set('time', '0')
suites = ET.parse(pkg + '/test.xml').getroot()
for suite in suites:
for case in suite:
for status in case:
if status.tag == 'error' or status.tag == 'failure':
failure = ET.Element('failure')
with open(pkg + '/test.log') as fp:
text = fp.read().decode('utf8', 'ignore')
failure.text = sanitize(text)
elem.append(failure)
return elem
def main():
root = ET.Element('testsuite')
root.set('time', '0')
for package in sorted(test_packages('bazel-testlogs')):
root.append(result(package))
artifacts_dir = os.path.join(os.environ.get('WORKSPACE', os.getcwd()),
'_artifacts')
try:
os.mkdir(artifacts_dir)
except OSError:
pass
with open(os.path.join(artifacts_dir, 'junit_bazel.xml'), 'w') as fp:
fp.write(ET.tostring(root, 'utf8'))
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Coalesce JUnit results.')
PARSER.add_argument('--repo_root', default='.')
ARGS = PARSER.parse_args()
os.chdir(ARGS.repo_root)
main()
| apache-2.0 |
foxwill/ol-api-tester | env/lib/python2.7/site-packages/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| gpl-2.0 |
rvanlaar/easy-transifex | src/transifex/transifex/addons/suggestions/formats.py | 1 | 4209 | # -*- coding: utf-8 -*-
"""Module for handling suggestions in resources."""
from django.conf import settings
from suggestions.models import Suggestion
from transifex.txcommon.log import logger
from transifex.resources.models import Translation, SourceEntity
from transifex.resources.formats.utils.string_utils import percent_diff
class SuggestionFormat(object):
"""Base class for suggestion formats."""
def __init__(self, resource, language, user):
self.resource = resource
self.language = language
self.user = user
def _convert_to_suggestions(self, source, dest, user=None, langs=None):
"""This function takes all translations that belong to source and
adds them as suggestion to dest. Both source and dest are
SourceEntity objects.
The langs can contain a list of all languages for which the conversion
will take place. Defaults to all available languages.
"""
if langs:
translations = Translation.objects.filter(source_entity=source,
language__in=langs, rule=5)
else:
translations = Translation.objects.filter(source_entity=source, rule=5)
for t in translations:
# Skip source language translations
if t.language == dest.resource.source_language:
continue
tr, created = Suggestion.objects.get_or_create(
string = t.string,
source_entity = dest,
language = t.language
)
# If the suggestion was created and we have a user assign him as the
# one who made the suggestion
if created and user:
tr.user = user
tr.save()
return
def create_suggestions(self, original, new):
"""Create new suggestions.
Find similar strings in original and new lists.
Args:
original: Original set of resources.
new: Set of new resources.
"""
raise NotImplementedError
def add_from_strings(self, strings):
"""Add the strings as suggestions.
Args:
strings: An iterable of strings to add as suggestions
"""
for j in strings:
# Check SE existence
try:
se = SourceEntity.objects.get(
string = j.source_entity, context = j.context or "None",
resource = self.resource
)
except SourceEntity.DoesNotExist:
logger.warning(
"Source entity %s does not exist" % j.source_entity
)
continue
Suggestion.objects.create(
string = j.translation, source_entity = se,
language = self.language
)
class KeySuggestionFormat(SuggestionFormat):
"""Class for formats the suggestions for which are based on
similarities of keys.
"""
pass
class ContentSuggestionFormat(SuggestionFormat):
"""Class for formats the suggestions of which are based on similarities
of the content.
"""
def create_suggestions(self, original, new):
iterations = len(original)*len(new)
# If it's not over the limit, then do it
if iterations < settings.MAX_STRING_ITERATIONS:
for se in original:
for ne in new:
try:
old_trans = Translation.objects.get(source_entity=se,
language=se.resource.source_language, rule=5)
new_trans = Translation.objects.get(source_entity=ne,
language=se.resource.source_language, rule=5)
except Translation.DoesNotExist:
# Source language translation should always exist
# but just in case...
continue
# find Levenshtein distance
if percent_diff(old_trans.string, new_trans.string) < settings.MAX_STRING_DISTANCE:
self._convert_to_suggestions(se, ne, self.user)
break
| bsd-2-clause |
evaschalde/odoo | addons/share/wizard/__init__.py | 448 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import share_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tylerjereddy/scipy | scipy/linalg/_matfuncs_inv_ssq.py | 21 | 27982 | """
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg.decomp_schur import schur, rsf2csf
from scipy.linalg.matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
class LogmRankWarning(UserWarning):
pass
class LogmExactlySingularWarning(LogmRankWarning):
pass
class LogmNearlySingularWarning(LogmRankWarning):
pass
class LogmError(np.linalg.LinAlgError):
pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
pass
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
"""
A representation of the linear operator (A - I)^p.
"""
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0 or p != int(p):
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x) - x
return x
def _rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A) - x
return x
def _matmat(self, X):
for i in range(self._p):
X = self._A.dot(X) - X
return X
def _adjoint(self):
return _MatrixM1PowerOperator(self._A.T, self._p)
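# Illustrative sketch (not part of the original source): the operator applies
# (A - I)**p one factor at a time instead of forming the matrix power; the
# internal _matvec is called directly here for brevity.
def _example_m1_power_operator():
    A = np.array([[2.0, 1.0], [0.0, 3.0]])
    op = _MatrixM1PowerOperator(A, 2)
    dense = np.linalg.matrix_power(A - np.identity(2), 2)
    x = np.array([1.0, 1.0])
    assert np.allclose(op._matvec(x), dense.dot(x))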
#TODO renovate or move this function when SciPy operators are more mature
def _onenormest_m1_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of (A - I)^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return onenormest(_MatrixM1PowerOperator(A, p),
t=t, itmax=itmax, compute_v=compute_v, compute_w=compute_w)
def _unwindk(z):
"""
Compute the scalar unwinding number.
Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z)) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] Robert M. Corless and David J. Jeffrey,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
David J. Jeffrey and Stephen M. Watt,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
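# Illustrative sketch (not part of the original source): the unwinding number
# is 0 inside the principal strip |Im(z)| <= pi and steps by 1 for every
# 2*pi crossed above or below it.
def _example_unwindk():
    assert _unwindk(0.5 + 1j) == 0
    assert _unwindk(4j) == 1
    assert _unwindk(-4j) == -1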
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
A complex number.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as formulated in the reference does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
This function is intended to not allow `a` to belong to the closed
negative real axis, but this constraint is relaxed.
References
----------
.. [1] Awad H. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
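# Illustrative sketch (not part of the original source): Briggs' recurrence
# matches the naive a**(1 / 2**k) - 1 while avoiding the subtractive
# cancellation that hurts the naive form when a is close to 1.
def _example_briggs():
    a, k = 1.0 + 1e-8, 4
    naive = a ** (1.0 / 2 ** k) - 1
    assert abs(_briggs_helper_function(a, k) - naive) < 1e-12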
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Care has been taken to return a real number if possible when
all of the inputs are real numbers.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l2 - l1) > abs(l1 + l2) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
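# Illustrative sketch (not part of the original source): for nearly equal
# eigenvalues the arctanh branch above reproduces the divided difference
# t12 * (l2**p - l1**p) / (l2 - l1) without catastrophic cancellation.
def _example_superdiag_entry():
    l1, l2, t12, p = 1.0, 1.0 + 1e-9, 5.0, 0.5
    expected = t12 * (l2 ** p - l1 ** p) / (l2 - l1)
    got = _fractional_power_superdiag_entry(l1, l2, t12, p)
    assert abs(got - expected) < 1e-5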
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is like Eq. (11.28) in [1]_, except the determination of whether
l1 and l2 are sufficiently far apart has been modified.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Care has been taken to return a real number if possible when
all of the inputs are real numbers.
References
----------
.. [1] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l2 - l1) > abs(l1 + l2) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
u = _unwindk(np.log(l2) - np.log(l1))
if u:
f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
"""
A helper function for inverse scaling and squaring for Pade approximation.
Parameters
----------
T0 : (N, N) array_like upper triangular
Matrix involved in inverse scaling and squaring.
theta : indexable
The values theta[1] .. theta[7] must be available.
They represent bounds related to Pade approximation, and they depend
on the matrix function which is being computed.
For example, different values of theta are required for
matrix logarithm than for fractional matrix power.
Returns
-------
R : (N, N) array_like upper triangular
Composition of zero or more matrix square roots of T0, minus I.
s : non-negative integer
Number of square roots taken.
m : positive integer
The degree of the Pade approximation.
Notes
-----
This subroutine appears as a chunk of lines within
a couple of published algorithms; for example it appears
as lines 4--35 in algorithm (3.1) of [1]_, and
as lines 3--34 in algorithm (4.1) of [2]_.
The instances of 'goto line 38' in algorithm (3.1) of [1]_
    probably mean 'goto line 36' and have been interpreted accordingly.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
.. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = T0.shape
T = T0
# Find s0, the smallest s such that the spectral radius
# of a certain diagonal matrix is at most theta[7].
# Note that because theta[7] < 1,
# this search will not terminate if any diagonal entry of T is zero.
s0 = 0
tmp_diag = np.diag(T)
if np.count_nonzero(tmp_diag) != n:
raise Exception('internal inconsistency')
while np.max(np.absolute(tmp_diag - 1)) > theta[7]:
tmp_diag = np.sqrt(tmp_diag)
s0 += 1
# Take matrix square roots of T.
for i in range(s0):
T = _sqrtm_triu(T)
# Flow control in this section is a little odd.
# This is because I am translating algorithm descriptions
# which have GOTOs in the publication.
s = s0
k = 0
d2 = _onenormest_m1_power(T, 2) ** (1/2)
d3 = _onenormest_m1_power(T, 3) ** (1/3)
a2 = max(d2, d3)
m = None
for i in (1, 2):
if a2 <= theta[i]:
m = i
break
while m is None:
if s > s0:
d3 = _onenormest_m1_power(T, 3) ** (1/3)
d4 = _onenormest_m1_power(T, 4) ** (1/4)
a3 = max(d3, d4)
if a3 <= theta[7]:
j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
if j1 <= 6:
m = j1
break
elif a3 / 2 <= theta[5] and k < 2:
k += 1
T = _sqrtm_triu(T)
s += 1
continue
d5 = _onenormest_m1_power(T, 5) ** (1/5)
a4 = max(d4, d5)
eta = min(a3, a4)
for i in (6, 7):
if eta <= theta[i]:
m = i
break
if m is not None:
break
T = _sqrtm_triu(T)
s += 1
# The subtraction of the identity is redundant here,
# because the diagonal will be replaced for improved numerical accuracy,
# but this formulation should help clarify the meaning of R.
R = T - np.identity(n)
# Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
# using formulas that have less subtractive cancellation.
# Skip this step if the principal branch
# does not exist at T0; this happens when a diagonal entry of T0
# is negative with imaginary part 0.
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
if has_principal_branch:
for j in range(n):
a = T0[j, j]
r = _briggs_helper_function(a, s)
R[j, j] = r
p = np.exp2(-s)
for j in range(n-1):
l1 = T0[j, j]
l2 = T0[j+1, j+1]
t12 = T0[j, j+1]
f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
R[j, j+1] = f12
# Return the T-I matrix, the number of square roots, and the Pade degree.
if not np.array_equal(R, np.triu(R)):
raise Exception('internal inconsistency')
return R, s, m
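# Illustrative sketch, not part of the original module: the returned R
# satisfies (R + I)**(2**s) ~= T0, so squaring s times should reproduce the
# input. The theta values are the logm bounds from Table (2.1) used later in
# this file; only theta[1] through theta[7] are consulted by the helper.
def _demo_inverse_squaring_roundtrip():
    theta = (None,
             1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
             1.28e-1, 2.06e-1, 2.88e-1)
    T0 = np.triu(np.ones((3, 3))) + 3 * np.identity(3)
    R, s, m = _inverse_squaring_helper(T0, theta)
    X = R + np.identity(3)
    for _ in range(s):
        X = X.dot(X)
    return np.allclose(X, T0)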
def _fractional_power_pade_constant(i, t):
# A helper function for matrix fractional power.
if i < 1:
raise ValueError('expected a positive integer i')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
if i == 1:
return -t
elif i % 2 == 0:
j = i // 2
return (-j + t) / (2 * (2*j - 1))
elif i % 2 == 1:
j = (i - 1) // 2
return (-j - t) / (2 * (2*j + 1))
else:
raise Exception('internal error')
def _fractional_power_pade(R, t, m):
"""
Evaluate the Pade approximation of a fractional matrix power.
Evaluate the degree-m Pade approximation of R
to the fractional matrix power t using the continued fraction
in bottom-up fashion using algorithm (4.1) in [1]_.
Parameters
----------
R : (N, N) array_like
Upper triangular matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
m : positive integer
Degree of Pade approximation.
Returns
-------
U : (N, N) array_like
The degree-m Pade approximation of R to the fractional power t.
This matrix will be upper triangular.
References
----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if m < 1 or int(m) != m:
raise ValueError('expected a positive integer m')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
R = np.asarray(R)
if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = R.shape
ident = np.identity(n)
Y = R * _fractional_power_pade_constant(2*m, t)
for j in range(2*m - 1, 0, -1):
rhs = R * _fractional_power_pade_constant(j, t)
Y = solve_triangular(ident + Y, rhs)
U = ident + Y
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
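# Illustrative sketch, not part of the original module: in the 1x1 case the
# continued-fraction evaluation above should closely match (1 - x)**t, the
# function r_m approximates; this is why the caller below passes -R.
def _demo_pade_scalar(x=0.01, t=0.5, m=7):
    approx = _fractional_power_pade(np.array([[x]]), t, m)[0, 0]
    exact = (1.0 - x) ** t
    return approx, exact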
def _remainder_matrix_power_triu(T, t):
"""
Compute a fractional power of an upper triangular matrix.
The fractional power is restricted to fractions -1 < t < 1.
This uses algorithm (3.1) of [1]_.
The Pade approximation itself uses algorithm (4.1) of [2]_.
Parameters
----------
T : (N, N) array_like
Upper triangular matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
Returns
-------
X : (N, N) array_like
The fractional power of the matrix.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
m_to_theta = {
1: 1.51e-5,
2: 2.24e-3,
3: 1.88e-2,
4: 6.04e-2,
5: 1.24e-1,
6: 2.00e-1,
7: 2.79e-1,
}
n, n = T.shape
T0 = T
T0_diag = np.diag(T0)
if np.array_equal(T0, np.diag(T0_diag)):
U = np.diag(T0_diag ** t)
else:
R, s, m = _inverse_squaring_helper(T0, m_to_theta)
# Evaluate the Pade approximation.
# Note that this function expects the negative of the matrix
# returned by the inverse squaring helper.
U = _fractional_power_pade(-R, t, m)
# Undo the inverse scaling and squaring.
# Be less clever about this
# if the principal branch does not exist at T0;
# this happens when a diagonal entry of T0
# is negative with imaginary part 0.
eivals = np.diag(T0)
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
for i in range(s, -1, -1):
if i < s:
U = U.dot(U)
else:
if has_principal_branch:
p = t * np.exp2(-i)
U[np.diag_indices(n)] = T0_diag ** p
for j in range(n-1):
l1 = T0[j, j]
l2 = T0[j+1, j+1]
t12 = T0[j, j+1]
f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
U[j, j+1] = f12
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
def _remainder_matrix_power(A, t):
"""
Compute the fractional power of a matrix, for fractions -1 < t < 1.
This uses algorithm (3.1) of [1]_.
The Pade approximation itself uses algorithm (4.1) of [2]_.
Parameters
----------
A : (N, N) array_like
Matrix whose fractional power to evaluate.
t : float
Fractional power between -1 and 1 exclusive.
Returns
-------
X : (N, N) array_like
The fractional power of the matrix.
References
----------
.. [1] Nicholas J. Higham and Lijing Lin (2013)
"An Improved Schur-Pade Algorithm for Fractional Powers
of a Matrix and their Frechet Derivatives."
    .. [2] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
# This code block is copied from numpy.matrix_power().
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('input must be a square array')
# Get the number of rows and columns.
n, n = A.shape
# Triangularize the matrix if necessary,
# attempting to preserve dtype if possible.
if np.array_equal(A, np.triu(A)):
Z = None
T = A
else:
if np.isrealobj(A):
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T, Z)
else:
T, Z = schur(A, output='complex')
# Zeros on the diagonal of the triangular matrix are forbidden,
# because the inverse scaling and squaring cannot deal with it.
T_diag = np.diag(T)
if np.count_nonzero(T_diag) != n:
raise FractionalMatrixPowerError(
'cannot use inverse scaling and squaring to find '
'the fractional matrix power of a singular matrix')
# If the triangular matrix is real and has a negative
# entry on the diagonal, then force the matrix to be complex.
if np.isrealobj(T) and np.min(T_diag) < 0:
T = T.astype(complex)
# Get the fractional power of the triangular matrix,
# and de-triangularize it if necessary.
U = _remainder_matrix_power_triu(T, t)
if Z is not None:
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
else:
return U
def _fractional_matrix_power(A, p):
"""
Compute the fractional power of a matrix.
See the fractional_matrix_power docstring in matfuncs.py for more info.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
if p == int(p):
return np.linalg.matrix_power(A, int(p))
# Compute singular values.
s = svdvals(A)
# Inverse scaling and squaring cannot deal with a singular matrix,
# because the process of repeatedly taking square roots
# would not converge to the identity matrix.
if s[-1]:
# Compute the condition number relative to matrix inversion,
# and use this to decide between floor(p) and ceil(p).
k2 = s[0] / s[-1]
p1 = p - np.floor(p)
p2 = p - np.ceil(p)
if p1 * k2 ** (1 - p1) <= -p2 * k2:
a = int(np.floor(p))
b = p1
else:
a = int(np.ceil(p))
b = p2
try:
R = _remainder_matrix_power(A, b)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
except np.linalg.LinAlgError:
pass
# If p is negative then we are going to give up.
# If p is non-negative then we can fall back to generic funm.
if p < 0:
X = np.empty_like(A)
X.fill(np.nan)
return X
else:
p1 = p - np.floor(p)
a = int(np.floor(p))
b = p1
R, info = funm(A, lambda x: pow(x, b), disp=False)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
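# Illustrative sketch, not part of the original module: a matrix square root
# computed through this routine should multiply back to the original matrix.
def _demo_fractional_matrix_power_roundtrip():
    A = np.array([[2.0, 1.0],
                  [0.0, 3.0]])
    X = _fractional_matrix_power(A, 0.5)
    return np.allclose(X.dot(X), A)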
def _logm_triu(T):
"""
Compute matrix logarithm of an upper triangular matrix.
The matrix logarithm is the inverse of
expm: expm(logm(`T`)) == `T`
Parameters
----------
T : (N, N) array_like
Upper triangular matrix whose logarithm to evaluate
Returns
-------
logm : (N, N) ndarray
Matrix logarithm of `T`
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
.. [2] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
    .. [3] Nicholas J. Higham and Lijing Lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
T = np.asarray(T)
if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
raise ValueError('expected an upper triangular square matrix')
n, n = T.shape
# Construct T0 with the appropriate type,
# depending on the dtype and the spectrum of T.
T_diag = np.diag(T)
keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
if keep_it_real:
T0 = T
else:
T0 = T.astype(complex)
# Define bounds given in Table (2.1).
theta = (None,
1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
R, s, m = _inverse_squaring_helper(T0, theta)
# Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
# This requires the nodes and weights
# corresponding to degree-m Gauss-Legendre quadrature.
# These quadrature arrays need to be transformed from the [-1, 1] interval
# to the [0, 1] interval.
nodes, weights = scipy.special.p_roots(m)
nodes = nodes.real
if nodes.shape != (m,) or weights.shape != (m,):
raise Exception('internal error')
nodes = 0.5 + 0.5 * nodes
weights = 0.5 * weights
ident = np.identity(n)
U = np.zeros_like(R)
for alpha, beta in zip(weights, nodes):
U += solve_triangular(ident + beta*R, alpha*R)
U *= np.exp2(s)
# Skip this step if the principal branch
# does not exist at T0; this happens when a diagonal entry of T0
# is negative with imaginary part 0.
has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
if has_principal_branch:
# Recompute diagonal entries of U.
U[np.diag_indices(n)] = np.log(np.diag(T0))
# Recompute superdiagonal entries of U.
        # The indexing of this code should be renovated
# when newer np.diagonal() becomes available.
for i in range(n-1):
l1 = T0[i, i]
l2 = T0[i+1, i+1]
t12 = T0[i, i+1]
U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)
# Return the logm of the upper triangular matrix.
if not np.array_equal(U, np.triu(U)):
raise Exception('internal inconsistency')
return U
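# Illustrative sketch, not part of the original module: for a diagonal (hence
# trivially upper triangular) matrix the computed logarithm must reduce to the
# elementwise log of the diagonal.
def _demo_logm_triu_diagonal():
    T = np.diag([1.0, 2.0, 4.0])
    return np.allclose(np.diag(_logm_triu(T)), np.log([1.0, 2.0, 4.0]))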
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
return T
def _logm(A):
"""
Compute the matrix logarithm.
See the logm docstring in matfuncs.py for more info.
Notes
-----
In this function we look at triangular matrices that are similar
to the input matrix. If any diagonal entry of such a triangular matrix
is exactly zero then the original matrix is singular.
The matrix logarithm does not exist for such matrices,
but in such cases we will pretend that the diagonal entries that are zero
are actually slightly positive by an ad-hoc amount, in the interest
of returning something more useful than NaN. This will cause a warning.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# If the input matrix dtype is integer then copy to a float dtype matrix.
if issubclass(A.dtype.type, np.integer):
A = np.asarray(A, dtype=float)
keep_it_real = np.isrealobj(A)
try:
if np.array_equal(A, np.triu(A)):
A = _logm_force_nonsingular_triangular_matrix(A)
if np.min(np.diag(A)) < 0:
A = A.astype(complex)
return _logm_triu(A)
else:
if keep_it_real:
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T, Z)
else:
T, Z = schur(A, output='complex')
T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
U = _logm_triu(T)
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
except (SqrtmError, LogmError):
X = np.empty_like(A)
X.fill(np.nan)
return X
| bsd-3-clause |
rlr/fjord | vendor/packages/translate-toolkit/translate/convert/prop2po.py | 3 | 12113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2010,2012 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Java/Mozilla .properties files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/prop2po.html
for examples and usage instructions.
"""
import logging
import sys
from translate.storage import po, properties
logger = logging.getLogger(__name__)
def _collapse(store, units):
sources = [u.source for u in units]
targets = [u.target for u in units]
# TODO: only consider the right ones for sources and targets
plural_unit = store.addsourceunit(sources)
plural_unit.target = targets
return plural_unit
class prop2po:
"""convert a .properties file to a .po file for handling the
translation."""
def convertstore(self, thepropfile, personality="java",
duplicatestyle="msgctxt"):
"""converts a .properties file to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality in ("mozilla", "skype"):
targetheader = thetargetfile.init_headers(
x_accelerator_marker="&",
x_merge_on="location",
)
else:
targetheader = thetargetfile.header()
targetheader.addnote("extracted from %s" % thepropfile.filename,
"developer")
# we try and merge the header po with any comments at the start of the
# properties file
appendedheader = False
waitingcomments = []
for propunit in thepropfile.units:
pounit = self.convertunit(propunit, "developer")
if pounit is None:
waitingcomments.extend(propunit.comments)
# FIXME the storage class should not be creating blank units
if pounit is "discard":
continue
if not appendedheader:
if propunit.isblank():
targetheader.addnote("\n".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
pounit = None
appendedheader = True
if pounit is not None:
pounit.addnote("\n".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(pounit)
if self.personality == "gaia":
thetargetfile = self.fold_gaia_plurals(thetargetfile)
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def mergestore(self, origpropfile, translatedpropfile, personality="java",
blankmsgstr=False, duplicatestyle="msgctxt"):
"""converts two .properties files to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality in ("mozilla", "skype"):
targetheader = thetargetfile.init_headers(
x_accelerator_marker="&",
x_merge_on="location",
)
else:
targetheader = thetargetfile.header()
targetheader.addnote("extracted from %s, %s" % (origpropfile.filename, translatedpropfile.filename),
"developer")
translatedpropfile.makeindex()
# we try and merge the header po with any comments at the start of
# the properties file
appendedheader = False
waitingcomments = []
# loop through the original file, looking at units one by one
for origprop in origpropfile.units:
origpo = self.convertunit(origprop, "developer")
if origpo is None:
waitingcomments.extend(origprop.comments)
# FIXME the storage class should not be creating blank units
if origpo is "discard":
continue
# handle the header case specially...
if not appendedheader:
if origprop.isblank():
targetheader.addnote(u"".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
origpo = None
appendedheader = True
# try and find a translation of the same name...
if origprop.name in translatedpropfile.locationindex:
translatedprop = translatedpropfile.locationindex[origprop.name]
# Need to check that this comment is not a copy of the
# developer comments
translatedpo = self.convertunit(translatedprop, "translator")
if translatedpo is "discard":
continue
else:
translatedpo = None
# if we have a valid po unit, get the translation and add it...
if origpo is not None:
if translatedpo is not None and not blankmsgstr:
origpo.target = translatedpo.source
origpo.addnote(u"".join(waitingcomments).rstrip(),
"developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(origpo)
elif translatedpo is not None:
logger.error("didn't convert original property definition '%s'",
origprop.name)
if self.personality == "gaia":
thetargetfile = self.fold_gaia_plurals(thetargetfile)
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def fold_gaia_plurals(self, postore):
"""Fold the multiple plural units of a gaia file into a gettext plural."""
new_store = type(postore)()
plurals = {}
current_plural = u""
for unit in postore.units:
if not unit.istranslatable():
#TODO: reconsider: we could lose header comments here
continue
if u"plural(n)" in unit.source:
# start of a set of plural units
location = unit.getlocations()[0]
current_plural = location
plurals[location] = []
# We ignore the first one, since it doesn't contain translatable
# text, only a marker.
else:
location = unit.getlocations()[0]
if current_plural and location.startswith(current_plural):
plurals[current_plural].append(unit)
                    if '[zero]' not in location:
# We want to keep [zero] cases separately translatable
continue
elif current_plural:
# End of a set of plural units
new_unit = _collapse(new_store, plurals[current_plural])
new_unit.addlocation(current_plural)
del plurals[current_plural]
current_plural = u""
new_store.addunit(unit)
if current_plural:
# The file ended with a set of plural units
new_unit = _collapse(new_store, plurals[current_plural])
new_unit.addlocation(current_plural)
del plurals[current_plural]
current_plural = u""
# if everything went well, there should be nothing left in plurals
if len(plurals) != 0:
logger.warning("Not all plural units converted correctly:" +
"\n".join(plurals.keys()))
return new_store
def convertunit(self, propunit, commenttype):
"""Converts a .properties unit to a .po unit. Returns None if empty
or not for translation."""
if propunit is None:
return None
# escape unicode
pounit = po.pounit(encoding="UTF-8")
if hasattr(propunit, "comments"):
for comment in propunit.comments:
if "DONT_TRANSLATE" in comment:
return "discard"
pounit.addnote(u"".join(propunit.getnotes()).rstrip(), commenttype)
# TODO: handle multiline msgid
if propunit.isblank():
return None
pounit.addlocation(propunit.name)
pounit.source = propunit.source
pounit.target = u""
return pounit
def convertstrings(inputfile, outputfile, templatefile, personality="strings",
pot=False, duplicatestyle="msgctxt", encoding=None):
""".strings specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="strings", pot=pot,
duplicatestyle=duplicatestyle, encoding=encoding)
def convertmozillaprop(inputfile, outputfile, templatefile, pot=False,
duplicatestyle="msgctxt"):
"""Mozilla specific convertor function"""
return convertprop(inputfile, outputfile, templatefile,
personality="mozilla", pot=pot,
duplicatestyle=duplicatestyle)
def convertprop(inputfile, outputfile, templatefile, personality="java",
pot=False, duplicatestyle="msgctxt", encoding=None):
"""reads in inputfile using properties, converts using prop2po, writes
to outputfile"""
inputstore = properties.propfile(inputfile, personality, encoding)
convertor = prop2po()
if templatefile is None:
outputstore = convertor.convertstore(inputstore, personality,
duplicatestyle=duplicatestyle)
else:
templatestore = properties.propfile(templatefile, personality, encoding)
outputstore = convertor.mergestore(templatestore, inputstore,
personality, blankmsgstr=pot,
duplicatestyle=duplicatestyle)
if outputstore.isempty():
return 0
outputfile.write(str(outputstore))
return 1
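# Minimal usage sketch, not part of the original converter; the file names are
# hypothetical. It drives convertprop() directly instead of going through the
# command-line front end defined in main() below.
def _example_convert(input_path='messages.properties',
                     output_path='messages.po'):
    with open(input_path) as inputfile, open(output_path, 'w') as outputfile:
        # No template file, so a fresh PO store is produced (java dialect).
        return convertprop(inputfile, outputfile, None, personality="java")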
formats = {
"properties": ("po", convertprop),
("properties", "properties"): ("po", convertprop),
"lang": ("po", convertprop),
("lang", "lang"): ("po", convertprop),
"strings": ("po", convertstrings),
("strings", "strings"): ("po", convertstrings),
}
def main(argv=None):
from translate.convert import convert
parser = convert.ConvertOptionParser(formats, usetemplates=True,
usepots=True,
description=__doc__)
parser.add_option("", "--personality", dest="personality",
default=properties.default_dialect,
type="choice",
choices=properties.dialects.keys(),
help="override the input file format: %s (for .properties files, default: %s)" %
(", ".join(properties.dialects.iterkeys()),
properties.default_dialect),
metavar="TYPE")
parser.add_option("", "--encoding", dest="encoding", default=None,
help="override the encoding set by the personality",
metavar="ENCODING")
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.passthrough.append("personality")
parser.passthrough.append("encoding")
parser.run(argv)
if __name__ == '__main__':
main()
| bsd-3-clause |
rubencabrera/odoo | addons/l10n_pl/__openerp__.py | 277 | 2158 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 - now Grzegorz Grzelak grzegorz.grzelak@openglobe.pl
# All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Poland - Accounting',
'version' : '1.02',
'author' : 'Grzegorz Grzelak (OpenGLOBE)',
'website': 'http://www.openglobe.pl',
'category' : 'Localization/Account Charts',
'description': """
This is the module to manage the accounting chart and taxes for Poland in OpenERP.
==================================================================================
This module creates a model chart of accounts, taxes, tax areas (fiscal
positions) and tax registers. It also sets up purchase and sale accounts
for goods, assuming that all goods are traded wholesale.
This module is intended for Odoo 8.0.
Internal OpenGLOBE version number: 1.02
""",
'depends' : ['account', 'base_iban', 'base_vat', 'account_chart'],
'demo' : [],
'data' : ['account_tax_code.xml',
'account_chart.xml',
'account_tax.xml',
'fiscal_position.xml',
'country_pl.xml',
'l10n_chart_pl_wizard.xml'
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yaii/yai | share/extensions/render_barcode_qrcode.py | 3 | 31528 | #!/usr/bin/env python
import math, sys
import inkex
inkex.localize()
#QRCode for Python
#
#Ported from the Javascript library by Sam Curren
#
#QRCode for Javascript
#http://d-project.googlecode.com/svn/trunk/misc/qrcode/js/qrcode.js
#
#Copyright (c) 2009 Kazuhiko Arase
#
#URL: http://www.d-project.com/
#
# Copyright (c) 2010 buliabyak@gmail.com:
# adapting for Inkscape extension, SVG output, Auto mode
#
#Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
# The word "QR Code" is registered trademark of
# DENSO WAVE INCORPORATED
# http://www.denso-wave.com/qrcode/faqpatent-e.html
class QR8bitByte:
def __init__(self, data):
self.mode = QRMode.MODE_8BIT_BYTE
self.data = data
def getLength(self):
return len(self.data)
def write(self, buffer):
for i in range(len(self.data)):
#// not JIS ...
buffer.put(ord(self.data[i]), 8)
def __repr__(self):
return self.data
class QRCode:
def __init__(self, typeNumber, errorCorrectLevel):
self.typeNumber = typeNumber
self.errorCorrectLevel = errorCorrectLevel
self.modules = None
self.moduleCount = 0
self.dataCache = None
self.dataList = []
def addData(self, data):
newData = QR8bitByte(data)
self.dataList.append(newData)
self.dataCache = None
def isDark(self, row, col):
if (row < 0 or self.moduleCount <= row or col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
return self.modules[row][col]
def getModuleCount(self):
return self.moduleCount
def make(self):
self.makeImpl(False, self.getBestMaskPattern() )
def makeImpl(self, test, maskPattern):
if self.typeNumber == 0:
self.typeNumber = QRCode.autoNumber(self.errorCorrectLevel, self.dataList)
self.moduleCount = self.typeNumber * 4 + 17
self.modules = [None for x in range(self.moduleCount)]
for row in range(self.moduleCount):
self.modules[row] = [None for x in range(self.moduleCount)]
for col in range(self.moduleCount):
self.modules[row][col] = None #//(col + row) % 3;
self.setupPositionProbePattern(0, 0)
self.setupPositionProbePattern(self.moduleCount - 7, 0)
self.setupPositionProbePattern(0, self.moduleCount - 7)
self.setupPositionAdjustPattern()
self.setupTimingPattern()
self.setupTypeInfo(test, maskPattern)
if (self.typeNumber >= 7):
self.setupTypeNumber(test)
if (self.dataCache == None):
self.dataCache = QRCode.createData(self.typeNumber, self.errorCorrectLevel, self.dataList)
self.mapData(self.dataCache, maskPattern)
def setupPositionProbePattern(self, row, col):
for r in range(-1, 8):
if (row + r <= -1 or self.moduleCount <= row + r): continue
for c in range(-1, 8):
if (col + c <= -1 or self.moduleCount <= col + c): continue
if ( (0 <= r and r <= 6 and (c == 0 or c == 6) )
or (0 <= c and c <= 6 and (r == 0 or r == 6) )
or (2 <= r and r <= 4 and 2 <= c and c <= 4) ):
self.modules[row + r][col + c] = True;
else:
self.modules[row + r][col + c] = False;
def getBestMaskPattern(self):
minLostPoint = 0
pattern = 0
for i in range(8):
self.makeImpl(True, i);
lostPoint = QRUtil.getLostPoint(self);
if (i == 0 or minLostPoint > lostPoint):
minLostPoint = lostPoint
pattern = i
return pattern
def makeSVG(self, grp, boxsize):
margin = 4
pixelsize = (self.getModuleCount() + 2*margin) * boxsize #self.getModuleCount() * boxsize
# white background providing margin:
rect = inkex.etree.SubElement(grp, inkex.addNS('rect', 'svg'))
rect.set('x', '0')
rect.set('y', '0')
rect.set('width', str(pixelsize))
rect.set('height', str(pixelsize))
rect.set('style', 'fill:white;stroke:none')
for r in range(self.getModuleCount()):
for c in range(self.getModuleCount()):
if (self.isDark(r, c) ):
x = (c + margin) * boxsize
y = (r + margin) * boxsize
rect = inkex.etree.SubElement(grp, inkex.addNS('rect', 'svg'))
rect.set('x', str(x))
rect.set('y', str(y))
rect.set('width', str(boxsize))
rect.set('height', str(boxsize))
rect.set('style', 'fill:black;stroke:none')
def setupTimingPattern(self):
for r in range(8, self.moduleCount - 8):
if (self.modules[r][6] != None):
continue
self.modules[r][6] = (r % 2 == 0)
for c in range(8, self.moduleCount - 8):
if (self.modules[6][c] != None):
continue
self.modules[6][c] = (c % 2 == 0)
def setupPositionAdjustPattern(self):
pos = QRUtil.getPatternPosition(self.typeNumber)
for i in range(len(pos)):
for j in range(len(pos)):
row = pos[i]
col = pos[j]
if (self.modules[row][col] != None):
continue
for r in range(-2, 3):
for c in range(-2, 3):
if (r == -2 or r == 2 or c == -2 or c == 2 or (r == 0 and c == 0) ):
self.modules[row + r][col + c] = True
else:
self.modules[row + r][col + c] = False
def setupTypeNumber(self, test):
bits = QRUtil.getBCHTypeNumber(self.typeNumber)
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i // 3][i % 3 + self.moduleCount - 8 - 3] = mod;
for i in range(18):
mod = (not test and ( (bits >> i) & 1) == 1)
self.modules[i % 3 + self.moduleCount - 8 - 3][i // 3] = mod;
def setupTypeInfo(self, test, maskPattern):
data = (self.errorCorrectLevel << 3) | maskPattern
bits = QRUtil.getBCHTypeInfo(data)
#// vertical
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1)
if (i < 6):
self.modules[i][8] = mod
elif (i < 8):
self.modules[i + 1][8] = mod
else:
self.modules[self.moduleCount - 15 + i][8] = mod
#// horizontal
for i in range(15):
mod = (not test and ( (bits >> i) & 1) == 1);
if (i < 8):
self.modules[8][self.moduleCount - i - 1] = mod
elif (i < 9):
self.modules[8][15 - i - 1 + 1] = mod
else:
self.modules[8][15 - i - 1] = mod
#// fixed module
self.modules[self.moduleCount - 8][8] = (not test)
def mapData(self, data, maskPattern):
inc = -1
row = self.moduleCount - 1
bitIndex = 7
byteIndex = 0
for col in range(self.moduleCount - 1, 0, -2):
if (col == 6): col-=1
while (True):
for c in range(2):
if (self.modules[row][col - c] == None):
dark = False
if (byteIndex < len(data)):
dark = ( ( (data[byteIndex] >> bitIndex) & 1) == 1)
mask = QRUtil.getMask(maskPattern, row, col - c)
if (mask):
dark = not dark
self.modules[row][col - c] = dark
bitIndex-=1
if (bitIndex == -1):
byteIndex+=1
bitIndex = 7
row += inc
if (row < 0 or self.moduleCount <= row):
row -= inc
inc = -inc
break
PAD0 = 0xEC
PAD1 = 0x11
@staticmethod
def autoNumber(errorCorrectLevel, dataList):
for tn in range (1, 40):
rsBlocks = QRRSBlock.getRSBlocks(tn, errorCorrectLevel)
buffer = QRBitBuffer();
for i in range(len(dataList)):
data = dataList[i]
buffer.put(data.mode, 4)
buffer.put(data.getLength(), QRUtil.getLengthInBits(data.mode, tn) )
data.write(buffer)
#// calc num max data.
totalDataCount = 0;
for i in range(len(rsBlocks)):
totalDataCount += rsBlocks[i].dataCount
if (buffer.getLengthInBits() <= totalDataCount * 8):
return tn
inkex.errormsg("Even the largest size won't take this much data ("
+ str(buffer.getLengthInBits())
+ ">"
+ str(totalDataCount * 8)
+ ")")
sys.exit()
@staticmethod
def createData(typeNumber, errorCorrectLevel, dataList):
rsBlocks = QRRSBlock.getRSBlocks(typeNumber, errorCorrectLevel)
buffer = QRBitBuffer();
for i in range(len(dataList)):
data = dataList[i]
buffer.put(data.mode, 4)
buffer.put(data.getLength(), QRUtil.getLengthInBits(data.mode, typeNumber) )
data.write(buffer)
#// calc num max data.
totalDataCount = 0;
for i in range(len(rsBlocks)):
totalDataCount += rsBlocks[i].dataCount
if (buffer.getLengthInBits() > totalDataCount * 8):
inkex.errormsg("Text is too long for this size ("
+ str(buffer.getLengthInBits())
+ ">"
+ str(totalDataCount * 8)
+ ")")
sys.exit()
#// end code
if (buffer.getLengthInBits() + 4 <= totalDataCount * 8):
buffer.put(0, 4)
#// padding
while (buffer.getLengthInBits() % 8 != 0):
buffer.putBit(False)
#// padding
while (True):
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD0, 8)
if (buffer.getLengthInBits() >= totalDataCount * 8):
break
buffer.put(QRCode.PAD1, 8)
return QRCode.createBytes(buffer, rsBlocks)
@staticmethod
def createBytes(buffer, rsBlocks):
offset = 0
maxDcCount = 0
maxEcCount = 0
dcdata = [0 for x in range(len(rsBlocks))]
ecdata = [0 for x in range(len(rsBlocks))]
for r in range(len(rsBlocks)):
dcCount = rsBlocks[r].dataCount
ecCount = rsBlocks[r].totalCount - dcCount
maxDcCount = max(maxDcCount, dcCount)
maxEcCount = max(maxEcCount, ecCount)
dcdata[r] = [0 for x in range(dcCount)]
for i in range(len(dcdata[r])):
dcdata[r][i] = 0xff & buffer.buffer[i + offset]
offset += dcCount
rsPoly = QRUtil.getErrorCorrectPolynomial(ecCount)
rawPoly = QRPolynomial(dcdata[r], rsPoly.getLength() - 1)
modPoly = rawPoly.mod(rsPoly)
ecdata[r] = [0 for x in range(rsPoly.getLength()-1)]
for i in range(len(ecdata[r])):
modIndex = i + modPoly.getLength() - len(ecdata[r])
if (modIndex >= 0):
ecdata[r][i] = modPoly.get(modIndex)
else:
ecdata[r][i] = 0
totalCodeCount = 0
for i in range(len(rsBlocks)):
totalCodeCount += rsBlocks[i].totalCount
data = [None for x in range(totalCodeCount)]
index = 0
for i in range(maxDcCount):
for r in range(len(rsBlocks)):
if (i < len(dcdata[r])):
data[index] = dcdata[r][i]
index+=1
for i in range(maxEcCount):
for r in range(len(rsBlocks)):
if (i < len(ecdata[r])):
data[index] = ecdata[r][i]
index+=1
return data
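# Small illustrative sketch, not part of the ported library: build a symbol
# with the size chosen automatically (typeNumber 0 triggers autoNumber above).
def _demo_qrcode(text='www.inkscape.org'):
    qr = QRCode(0, QRErrorCorrectLevel.L)
    qr.addData(text)
    qr.make()
    # A short URL at level L fits the smallest symbol: 21x21 modules.
    return qr.getModuleCount()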
class QRMode:
MODE_NUMBER = 1 << 0
MODE_ALPHA_NUM = 1 << 1
MODE_8BIT_BYTE = 1 << 2
MODE_KANJI = 1 << 3
class QRErrorCorrectLevel:
L = 1
M = 0
Q = 3
H = 2
class QRMaskPattern:
PATTERN000 = 0
PATTERN001 = 1
PATTERN010 = 2
PATTERN011 = 3
PATTERN100 = 4
PATTERN101 = 5
PATTERN110 = 6
PATTERN111 = 7
class QRUtil(object):
PATTERN_POSITION_TABLE = [
[],
[6, 18],
[6, 22],
[6, 26],
[6, 30],
[6, 34],
[6, 22, 38],
[6, 24, 42],
[6, 26, 46],
[6, 28, 50],
[6, 30, 54],
[6, 32, 58],
[6, 34, 62],
[6, 26, 46, 66],
[6, 26, 48, 70],
[6, 26, 50, 74],
[6, 30, 54, 78],
[6, 30, 56, 82],
[6, 30, 58, 86],
[6, 34, 62, 90],
[6, 28, 50, 72, 94],
[6, 26, 50, 74, 98],
[6, 30, 54, 78, 102],
[6, 28, 54, 80, 106],
[6, 32, 58, 84, 110],
[6, 30, 58, 86, 114],
[6, 34, 62, 90, 118],
[6, 26, 50, 74, 98, 122],
[6, 30, 54, 78, 102, 126],
[6, 26, 52, 78, 104, 130],
[6, 30, 56, 82, 108, 134],
[6, 34, 60, 86, 112, 138],
[6, 30, 58, 86, 114, 142],
[6, 34, 62, 90, 118, 146],
[6, 30, 54, 78, 102, 126, 150],
[6, 24, 50, 76, 102, 128, 154],
[6, 28, 54, 80, 106, 132, 158],
[6, 32, 58, 84, 110, 136, 162],
[6, 26, 54, 82, 110, 138, 166],
[6, 30, 58, 86, 114, 142, 170]
]
G15 = (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | (1 << 0)
G18 = (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) | (1 << 2) | (1 << 0)
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
@staticmethod
def getBCHTypeInfo(data):
d = data << 10;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0):
d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) ) )
return ( (data << 10) | d) ^ QRUtil.G15_MASK
@staticmethod
def getBCHTypeNumber(data):
d = data << 12;
while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0):
d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) ) )
return (data << 12) | d
@staticmethod
def getBCHDigit(data):
digit = 0;
while (data != 0):
digit += 1
data >>= 1
return digit
@staticmethod
def getPatternPosition(typeNumber):
return QRUtil.PATTERN_POSITION_TABLE[typeNumber - 1]
@staticmethod
def getMask(maskPattern, i, j):
if maskPattern == QRMaskPattern.PATTERN000 : return (i + j) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN001 : return i % 2 == 0
if maskPattern == QRMaskPattern.PATTERN010 : return j % 3 == 0
if maskPattern == QRMaskPattern.PATTERN011 : return (i + j) % 3 == 0
if maskPattern == QRMaskPattern.PATTERN100 : return (math.floor(i / 2) + math.floor(j / 3) ) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN101 : return (i * j) % 2 + (i * j) % 3 == 0
if maskPattern == QRMaskPattern.PATTERN110 : return ( (i * j) % 2 + (i * j) % 3) % 2 == 0
if maskPattern == QRMaskPattern.PATTERN111 : return ( (i * j) % 3 + (i + j) % 2) % 2 == 0
raise Exception("bad maskPattern:" + maskPattern);
@staticmethod
def getErrorCorrectPolynomial(errorCorrectLength):
a = QRPolynomial([1], 0);
for i in range(errorCorrectLength):
a = a.multiply(QRPolynomial([1, QRMath.gexp(i)], 0) )
return a
@staticmethod
def getLengthInBits(mode, type):
if 1 <= type and type < 10:
#// 1 - 9
if mode == QRMode.MODE_NUMBER : return 10
if mode == QRMode.MODE_ALPHA_NUM : return 9
if mode == QRMode.MODE_8BIT_BYTE : return 8
if mode == QRMode.MODE_KANJI : return 8
raise Exception("mode:" + mode)
elif (type < 27):
#// 10 - 26
if mode == QRMode.MODE_NUMBER : return 12
if mode == QRMode.MODE_ALPHA_NUM : return 11
if mode == QRMode.MODE_8BIT_BYTE : return 16
if mode == QRMode.MODE_KANJI : return 10
raise Exception("mode:" + mode)
elif (type < 41):
#// 27 - 40
if mode == QRMode.MODE_NUMBER : return 14
if mode == QRMode.MODE_ALPHA_NUM : return 13
if mode == QRMode.MODE_8BIT_BYTE : return 16
if mode == QRMode.MODE_KANJI : return 12
raise Exception("mode:" + mode)
else:
raise Exception("type:" + type)
@staticmethod
def getLostPoint(qrCode):
moduleCount = qrCode.getModuleCount();
lostPoint = 0;
#// LEVEL1
for row in range(moduleCount):
for col in range(moduleCount):
sameCount = 0;
dark = qrCode.isDark(row, col);
for r in range(-1, 2):
if (row + r < 0 or moduleCount <= row + r):
continue
for c in range(-1, 2):
if (col + c < 0 or moduleCount <= col + c):
continue
if (r == 0 and c == 0):
continue
if (dark == qrCode.isDark(row + r, col + c) ):
sameCount+=1
if (sameCount > 5):
lostPoint += (3 + sameCount - 5)
#// LEVEL2
for row in range(moduleCount - 1):
for col in range(moduleCount - 1):
count = 0;
if (qrCode.isDark(row, col ) ): count+=1
if (qrCode.isDark(row + 1, col ) ): count+=1
if (qrCode.isDark(row, col + 1) ): count+=1
if (qrCode.isDark(row + 1, col + 1) ): count+=1
if (count == 0 or count == 4):
lostPoint += 3
#// LEVEL3
for row in range(moduleCount):
for col in range(moduleCount - 6):
if (qrCode.isDark(row, col)
and not qrCode.isDark(row, col + 1)
and qrCode.isDark(row, col + 2)
and qrCode.isDark(row, col + 3)
and qrCode.isDark(row, col + 4)
and not qrCode.isDark(row, col + 5)
and qrCode.isDark(row, col + 6) ):
lostPoint += 40
for col in range(moduleCount):
for row in range(moduleCount - 6):
if (qrCode.isDark(row, col)
and not qrCode.isDark(row + 1, col)
and qrCode.isDark(row + 2, col)
and qrCode.isDark(row + 3, col)
and qrCode.isDark(row + 4, col)
and not qrCode.isDark(row + 5, col)
and qrCode.isDark(row + 6, col) ):
lostPoint += 40
#// LEVEL4
darkCount = 0;
for col in range(moduleCount):
for row in range(moduleCount):
if (qrCode.isDark(row, col) ):
darkCount+=1
ratio = abs(100 * darkCount / moduleCount / moduleCount - 50) / 5
lostPoint += ratio * 10
return lostPoint
class QRMath:
@staticmethod
def glog(n):
if (n < 1):
raise Exception("glog(" + n + ")")
return LOG_TABLE[n];
@staticmethod
def gexp(n):
while n < 0:
n += 255
while n >= 256:
n -= 255
return EXP_TABLE[n];
EXP_TABLE = [x for x in range(256)]
LOG_TABLE = [x for x in range(256)]
for i in range(8):
EXP_TABLE[i] = 1 << i;
for i in range(8, 256):
EXP_TABLE[i] = EXP_TABLE[i - 4] ^ EXP_TABLE[i - 5] ^ EXP_TABLE[i - 6] ^ EXP_TABLE[i - 8]
for i in range(255):
LOG_TABLE[EXP_TABLE[i] ] = i
class QRPolynomial:
def __init__(self, num, shift):
if (len(num) == 0):
raise Exception(num.length + "/" + shift)
offset = 0
while offset < len(num) and num[offset] == 0:
offset += 1
self.num = [0 for x in range(len(num)-offset+shift)]
for i in range(len(num) - offset):
self.num[i] = num[i + offset]
def get(self, index):
return self.num[index]
def getLength(self):
return len(self.num)
def multiply(self, e):
num = [0 for x in range(self.getLength() + e.getLength() - 1)];
for i in range(self.getLength()):
for j in range(e.getLength()):
num[i + j] ^= QRMath.gexp(QRMath.glog(self.get(i) ) + QRMath.glog(e.get(j) ) )
return QRPolynomial(num, 0);
def mod(self, e):
if (self.getLength() - e.getLength() < 0):
return self;
ratio = QRMath.glog(self.get(0) ) - QRMath.glog(e.get(0) )
num = [0 for x in range(self.getLength())]
for i in range(self.getLength()):
num[i] = self.get(i);
for i in range(e.getLength()):
num[i] ^= QRMath.gexp(QRMath.glog(e.get(i) ) + ratio)
# recursive call
return QRPolynomial(num, 0).mod(e);
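# Illustrative sketch, not part of the ported library: polynomial arithmetic
# here is over GF(256), where addition is XOR, so
# (x + 1) * (x + 2) = x**2 + 3x + 2.
def _demo_gf_polynomial():
    p = QRPolynomial([1, 1], 0).multiply(QRPolynomial([1, 2], 0))
    return [p.get(i) for i in range(p.getLength())]  # [1, 3, 2]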
class QRRSBlock:
RS_BLOCK_TABLE = [
#// L
#// M
#// Q
#// H
#// 1
[1, 26, 19],
[1, 26, 16],
[1, 26, 13],
[1, 26, 9],
#// 2
[1, 44, 34],
[1, 44, 28],
[1, 44, 22],
[1, 44, 16],
#// 3
[1, 70, 55],
[1, 70, 44],
[2, 35, 17],
[2, 35, 13],
#// 4
[1, 100, 80],
[2, 50, 32],
[2, 50, 24],
[4, 25, 9],
#// 5
[1, 134, 108],
[2, 67, 43],
[2, 33, 15, 2, 34, 16],
[2, 33, 11, 2, 34, 12],
#// 6
[2, 86, 68],
[4, 43, 27],
[4, 43, 19],
[4, 43, 15],
#// 7
[2, 98, 78],
[4, 49, 31],
[2, 32, 14, 4, 33, 15],
[4, 39, 13, 1, 40, 14],
#// 8
[2, 121, 97],
[2, 60, 38, 2, 61, 39],
[4, 40, 18, 2, 41, 19],
[4, 40, 14, 2, 41, 15],
#// 9
[2, 146, 116],
[3, 58, 36, 2, 59, 37],
[4, 36, 16, 4, 37, 17],
[4, 36, 12, 4, 37, 13],
#// 10
[2, 86, 68, 2, 87, 69],
[4, 69, 43, 1, 70, 44],
[6, 43, 19, 2, 44, 20],
[6, 43, 15, 2, 44, 16],
# 11
[4, 101, 81],
[1, 80, 50, 4, 81, 51],
[4, 50, 22, 4, 51, 23],
[3, 36, 12, 8, 37, 13],
# 12
[2, 116, 92, 2, 117, 93],
[6, 58, 36, 2, 59, 37],
[4, 46, 20, 6, 47, 21],
[7, 42, 14, 4, 43, 15],
# 13
[4, 133, 107],
[8, 59, 37, 1, 60, 38],
[8, 44, 20, 4, 45, 21],
[12, 33, 11, 4, 34, 12],
# 14
[3, 145, 115, 1, 146, 116],
[4, 64, 40, 5, 65, 41],
[11, 36, 16, 5, 37, 17],
[11, 36, 12, 5, 37, 13],
# 15
[5, 109, 87, 1, 110, 88],
[5, 65, 41, 5, 66, 42],
[5, 54, 24, 7, 55, 25],
[11, 36, 12],
# 16
[5, 122, 98, 1, 123, 99],
[7, 73, 45, 3, 74, 46],
[15, 43, 19, 2, 44, 20],
[3, 45, 15, 13, 46, 16],
# 17
[1, 135, 107, 5, 136, 108],
[10, 74, 46, 1, 75, 47],
[1, 50, 22, 15, 51, 23],
[2, 42, 14, 17, 43, 15],
# 18
[5, 150, 120, 1, 151, 121],
[9, 69, 43, 4, 70, 44],
[17, 50, 22, 1, 51, 23],
[2, 42, 14, 19, 43, 15],
# 19
[3, 141, 113, 4, 142, 114],
[3, 70, 44, 11, 71, 45],
[17, 47, 21, 4, 48, 22],
[9, 39, 13, 16, 40, 14],
# 20
[3, 135, 107, 5, 136, 108],
[3, 67, 41, 13, 68, 42],
[15, 54, 24, 5, 55, 25],
[15, 43, 15, 10, 44, 16],
# 21
[4, 144, 116, 4, 145, 117],
[17, 68, 42],
[17, 50, 22, 6, 51, 23],
[19, 46, 16, 6, 47, 17],
# 22
[2, 139, 111, 7, 140, 112],
[17, 74, 46],
[7, 54, 24, 16, 55, 25],
[34, 37, 13],
# 23
[4, 151, 121, 5, 152, 122],
[4, 75, 47, 14, 76, 48],
[11, 54, 24, 14, 55, 25],
[16, 45, 15, 14, 46, 16],
# 24
[6, 147, 117, 4, 148, 118],
[6, 73, 45, 14, 74, 46],
[11, 54, 24, 16, 55, 25],
[30, 46, 16, 2, 47, 17],
# 25
[8, 132, 106, 4, 133, 107],
[8, 75, 47, 13, 76, 48],
[7, 54, 24, 22, 55, 25],
[22, 45, 15, 13, 46, 16],
# 26
[10, 142, 114, 2, 143, 115],
[19, 74, 46, 4, 75, 47],
[28, 50, 22, 6, 51, 23],
[33, 46, 16, 4, 47, 17],
# 27
[8, 152, 122, 4, 153, 123],
[22, 73, 45, 3, 74, 46],
[8, 53, 23, 26, 54, 24],
[12, 45, 15, 28, 46, 16],
# 28
[3, 147, 117, 10, 148, 118],
[3, 73, 45, 23, 74, 46],
[4, 54, 24, 31, 55, 25],
[11, 45, 15, 31, 46, 16],
# 29
[7, 146, 116, 7, 147, 117],
[21, 73, 45, 7, 74, 46],
[1, 53, 23, 37, 54, 24],
[19, 45, 15, 26, 46, 16],
# 30
[5, 145, 115, 10, 146, 116],
[19, 75, 47, 10, 76, 48],
[15, 54, 24, 25, 55, 25],
[23, 45, 15, 25, 46, 16],
# 31
[13, 145, 115, 3, 146, 116],
[2, 74, 46, 29, 75, 47],
[42, 54, 24, 1, 55, 25],
[23, 45, 15, 28, 46, 16],
# 32
[17, 145, 115],
[10, 74, 46, 23, 75, 47],
[10, 54, 24, 35, 55, 25],
[19, 45, 15, 35, 46, 16],
# 33
[17, 145, 115, 1, 146, 116],
[14, 74, 46, 21, 75, 47],
[29, 54, 24, 19, 55, 25],
[11, 45, 15, 46, 46, 16],
# 34
[13, 145, 115, 6, 146, 116],
[14, 74, 46, 23, 75, 47],
[44, 54, 24, 7, 55, 25],
[59, 46, 16, 1, 47, 17],
# 35
[12, 151, 121, 7, 152, 122],
[12, 75, 47, 26, 76, 48],
[39, 54, 24, 14, 55, 25],
[22, 45, 15, 41, 46, 16],
# 36
[6, 151, 121, 14, 152, 122],
[6, 75, 47, 34, 76, 48],
[46, 54, 24, 10, 55, 25],
[2, 45, 15, 64, 46, 16],
# 37
[17, 152, 122, 4, 153, 123],
[29, 74, 46, 14, 75, 47],
[49, 54, 24, 10, 55, 25],
[24, 45, 15, 46, 46, 16],
# 38
[4, 152, 122, 18, 153, 123],
[13, 74, 46, 32, 75, 47],
[48, 54, 24, 14, 55, 25],
[42, 45, 15, 32, 46, 16],
# 39
[20, 147, 117, 4, 148, 118],
[40, 75, 47, 7, 76, 48],
[43, 54, 24, 22, 55, 25],
[10, 45, 15, 67, 46, 16],
# 40
[19, 148, 118, 6, 149, 119],
[18, 75, 47, 31, 76, 48],
[34, 54, 24, 34, 55, 25],
[20, 45, 15, 61, 46, 16]
]
def __init__(self, totalCount, dataCount):
self.totalCount = totalCount
self.dataCount = dataCount
@staticmethod
def getRSBlocks(typeNumber, errorCorrectLevel):
rsBlock = QRRSBlock.getRsBlockTable(typeNumber, errorCorrectLevel);
if rsBlock == None:
raise Exception("bad rs block @ typeNumber:" + typeNumber + "/errorCorrectLevel:" + errorCorrectLevel)
length = len(rsBlock) / 3
list = []
for i in range(length):
count = rsBlock[i * 3 + 0]
totalCount = rsBlock[i * 3 + 1]
dataCount = rsBlock[i * 3 + 2]
for j in range(count):
list.append(QRRSBlock(totalCount, dataCount))
return list;
@staticmethod
def getRsBlockTable(typeNumber, errorCorrectLevel):
if errorCorrectLevel == QRErrorCorrectLevel.L:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 0];
elif errorCorrectLevel == QRErrorCorrectLevel.M:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 1];
elif errorCorrectLevel == QRErrorCorrectLevel.Q:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 2];
elif errorCorrectLevel == QRErrorCorrectLevel.H:
return QRRSBlock.RS_BLOCK_TABLE[(typeNumber - 1) * 4 + 3];
else:
return None;
class QRBitBuffer:
def __init__(self):
self.buffer = []
self.length = 0
def __repr__(self):
return ".".join([str(n) for n in self.buffer])
def get(self, index):
        # Integer division: math.floor() returns a float, which is not a
        # valid list index.
        bufIndex = index // 8
        return ((self.buffer[bufIndex] >> (7 - index % 8)) & 1) == 1
def put(self, num, length):
for i in range(length):
self.putBit( ( (num >> (length - i - 1) ) & 1) == 1)
def getLengthInBits(self):
return self.length
def putBit(self, bit):
bufIndex = self.length // 8
if len(self.buffer) <= bufIndex:
self.buffer.append(0)
if bit:
self.buffer[bufIndex] |= (0x80 >> (self.length % 8) )
self.length+=1
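# Small illustrative sketch, not part of the ported library: codewords are
# packed most-significant bit first, so four bits of 0b1101 followed by the
# byte 0xAB yield the buffer [0xDA, 0xB0] with 12 bits used.
def _demo_bitbuffer():
    buf = QRBitBuffer()
    buf.put(0b1101, 4)
    buf.put(0xAB, 8)
    return buf.buffer, buf.getLengthInBits()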
class QRCodeInkscape(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
#PARSE OPTIONS
self.OptionParser.add_option("--text",
action="store", type="string",
dest="TEXT", default='www.inkscape.org')
self.OptionParser.add_option("--typenumber",
action="store", type="string",
dest="TYPENUMBER", default="0")
self.OptionParser.add_option("--correctionlevel",
action="store", type="string",
dest="CORRECTIONLEVEL", default="0")
self.OptionParser.add_option("--encoding",
action="store", type="string",
dest="input_encode", default="latin_1")
self.OptionParser.add_option("--modulesize",
action="store", type="float",
dest="MODULESIZE", default=10)
def effect(self):
scale = self.unittouu('1px') # convert to document units
so = self.options
if so.TEXT == '': #abort if converting blank text
inkex.errormsg(_('Please enter an input text'))
else:
#INKSCAPE GROUP TO CONTAIN EVERYTHING
so.TEXT = unicode(so.TEXT, so.input_encode)
centre = self.view_center #Put in in the centre of the current view
grp_transform = 'translate' + str( centre ) + ' scale(%f)' % scale
grp_name = 'QR Code: '+so.TEXT
grp_attribs = {inkex.addNS('label','inkscape'):grp_name,
'transform':grp_transform }
grp = inkex.etree.SubElement(self.current_layer, 'g', grp_attribs) #the group to put everything in
#GENERATE THE QRCODE
qr = QRCode(int(so.TYPENUMBER), int(so.CORRECTIONLEVEL))
qr.addData(so.TEXT)
qr.make()
qr.makeSVG(grp, so.MODULESIZE)
if __name__ == '__main__':
e = QRCodeInkscape()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| gpl-2.0 |
JorgeCoock/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
    # Associate any OBJECTVAR instances with GeometryField. Of course,
    # this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyway.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as msg:
new_msg = (
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"\n'
'Error message: %s.') % (table_name, geo_col, msg)
six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
| bsd-3-clause |
Orav/kbengine | kbe/res/scripts/common/Lib/distutils/errors.py | 5 | 3674 | """distutils.errors
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(e.g. bad command-line arguments).
This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""
class DistutilsError (Exception):
"""The root of all Distutils evil."""
pass
class DistutilsModuleError (DistutilsError):
"""Unable to load an expected module, or to find an expected class
within some module (in particular, command modules and classes)."""
pass
class DistutilsClassError (DistutilsError):
"""Some command class (or possibly distribution class, if anyone
feels a need to subclass Distribution) is found not to be holding
    up its end of the bargain, i.e. implementing some part of the
    "command" interface."""
pass
class DistutilsGetoptError (DistutilsError):
"""The option table provided to 'fancy_getopt()' is bogus."""
pass
class DistutilsArgError (DistutilsError):
"""Raised by fancy_getopt in response to getopt.error -- ie. an
error in the command line usage."""
pass
class DistutilsFileError (DistutilsError):
"""Any problems in the filesystem: expected file not found, etc.
Typically this is for problems that we detect before OSError
could be raised."""
pass
class DistutilsOptionError (DistutilsError):
"""Syntactic/semantic errors in command options, such as use of
mutually conflicting options, or inconsistent options,
badly-spelled values, etc. No distinction is made between option
values originating in the setup script, the command line, config
files, or what-have-you -- but if we *know* something originated in
the setup script, we'll raise DistutilsSetupError instead."""
pass
class DistutilsSetupError (DistutilsError):
"""For errors that can be definitely blamed on the setup script,
such as invalid keyword arguments to 'setup()'."""
pass
class DistutilsPlatformError (DistutilsError):
"""We don't know how to do something on the current platform (but
    we do know how to do it on some platform) -- e.g. trying to compile
C files on a platform not supported by a CCompiler subclass."""
pass
class DistutilsExecError (DistutilsError):
"""Any problems executing an external program (such as the C
compiler, when compiling C files)."""
pass
class DistutilsInternalError (DistutilsError):
"""Internal inconsistencies or impossibilities (obviously, this
should never be seen if the code is working!)."""
pass
class DistutilsTemplateError (DistutilsError):
"""Syntax error in a file list template."""
class DistutilsByteCompileError(DistutilsError):
"""Byte compile error."""
# Exception classes used by the CCompiler implementation classes
class CCompilerError (Exception):
"""Some compile/link operation failed."""
class PreprocessError (CCompilerError):
"""Failure to preprocess one or more C/C++ files."""
class CompileError (CCompilerError):
"""Failure to compile one or more C/C++ source files."""
class LibError (CCompilerError):
"""Failure to create a static library from one or more C/C++ object
files."""
class LinkError (CCompilerError):
"""Failure to link one or more C/C++ object files into an executable
or shared library file."""
class UnknownFileError (CCompilerError):
"""Attempt to process an unknown file type."""
| lgpl-3.0 |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/htmlentitydefs.py | 390 | 18054 | """HTML character entity references."""
# maps the HTML entity name to the Unicode codepoint
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
'Alpha': 0x0391, # greek capital letter alpha, U+0391
'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
'Beta': 0x0392, # greek capital letter beta, U+0392
'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
'Chi': 0x03a7, # greek capital letter chi, U+03A7
'Dagger': 0x2021, # double dagger, U+2021 ISOpub
'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
'Eta': 0x0397, # greek capital letter eta, U+0397
'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
'Iota': 0x0399, # greek capital letter iota, U+0399
'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
'Kappa': 0x039a, # greek capital letter kappa, U+039A
'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
'Mu': 0x039c, # greek capital letter mu, U+039C
'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
'Nu': 0x039d, # greek capital letter nu, U+039D
'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
'Omicron': 0x039f, # greek capital letter omicron, U+039F
'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
'Rho': 0x03a1, # greek capital letter rho, U+03A1
'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
'Tau': 0x03a4, # greek capital letter tau, U+03A4
'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
'Zeta': 0x0396, # greek capital letter zeta, U+0396
'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
'amp': 0x0026, # ampersand, U+0026 ISOnum
'and': 0x2227, # logical and = wedge, U+2227 ISOtech
'ang': 0x2220, # angle, U+2220 ISOamso
'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
'cap': 0x2229, # intersection = cap, U+2229 ISOtech
'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
'cent': 0x00a2, # cent sign, U+00A2 ISOnum
'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
'cong': 0x2245, # approximately equal to, U+2245 ISOtech
'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
'cup': 0x222a, # union = cup, U+222A ISOtech
'curren': 0x00a4, # currency sign, U+00A4 ISOnum
'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
'dagger': 0x2020, # dagger, U+2020 ISOpub
'darr': 0x2193, # downwards arrow, U+2193 ISOnum
'deg': 0x00b0, # degree sign, U+00B0 ISOnum
'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
'diams': 0x2666, # black diamond suit, U+2666 ISOpub
'divide': 0x00f7, # division sign, U+00F7 ISOnum
'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
'emsp': 0x2003, # em space, U+2003 ISOpub
'ensp': 0x2002, # en space, U+2002 ISOpub
'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
'equiv': 0x2261, # identical to, U+2261 ISOtech
'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
'euro': 0x20ac, # euro sign, U+20AC NEW
'exist': 0x2203, # there exists, U+2203 ISOtech
'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
'forall': 0x2200, # for all, U+2200 ISOtech
'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
'frasl': 0x2044, # fraction slash, U+2044 NEW
'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
'gt': 0x003e, # greater-than sign, U+003E ISOnum
'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
'harr': 0x2194, # left right arrow, U+2194 ISOamsa
'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
'infin': 0x221e, # infinity, U+221E ISOtech
'int': 0x222b, # integral, U+222B ISOtech
'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
'isin': 0x2208, # element of, U+2208 ISOtech
'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
'le': 0x2264, # less-than or equal to, U+2264 ISOtech
'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
'loz': 0x25ca, # lozenge, U+25CA ISOpub
'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
'lt': 0x003c, # less-than sign, U+003C ISOnum
'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
'mdash': 0x2014, # em dash, U+2014 ISOpub
'micro': 0x00b5, # micro sign, U+00B5 ISOnum
'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
'minus': 0x2212, # minus sign, U+2212 ISOtech
'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
'ndash': 0x2013, # en dash, U+2013 ISOpub
'ne': 0x2260, # not equal to, U+2260 ISOtech
'ni': 0x220b, # contains as member, U+220B ISOtech
'not': 0x00ac, # not sign, U+00AC ISOnum
'notin': 0x2209, # not an element of, U+2209 ISOtech
'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
'oline': 0x203e, # overline = spacing overscore, U+203E NEW
'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
'or': 0x2228, # logical or = vee, U+2228 ISOtech
'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
'part': 0x2202, # partial differential, U+2202 ISOtech
'permil': 0x2030, # per mille sign, U+2030 ISOtech
'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
'pound': 0x00a3, # pound sign, U+00A3 ISOnum
'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
'prop': 0x221d, # proportional to, U+221D ISOtech
'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
'radic': 0x221a, # square root = radical sign, U+221A ISOtech
'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
'rfloor': 0x230b, # right floor, U+230B ISOamsc
'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
'sect': 0x00a7, # section sign, U+00A7 ISOnum
'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
'spades': 0x2660, # black spade suit, U+2660 ISOpub
'sub': 0x2282, # subset of, U+2282 ISOtech
'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
'sum': 0x2211, # n-ary summation, U+2211 ISOamsb
'sup': 0x2283, # superset of, U+2283 ISOtech
'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
'there4': 0x2234, # therefore, U+2234 ISOtech
'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
'thinsp': 0x2009, # thin space, U+2009 ISOpub
'thorn': 0x00fe, # latin small letter thorn, U+00FE ISOlat1
'tilde': 0x02dc, # small tilde, U+02DC ISOdia
'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
'trade': 0x2122, # trade mark sign, U+2122 ISOnum
'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
}
# maps the Unicode codepoint to the HTML entity name
codepoint2name = {}
# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {}
for (name, codepoint) in name2codepoint.iteritems():
codepoint2name[codepoint] = name
if codepoint <= 0xff:
entitydefs[name] = chr(codepoint)
else:
entitydefs[name] = '&#%d;' % codepoint
del name, codepoint
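# Example usage (an illustrative sketch, not part of the original module;
# Python 2 interpreter session):
#
# >>> name2codepoint['amp']
# 38
# >>> entitydefs['amp']    # Latin-1 range: the literal character
# '&'
# >>> entitydefs['alpha']  # outside Latin-1: a numeric character reference
# '&#945;'
# >>> codepoint2name[0x2122]
# 'trade'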
| gpl-2.0 |
schlueter/ansible | lib/ansible/modules/cloud/vmware/vca_fw.py | 26 | 7965 | #!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vca_fw
short_description: add or remove firewall rules in a gateway in a vca
description:
- Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
fw_rules:
description:
- A list of firewall rules to be added to the gateway. Please see the examples for valid entries.
required: True
extends_documentation_fragment: vca.documentation
'''
EXAMPLES = '''
#Add a set of firewall rules
- hosts: localhost
connection: local
tasks:
- vca_fw:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
fw_rules:
- description: "ben testing"
source_ip: "Any"
dest_ip: 192.0.2.23
- description: "ben testing 2"
source_ip: 192.0.2.50
source_port: "Any"
dest_port: "22"
dest_ip: 192.0.2.101
is_enable: "true"
enable_logging: "false"
protocol: "Tcp"
policy: "allow"
'''
try:
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
except ImportError:
# normally set a flag here but it will be caught when testing for
# the existence of pyvcloud (see module_utils/vca.py). This just
# protects against generating an exception at runtime
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vca import VcaError, vca_argument_spec, vca_login
VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
'dest_ip', 'dest_port', 'source_ip', 'source_port',
'protocol']
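# The helpers below convert between pyvcloud ProtocolsType objects and plain
# protocol-name strings; the tuple order matches VALID_PROTO above.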
def protocol_to_tuple(protocol):
return (protocol.get_Tcp(),
protocol.get_Udp(),
protocol.get_Icmp(),
protocol.get_Other(),
protocol.get_Any())
def protocol_to_string(protocol):
protocol = protocol_to_tuple(protocol)
if protocol[0] is True:
return 'Tcp'
elif protocol[1] is True:
return 'Udp'
elif protocol[2] is True:
return 'Icmp'
elif protocol[3] is True:
return 'Other'
elif protocol[4] is True:
return 'Any'
def protocol_to_type(protocol):
try:
protocols = ProtocolsType()
setattr(protocols, protocol, True)
return protocols
except AttributeError:
raise VcaError("The value in protocol is not valid")
def validate_fw_rules(fw_rules):
for rule in fw_rules:
for k in rule.keys():
if k not in VALID_RULE_KEYS:
raise VcaError("%s is not a valid key in fw rules, please "
"check above.." % k, valid_keys=VALID_RULE_KEYS)
rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower()
rule['dest_ip'] = rule.get('dest_ip', 'Any').lower()
rule['source_port'] = str(rule.get('source_port', 'Any')).lower()
rule['source_ip'] = rule.get('source_ip', 'Any').lower()
rule['protocol'] = rule.get('protocol', 'Any').lower()
rule['policy'] = rule.get('policy', 'allow').lower()
rule['is_enable'] = rule.get('is_enable', True)
rule['enable_logging'] = rule.get('enable_logging', False)
rule['description'] = rule.get('description', 'rule added by Ansible')
return fw_rules
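# Example (an illustrative sketch): a minimal rule is normalized with
# defaults, e.g. validate_fw_rules([{'dest_ip': '192.0.2.23'}]) returns
# [{'dest_port': 'any', 'dest_ip': '192.0.2.23', 'source_port': 'any',
# 'source_ip': 'any', 'protocol': 'any', 'policy': 'allow',
# 'is_enable': True, 'enable_logging': False,
# 'description': 'rule added by Ansible'}]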
def fw_rules_to_dict(rules):
fw_rules = list()
for rule in rules:
fw_rules.append(
dict(
dest_port=rule.get_DestinationPortRange().lower(),
dest_ip=rule.get_DestinationIp().lower(),
source_port=rule.get_SourcePortRange().lower(),
source_ip=rule.get_SourceIp().lower(),
protocol=protocol_to_string(rule.get_Protocols()).lower(),
policy=rule.get_Policy().lower(),
is_enable=rule.get_IsEnabled(),
enable_logging=rule.get_EnableLogging(),
description=rule.get_Description()
)
)
return fw_rules
def create_fw_rule(is_enable, description, policy, protocol, dest_port,
dest_ip, source_port, source_ip, enable_logging):
return FirewallRuleType(IsEnabled=is_enable,
Description=description,
Policy=policy,
Protocols=protocol_to_type(protocol),
DestinationPortRange=dest_port,
DestinationIp=dest_ip,
SourcePortRange=source_port,
SourceIp=source_ip,
EnableLogging=enable_logging)
def main():
argument_spec = vca_argument_spec()
argument_spec.update(
dict(
fw_rules=dict(required=True, type='list'),
gateway_name=dict(default='gateway'),
state=dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
fw_rules = module.params.get('fw_rules')
gateway_name = module.params.get('gateway_name')
vdc_name = module.params['vdc_name']
vca = vca_login(module)
gateway = vca.get_gateway(vdc_name, gateway_name)
if not gateway:
module.fail_json(msg="Not able to find the gateway %s, please check "
"the gateway_name param" % gateway_name)
fwservice = gateway._getFirewallService()
rules = gateway.get_fw_rules()
current_rules = fw_rules_to_dict(rules)
try:
desired_rules = validate_fw_rules(fw_rules)
except VcaError as e:
module.fail_json(msg=e.message)
result = dict(changed=False)
result['current_rules'] = current_rules
result['desired_rules'] = desired_rules
updates = list()
additions = list()
deletions = list()
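# Compare rules positionally: an index present in both lists but differing
# is an update, an index only in desired_rules is an addition, and any
# trailing current rules beyond the desired list are deletions.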
for (index, rule) in enumerate(desired_rules):
try:
if rule != current_rules[index]:
updates.append((index, rule))
except IndexError:
additions.append(rule)
# Any current rules beyond the end of the desired list are stale.
if len(current_rules) > len(desired_rules):
for rule in current_rules[len(desired_rules):]:
deletions.append(rule)
for rule in additions:
if not module.check_mode:
rule['protocol'] = rule['protocol'].capitalize()
gateway.add_fw_rule(**rule)
result['changed'] = True
for index, rule in updates:
if not module.check_mode:
rule = create_fw_rule(**rule)
fwservice.replace_FirewallRule_at(index, rule)
result['changed'] = True
keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
for rule in deletions:
if not module.check_mode:
kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
# fw_rules_to_dict() already stored protocol as a lowercase string, so
# only the capitalization expected by delete_fw_rule() needs restoring.
kwargs['protocol'] = kwargs['protocol'].capitalize()
gateway.delete_fw_rule(**kwargs)
result['changed'] = True
if not module.check_mode and result['changed'] is True:
task = gateway.save_services_configuration()
if task:
vca.block_until_completed(task)
result['rules_updated'] = len(updates)
result['rules_added'] = len(additions)
result['rules_deleted'] = len(deletions)
return module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
AndroidOpenDevelopment/android_external_chromium_org | third_party/tlslite/tlslite/integration/httptlsconnection.py | 115 | 4314 | # Authors:
# Trevor Perrin
# Kees Bos - Added ignoreAbruptClose parameter
# Dimitris Moraitis - Anon ciphersuites
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + httplib."""
import socket
try:
import httplib
except ImportError:
# Python 3
from http import client as httplib
from tlslite.tlsconnection import TLSConnection
from tlslite.integration.clienthelper import ClientHelper
class HTTPTLSConnection(httplib.HTTPConnection, ClientHelper):
"""This class extends L{httplib.HTTPConnection} to support TLS."""
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None,
username=None, password=None,
certChain=None, privateKey=None,
checker=None,
settings=None,
ignoreAbruptClose=False,
anon=False):
"""Create a new HTTPTLSConnection.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP
or you can do certificate-based server
authentication with one of these argument combinations:
- x509Fingerprint
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Thus you should be prepared to handle TLS-specific
exceptions when calling methods inherited from
L{httplib.HTTPConnection} such as request(), connect(), and
send(). See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP username. Requires the
'password' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.x509certchain.X509CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP arguments.
@type privateKey: L{tlslite.utils.rsakey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP arguments.
@type checker: L{tlslite.checker.Checker}
@param checker: Callable object called after handshaking to
evaluate the connection and raise an Exception if necessary.
@type settings: L{tlslite.handshakesettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
@type ignoreAbruptClose: bool
@param ignoreAbruptClose: ignore the TLSAbruptCloseError on
unexpected hangup.
"""
if source_address:
httplib.HTTPConnection.__init__(self, host, port, strict,
timeout, source_address)
else:
httplib.HTTPConnection.__init__(self, host, port, strict,
timeout)
self.ignoreAbruptClose = ignoreAbruptClose
ClientHelper.__init__(self,
username, password,
certChain, privateKey,
checker,
settings,
anon)
def connect(self):
httplib.HTTPConnection.connect(self)
self.sock = TLSConnection(self.sock)
self.sock.ignoreAbruptClose = self.ignoreAbruptClose
ClientHelper._handshake(self, self.sock)
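# Example usage (an illustrative sketch; the host and credentials are
# placeholders, not from the original module):
#
# conn = HTTPTLSConnection('example.com', 443,
#                          username='alice', password='secret')
# conn.request('GET', '/')
# response = conn.getresponse()
# print(response.status)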
| bsd-3-clause |
dossier/dossier.web | dossier/web/tests/test_label_folders.py | 1 | 4434 | from __future__ import absolute_import, division, print_function
import pytest
from dossier.web.label_folders import Folders
from dossier.web.tests import config_local, kvl, store, label_store # noqa
@pytest.yield_fixture # noqa
def folders(store, label_store):
yield Folders(store, label_store)
@pytest.yield_fixture # noqa
def folders_prefix(store, label_store):
yield Folders(store, label_store, prefix='foo')
def test_folder_add(folders):
folders.add_folder('foo_bar')
assert list(folders.folders()) == ['foo_bar']
def test_folder_add_prefix(folders_prefix):
folders_prefix.add_folder('foo_bar')
assert list(folders_prefix.folders()) == ['foo_bar']
def test_folder_add_annotator(folders):
folders.add_folder('foo', ann_id='ann_foo')
folders.add_folder('bar', ann_id='ann_bar')
assert list(folders.folders()) == []
assert list(folders.folders(ann_id='ann_foo')) == ['foo']
assert list(folders.folders(ann_id='ann_bar')) == ['bar']
def test_folder_add_bad_id(folders):
with pytest.raises(ValueError):
folders.add_folder('foo bar')
with pytest.raises(ValueError):
folders.add_folder('foo/bar')
def test_subfolder_add(folders):
folders.add_folder('foo')
folders.add_item('foo', 'subfoo', 'a', 'suba')
assert list(folders.subfolders('foo')) == ['subfoo']
assert list(folders.items('foo', 'subfoo')) == [('a', 'suba')]
def test_subfolder_add_prefix(folders_prefix):
folders_prefix.add_folder('foo')
folders_prefix.add_item('foo', 'subfoo', 'a', 'suba')
assert list(folders_prefix.subfolders('foo')) == ['subfoo']
assert list(folders_prefix.items('foo', 'subfoo')) == [('a', 'suba')]
def test_subfolder_add_no_subtopic(folders):
folders.add_folder('foo')
folders.add_item('foo', 'subfoo', 'a')
assert list(folders.subfolders('foo')) == ['subfoo']
assert list(folders.items('foo', 'subfoo')) == [('a', '')]
def test_subfolder_add_annotator(folders):
folders.add_folder('foo', ann_id='ann_foo')
folders.add_folder('bar', ann_id='ann_bar')
folders.add_item('foo', 'subfoo', 'a', 'suba', ann_id='ann_foo')
folders.add_item('bar', 'subbar', 'b', 'subb', ann_id='ann_bar')
# Make sure the default annotator doesn't see anything.
with pytest.raises(KeyError):
folders.subfolders('foo')
with pytest.raises(KeyError):
folders.subfolders('bar')
with pytest.raises(KeyError):
next(folders.items('foo', 'subfoo'))
with pytest.raises(KeyError):
next(folders.items('bar', 'subbar'))
assert list(folders.subfolders('foo', ann_id='ann_foo')) == ['subfoo']
assert list(folders.subfolders('bar', ann_id='ann_bar')) == ['subbar']
assert list(folders.items('foo', 'subfoo', ann_id='ann_foo')) \
== [('a', 'suba')]
assert list(folders.items('bar', 'subbar', ann_id='ann_bar')) \
== [('b', 'subb')]
def test_subfolder_add_no_folder(folders):
with pytest.raises(KeyError):
folders.add_item('foo', 'subfoo', 'a', 'suba')
def test_subfolder_add_no_folder_annotator(folders):
folders.add_folder('foo', ann_id='ann_foo')
with pytest.raises(KeyError):
folders.add_item('foo', 'subfoo', 'a', 'suba')
def test_subfolder_add_bad_id(folders):
folders.add_folder('foo')
with pytest.raises(ValueError):
folders.add_item('foo', 'sub foo', 'a', 'suba')
with pytest.raises(ValueError):
folders.add_item('foo', 'sub/foo', 'a', 'suba')
def test_parent_subfolders(folders):
folders.add_folder('foo')
folders.add_item('foo', 'subfoo', 'a', 'suba')
assert list(folders.parent_subfolders('a')) == [('foo', 'subfoo')]
assert list(folders.parent_subfolders(('a', 'suba'))) \
== [('foo', 'subfoo')]
def test_parent_subfolders_annotator(folders):
folders.add_folder('foo', ann_id='ann_foo')
folders.add_item('foo', 'subfoo', 'a', 'suba', ann_id='ann_foo')
folders.add_folder('bar', ann_id='ann_bar')
folders.add_item('bar', 'subbar', 'a', 'suba', ann_id='ann_bar')
# Make sure anonymous can't see them.
assert list(folders.parent_subfolders('a')) == []
assert list(folders.parent_subfolders(('a', 'suba'))) == []
assert list(folders.parent_subfolders('a', ann_id='ann_foo')) \
== [('foo', 'subfoo')]
assert list(folders.parent_subfolders(('a', 'suba'), ann_id='ann_bar')) \
== [('bar', 'subbar')]
| mit |