| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable; shown below each row) |
|---|---|---|---|---|
| srikk595/Multilingual-Search-System-for-tweets | refs/heads/master | partA/venv/lib/python2.7/site-packages/wheel/test/test_basic.py | 472 |
"""
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
assert not '.data/scripts/' in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
return json.loads(open(filename, 'rb').read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
context = lambda: list(supp)
for wheel in cand_wheels:
wheel.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
| wilsoncampusano/gae-boilerplate | refs/heads/master | bp_includes/external/wtforms/compat.py | 119 |
import sys
if sys.version_info[0] >= 3:
text_type = str
string_types = str,
iteritems = lambda o: o.items()
itervalues = lambda o: o.values()
izip = zip
else:
text_type = unicode
string_types = basestring,
iteritems = lambda o: o.iteritems()
itervalues = lambda o: o.itervalues()
from itertools import izip
def with_metaclass(meta, base=object):
return meta("NewBase", (base,), {})
| AIML/scikit-learn | refs/heads/master | sklearn/decomposition/tests/test_incremental_pca.py | 297 |
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that partial_fit raises an error when n_components has been
# changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing the number of input features between fits raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| CallaJun/hackprince | refs/heads/master | indico/numpy/core/generate_numpy_api.py | 113 |
from __future__ import division, print_function
import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
# use annotated api when running under cpychecker
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
if (!PyCObject_Check(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version %%x but this version of numpy is %%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version %%x but this version of numpy is %%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#else
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
Numpy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
scalar_bool_values = sources[1]
types_api = sources[2]
multiarray_funcs = sources[3]
multiarray_api = sources[:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name][0]
annotations = multiarray_funcs[name][1:]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
f.return_type,
f.args, api_name)
for name, val in global_vars.items():
index, type = val
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, val in scalar_bool_values.items():
index = val[0]
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, val in types_api.items():
index = val[0]
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
raise AssertionError("Multiarray API size mismatch %d %d" %
(len(multiarray_api_dict), len(multiarray_api_index)))
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
fid = open(header_file, 'w')
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
fid.write(s)
fid.close()
# Write to c-code
fid = open(c_file, 'w')
s = c_template % ',\n'.join(init_list)
fid.write(s)
fid.close()
# write to documentation
fid = open(doc_file, 'w')
fid.write(c_api_header)
for func in numpyapi_list:
fid.write(func.to_ReST())
fid.write('\n\n')
fid.close()
return targets
| twiindan/selenium_lessons | refs/heads/master | 02_data_types/exercises/03_lifo_exercice.py | 1 |
# We can implement a LIFO (Last In First Out) Queue using the append and pop list methods.
stack = ["python", "selenium", "hello"]
element = None
second_element = None
# add an element to the end of the list
print (stack)
# retrieve the last item from list
print(stack)
print(element)
# retrieve the last item from list
print(stack)
print(second_element)
# add an element to the end of the list
print(stack)
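# One possible solution (an illustrative sketch matching the comments above,
# not part of the original exercise skeleton):
#   stack.append("world")          # add an element to the end of the list
#   element = stack.pop()          # retrieve and remove the last item
#   second_element = stack.pop()   # retrieve and remove the new last item
#   stack.append("again")          # add another element to the end of the list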
| kenorb-contrib/BitTorrent | refs/heads/master | BitTorrent/IPC.py | 2 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Written by Greg Hazel
# based on code by Uoti Urpala
from __future__ import generators
import os
import time
import Queue
import socket
import logging
import traceback
if os.name == 'nt':
from BitTorrent import pykill
#import BTL.likewin32api as win32api
import win32api
import win32event
import winerror
import win32ui # needed for dde
import dde
import pywin.mfc.object
from binascii import b2a_hex
from BTL.translation import _
from BitTorrent.RawServer_twisted import Handler
from BitTorrent.platform import get_dot_dir
from BitTorrent import BTFailure
from BTL.platform import app_name, encode_for_filesystem
from BTL.exceptions import str_exc
ipc_logger = logging.getLogger('IPC')
ipc_logger.setLevel(logging.DEBUG)
def toint(s):
return int(b2a_hex(s), 16)
def tobinary(i):
return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
chr((i >> 8) & 0xFF) + chr(i & 0xFF))
CONTROL_SOCKET_PORT = 46881
class ControlsocketListener(Handler):
def __init__(self, callback):
self.callback = callback
def connection_made(self, connection):
connection.handler = MessageReceiver(self.callback)
class MessageReceiver(Handler):
def __init__(self, callback):
self.callback = callback
self._buffer = []
self._buffer_len = 0
self._reader = self._read_messages()
self._next_len = self._reader.next()
def _read_messages(self):
while True:
yield 4
l = toint(self._message)
yield l
action = self._message
if action in ('no-op',):
self.callback(action, None)
else:
yield 4
l = toint(self._message)
yield l
data = self._message
if action in ('show_error','start_torrent'):
self.callback(action, data)
else:
yield 4
l = toint(self._message)
yield l
path = self._message
if action in ('publish_torrent',):
self.callback(action, data, path)
# copied from Connecter.py
def data_came_in(self, conn, s):
while True:
i = self._next_len - self._buffer_len
if i > len(s):
self._buffer.append(s)
self._buffer_len += len(s)
return
m = s[:i]
if self._buffer_len > 0:
self._buffer.append(m)
m = ''.join(self._buffer)
self._buffer = []
self._buffer_len = 0
s = s[i:]
self._message = m
try:
self._next_len = self._reader.next()
except StopIteration:
self._reader = None
conn.close()
return
def connection_lost(self, conn):
self._reader = None
pass
def connection_flushed(self, conn):
pass
class IPC(object):
"""Used for communication between raw server thread and other threads."""
def __init__(self, rawserver, config, name="controlsocket"):
self.rawserver = rawserver
self.name = name
self.config = config
self.callback = None
self._command_q = Queue.Queue()
def create(self):
pass
def start(self, callback):
self.callback = callback
while not self._command_q.empty():
self.callback(*self._command_q.get())
def send_command(self, command, *args):
pass
def handle_command(self, *args):
if callable(self.callback):
return self.callback(*args)
self._command_q.put(args)
def stop(self):
pass
class IPCSocketBase(IPC):
def __init__(self, *args):
IPC.__init__(self, *args)
self.port = CONTROL_SOCKET_PORT
self.controlsocket = None
def start(self, callback):
IPC.start(self, callback)
self.rawserver.start_listening(self.controlsocket,
ControlsocketListener(self.handle_command))
def stop(self):
# safe double-stop, since MultiTorrent seems to be prone to do so
if self.controlsocket:
# it's possible we're told to stop after controlsocket creation but
# before rawserver registration
if self.rawserver:
self.rawserver.stop_listening(self.controlsocket)
self.controlsocket.close()
self.controlsocket = None
class IPCUnixSocket(IPCSocketBase):
def __init__(self, *args):
IPCSocketBase.__init__(self, *args)
data_dir,bad = encode_for_filesystem(self.config['data_dir'])
if bad:
raise BTFailure(_("Invalid path encoding."))
self.socket_filename = os.path.join(data_dir, self.name)
def create(self):
filename = self.socket_filename
if os.path.exists(filename):
try:
self.send_command('no-op')
except BTFailure:
pass
else:
raise BTFailure(_("Could not create control socket: already in use"))
try:
os.unlink(filename)
except OSError, e:
raise BTFailure(_("Could not remove old control socket filename:")
+ str_exc(e))
try:
controlsocket = self.rawserver.create_unixserversocket(filename)
except socket.error, e:
raise BTFailure(_("Could not create control socket: ") + str_exc(e))
self.controlsocket = controlsocket
# blocking version without rawserver
def send_command(self, command, *args):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
filename = self.socket_filename
try:
s.connect(filename)
s.send(tobinary(len(command)))
s.send(command)
for arg in args:
s.send(tobinary(len(arg)))
s.send(arg)
s.close()
except socket.error, e:
s.close()
raise BTFailure(_("Could not send command: ") + str_exc(e))
class IPCWin32Socket(IPCSocketBase):
def __init__(self, *args):
IPCSocketBase.__init__(self, *args)
self.socket_filename = os.path.join(self.config['data_dir'], self.name)
self.mutex = None
self.master = 0
def _get_sic_path(self):
configdir = get_dot_dir()
filename = os.path.join(configdir, ".btcontrol")
return filename
def create(self):
obtain_mutex = 1
mutex = win32event.CreateMutex(None, obtain_mutex, app_name)
# prevent the PyHANDLE from going out of scope, ints are fine
self.mutex = int(mutex)
mutex.Detach()
lasterror = win32api.GetLastError()
if lasterror == winerror.ERROR_ALREADY_EXISTS:
takeover = 0
try:
# if the mutex already exists, discover which port to connect to.
# if something goes wrong with that, tell us to take over the
# role of master
takeover = self.discover_sic_socket()
except:
pass
if not takeover:
raise BTFailure(_("Global mutex already created."))
self.master = 1
# lazy free port code
port_limit = 50000
while self.port < port_limit:
try:
controlsocket = self.rawserver.create_serversocket(self.port,
'127.0.0.1')
self.controlsocket = controlsocket
break
except socket.error, e:
self.port += 1
if self.port >= port_limit:
raise BTFailure(_("Could not find an open port!"))
filename = self._get_sic_path()
(path, name) = os.path.split(filename)
try:
os.makedirs(path)
except OSError, e:
# 17 is dir exists
if e.errno != 17:
raise BTFailure(_("Could not create application data directory!"))
f = open(filename, "w")
f.write(str(self.port))
f.close()
# we're done writing the control file, release the mutex so other instances can lock it and read the file
# but don't destroy the handle until the application closes, so that the named mutex is still around
win32event.ReleaseMutex(self.mutex)
def discover_sic_socket(self):
takeover = 0
# mutex exists and has been opened (not created, not locked).
# wait for it so we can read the file
r = win32event.WaitForSingleObject(self.mutex, win32event.INFINITE)
# WAIT_OBJECT_0 means the mutex was obtained
# WAIT_ABANDONED means the mutex was obtained, and it had previously been abandoned
if (r != win32event.WAIT_OBJECT_0) and (r != win32event.WAIT_ABANDONED):
raise BTFailure(_("Could not acquire global mutex lock for controlsocket file!"))
filename = self._get_sic_path()
try:
f = open(filename, "r")
self.port = int(f.read())
f.close()
except:
if r == win32event.WAIT_ABANDONED:
ipc_logger.warning(_("A previous instance of BT was not cleaned up properly. Continuing."))
# take over the role of master
takeover = 1
else:
ipc_logger.warning((_("Another instance of BT is running, but \"%s\" does not exist.\n") % filename)+
_("I'll guess at the port."))
try:
self.port = CONTROL_SOCKET_PORT
self.send_command('no-op')
ipc_logger.warning(_("Port found: %d") % self.port)
try:
f = open(filename, "w")
f.write(str(self.port))
f.close()
except:
traceback.print_exc()
except:
# this is where this system falls down.
# There's another copy of BitTorrent running, or something locking the mutex,
# but I can't communicate with it.
ipc_logger.warning(_("Could not find port."))
# we're done reading the control file, release the mutex so other instances can lock it and read the file
win32event.ReleaseMutex(self.mutex)
return takeover
#blocking version without rawserver
def send_command(self, command, *datas):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('127.0.0.1', self.port))
s.send(tobinary(len(command)))
s.send(command)
for data in datas:
data = data.encode('utf-8')
s.send(tobinary(len(data)))
s.send(data)
s.close()
except socket.error, e:
try:
s.close()
except:
pass
raise BTFailure(_("Could not send command: ") + str_exc(e))
def stop(self):
if self.master:
r = win32event.WaitForSingleObject(self.mutex, win32event.INFINITE)
filename = self._get_sic_path()
try:
os.remove(filename)
except OSError, e:
# print, but continue
traceback.print_exc()
self.master = 0
win32event.ReleaseMutex(self.mutex)
# close it so the named mutex goes away
win32api.CloseHandle(self.mutex)
self.mutex = None
if os.name == 'nt':
class HandlerObject(pywin.mfc.object.Object):
def __init__(self, handler, target):
self.handler = handler
pywin.mfc.object.Object.__init__(self, target)
class Topic(HandlerObject):
def __init__(self, handler, target):
target.AddItem(dde.CreateStringItem(""))
HandlerObject.__init__(self, handler, target)
def Request(self, x):
# null byte hack
x = x.replace("\\**0", "\0")
items = x.split("|")
self.handler(items[0], *items[1:])
return ("OK")
# remote procedure call
#def Exec(self, x):
# exec x
class Server(HandlerObject):
def CreateSystemTopic(self):
return Topic(self.handler, dde.CreateServerSystemTopic())
def Status(self, s):
ipc_logger.debug(_("IPC Status: %s") % s)
def stop(self):
self.Shutdown()
self.Destroy()
class SingleInstanceMutex(object):
def __init__(self):
obtain_mutex = False
self.mutex = win32event.CreateMutex(None, obtain_mutex, app_name)
self.lasterror = win32api.GetLastError()
def close(self):
del self.mutex
def IsAnotherInstanceRunning(self):
return winerror.ERROR_ALREADY_EXISTS == self.lasterror
if os.name == 'nt':
g_mutex = SingleInstanceMutex()
class IPCWin32DDE(IPC):
def create(self):
self.server = None
if g_mutex.IsAnotherInstanceRunning():
# test whether there is a program actually running that holds
# the mutex.
for i in xrange(10):
# try to connect first
self.client = Server(None, dde.CreateServer())
self.client.Create(app_name, dde.CBF_FAIL_SELFCONNECTIONS|dde.APPCMD_CLIENTONLY)
self.conversation = dde.CreateConversation(self.client)
try:
self.conversation.ConnectTo(app_name, self.name)
raise BTFailure("DDE Conversation connected.")
except dde.error, e:
# no one is listening
pass
# clean up
self.client.stop()
del self.client
del self.conversation
ipc_logger.warning("No DDE Server is listening, but the global mutex exists. Retry %d!" % i)
time.sleep(1.0)
# oh no you didn't!
if i == 5:
pykill.kill_process(app_name)
# continuing might be dangerous (two instances)
raise Exception("No DDE Server is listening, but the global mutex exists!")
# start server
self.server = Server(self.handle_command, dde.CreateServer())
self.server.Create(app_name, dde.CBF_FAIL_SELFCONNECTIONS|dde.APPCLASS_STANDARD)
self.server.AddTopic(Topic(self.handle_command, dde.CreateTopic(self.name)))
def send_command(self, command, *args):
s = '|'.join([command, ] + list(args))
# null byte hack
if s.count("\0") > 0:
ipc_logger.warning("IPC: String with null byte(s):" + s)
s = s.replace("\0", "\\**0")
s = s.encode('utf-8')
result = self.conversation.Request(s)
def stop(self):
if self.server:
server = self.server
self.server = None
server.stop()
if os.name == 'nt':
#ipc_interface = IPCWin32Socket
ipc_interface = IPCWin32DDE
else:
ipc_interface = IPCUnixSocket
| fnouama/intellij-community | refs/heads/master | python/testData/copyPaste/singleLine/IndentInIfInDef.src.py | 83 |
def f(self):
<selection> x = 1</selection>
if True:
b = 2
| qaisermazhar/qaisermazhar.github.io | refs/heads/master | markdown_generator/publications.py | 197 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either one from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
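# For illustration only, a hypothetical `publications.tsv` row matching the
# columns the code below reads (tabs shown as `<TAB>`; all values invented):
#
#   pub_date<TAB>title<TAB>venue<TAB>excerpt<TAB>citation<TAB>url_slug<TAB>paper_url
#   2020-01-01<TAB>A Sample Paper<TAB>Sample Venue<TAB>Short excerpt.<TAB>Doe (2020). A Sample Paper.<TAB>sample-paper<TAB>http://example.com/sample.pdf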
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), remove the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| akloster/blender-vraag | refs/heads/master | vraag/verbs.py | 1 |
import bpy
import bpy_types
from functools import wraps
import io_mesh_stl.blender_utils
import io_mesh_stl.stl_utils
from vraag.utils import find_materials
verbs = {}
def vraag_verb(method_or_name, base=True, extended=True):
def decorator(method):
verbs[method_or_name] = method
return method
if callable(method_or_name):
verbs[method_or_name.__name__] = method_or_name
return method_or_name
else:
return decorator
def find_materials(*args):
for arg in args:
if isinstance(arg, str):
yield bpy.data.materials[arg]
continue
if isinstance(arg, bpy.types.Material):
yield arg
continue
try:
yield from find_materials(list(arg))
except TypeError as te:
pass
def filter_by_materials(elements, *args):
materials = list(find_materials(*args))
for o in elements:
for ms in o.material_slots:
if ms.material in materials:
yield o
break
@vraag_verb
def on_layer(vl, number):
if bpy.app.version >= (2,80):
raise NotImplementedError("Layers are not implemented for Blender 2.80 and above.")
return vl.__class__([element for element in vl if element.layers[number]])
@vraag_verb
def material(vl, *args):
return vl.__class__(list(filter_by_materials(vl.elements, *args)))
def find_scenes(*args):
for arg in args:
if isinstance(arg, str):
yield bpy.data.scenes[arg]
continue
if isinstance(arg, bpy.types.Scene):
yield arg
continue
try:
l = list(args)
except TypeError as te:
pass
else:
yield from find_scenes(l)
def filter_by_scenes(elements, *args):
scenes = list(find_scenes(*args))
for o in elements:
for scene in scenes:
if o.name in scene.objects:
yield o
break
@vraag_verb
def scene(vl, *args):
return vl.__class__(list(filter_by_scenes(vl.elements, *args)))
@vraag_verb
def hide(vl):
if bpy.app.version >= (2,80):
for element in vl.elements:
element.hide_viewport = True
else:
for element in vl.elements:
element.hide = True
return vl
@vraag_verb
def names(vl):
for element in vl.elements:
yield element.name
@vraag_verb
def show(vl):
for element in vl.elements:
if bpy.app.version >= (2,80):
element.hide_viewport = False
else:
element.hide = False
return vl
@vraag_verb
def set_prop(vl, property_name, value):
for element in vl.elements:
try:
setattr(element, property_name, value)
except:
continue
return vl
@vraag_verb
def get_prop(vl, property_name):
l = []
for element in vl.elements:
try:
l.append(getattr(element, property_name))
except:
continue
return l
@vraag_verb
def iget_prop(vl, property_name):
for element in vl.elements:
try:
yield getattr(element, property_name)
except:
continue
@vraag_verb
def iprop(vl, property_name, value=None):
    # Lazy counterpart of prop(): yield the property values, or yield the
    # updated list after setting.
    if value is None:
        yield from iget_prop(vl, property_name)
    else:
        yield set_prop(vl, property_name, value)
@vraag_verb
def prop(vl, property_name, value=None):
if value is None:
return get_prop(vl, property_name)
else:
return set_prop(vl, property_name, value)
@vraag_verb("apply")
def vraag_apply(vl, func):
for element in vl.elements:
yield func(element)
return vl
@vraag_verb("map")
def vraag_map(vl, func):
return map(func, vl.elements)
@vraag_verb("filter")
def vraag_filter(vl, func):
return vl[vl.map(func)]
@vraag_verb
def activate(vl):
activation_types = [(bpy_types.Object, bpy.context.scene.objects)]
for activation_type, collection in activation_types:
for element in vl.elements:
if type(element) is activation_type:
collection.active = element
break
return vl
@vraag_verb
def select(vl):
for element in vl.elements:
if type(element) is bpy_types.Object:
if bpy.app.version >= (2,80):
element.select_set(True)
else:
element.select = True
return vl
@vraag_verb
def remove(vl):
for element in vl.elements:
bpy.data.objects.remove(element, do_unlink=True)
return None
@vraag_verb
def deselect(vl):
for element in vl.elements:
if bpy.app.version >= (2,80):
element.select_set(False)
else:
element.select = False
return vl
@vraag_verb
def export_stl(vl, filepath, ascii=False):
faces = []
for element in vl.elements:
faces += list(io_mesh_stl.blender_utils.faces_from_mesh(element,
use_mesh_modifiers=True,
global_matrix=element.matrix_world
))
io_mesh_stl.stl_utils.write_stl(filepath, faces, ascii=ascii)
return vl
| kool79/intellij-community | refs/heads/master | python/helpers/docutils/parsers/rst/languages/ru.py | 57 |
# $Id: ru.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Roman Suzi <rnd@onego.ru>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'\u0431\u043b\u043e\u043a-\u0441\u0442\u0440\u043e\u043a': u'line-block',
u'meta': u'meta',
u'\u043e\u0431\u0440\u0430\u0431\u043e\u0442\u0430\u043d\u043d\u044b\u0439-\u043b\u0438\u0442\u0435\u0440\u0430\u043b':
u'parsed-literal',
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u043d\u0430\u044f-\u0446\u0438\u0442\u0430\u0442\u0430':
u'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'\u0441\u044b\u0440\u043e\u0439': u'raw',
u'\u0437\u0430\u043c\u0435\u043d\u0430': u'replace',
u'\u0442\u0435\u0441\u0442\u043e\u0432\u0430\u044f-\u0434\u0438\u0440\u0435\u043a\u0442\u0438\u0432\u0430-restructuredtext':
u'restructuredtext-test-directive',
u'\u0446\u0435\u043b\u0435\u0432\u044b\u0435-\u0441\u043d\u043e\u0441\u043a\u0438':
u'target-notes',
u'unicode': u'unicode',
u'\u0434\u0430\u0442\u0430': u'date',
u'\u0431\u043e\u043a\u043e\u0432\u0430\u044f-\u043f\u043e\u043b\u043e\u0441\u0430':
u'sidebar',
u'\u0432\u0430\u0436\u043d\u043e': u'important',
u'\u0432\u043a\u043b\u044e\u0447\u0430\u0442\u044c': u'include',
u'\u0432\u043d\u0438\u043c\u0430\u043d\u0438\u0435': u'attention',
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': u'highlights',
u'\u0437\u0430\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'admonition',
u'\u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435':
u'image',
u'\u043a\u043b\u0430\u0441\u0441': u'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'\u043d\u043e\u043c\u0435\u0440-\u0440\u0430\u0437\u0434\u0435\u043b\u0430':
u'sectnum',
u'\u043d\u0443\u043c\u0435\u0440\u0430\u0446\u0438\u044f-\u0440\u0430\u0437'
u'\u0434\u0435\u043b\u043e\u0432': u'sectnum',
u'\u043e\u043f\u0430\u0441\u043d\u043e': u'danger',
u'\u043e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e': u'caution',
u'\u043e\u0448\u0438\u0431\u043a\u0430': u'error',
u'\u043f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430': u'tip',
u'\u043f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436\u0434\u0435\u043d'
u'\u0438\u0435': u'warning',
u'\u043f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'note',
u'\u0440\u0438\u0441\u0443\u043d\u043e\u043a': u'figure',
u'\u0440\u0443\u0431\u0440\u0438\u043a\u0430': u'rubric',
u'\u0441\u043e\u0432\u0435\u0442': u'hint',
u'\u0441\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435': u'contents',
u'\u0442\u0435\u043c\u0430': u'topic',
u'\u044d\u043f\u0438\u0433\u0440\u0430\u0444': u'epigraph',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',}
"""Russian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'\u0430\u043a\u0440\u043e\u043d\u0438\u043c': 'acronym',
u'\u0430\u043d\u043e\u043d\u0438\u043c\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
'anonymous-reference',
u'\u0431\u0443\u043a\u0432\u0430\u043b\u044c\u043d\u043e': 'literal',
u'\u0432\u0435\u0440\u0445\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
'superscript',
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': 'emphasis',
u'\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
'named-reference',
u'\u0438\u043d\u0434\u0435\u043a\u0441': 'index',
u'\u043d\u0438\u0436\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
'subscript',
u'\u0441\u0438\u043b\u044c\u043d\u043e\u0435-\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435':
'strong',
u'\u0441\u043e\u043a\u0440\u0430\u0449\u0435\u043d\u0438\u0435':
'abbreviation',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u0437\u0430\u043c\u0435\u043d\u0430':
'substitution-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-pep': 'pep-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-rfc': 'rfc-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-uri': 'uri-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0437\u0430\u0433\u043b\u0430\u0432\u0438\u0435':
'title-reference',
u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0441\u043d\u043e\u0441\u043a\u0443':
'footnote-reference',
u'\u0446\u0438\u0442\u0430\u0442\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
'citation-reference',
u'\u0446\u0435\u043b\u044c': 'target',
u'raw (translation required)': 'raw',}
"""Mapping of Russian role names to canonical role names for interpreted text.
"""
| walterbender/turtle3D | refs/heads/master | util/configfile.py | 4 |
#!/usr/bin/env python
#
# Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import gobject
class ConfigFile(gobject.GObject):
"""Load/save a simple (key = value) config file"""
__gsignals__ = {
'configuration-loaded': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
()),
'configuration-saved': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
()),
}
def __init__(self, config_file_path, valid_keys={}):
gobject.GObject.__init__(self)
self._config_file_path = config_file_path
self._valid_keys = valid_keys
self._config_hash = {}
self._is_loaded = False
def set_valid_keys(self, valid_keys):
self._valid_keys = valid_keys
def is_loaded(self):
return self._is_loaded
def get(self, key, empty_if_not_loaded=False):
if not key in self._valid_keys:
raise RuntimeError("Unknown config value %s" % key)
if key in self._config_hash:
value = self._config_hash[key]
else:
if self._valid_keys[key]["type"] == "text":
value = ""
elif self._valid_keys[key]["type"] == "boolean":
value = False
elif self._valid_keys[key]["type"] == "integer":
value = 0
return value
def set(self, key, value):
if not key in self._valid_keys:
raise RuntimeError("Unknown config value %s" % key)
self._config_hash[key] = value
def load(self):
try:
config_file = open(self._config_file_path, 'r')
lines = config_file.readlines()
config_file.close()
for line in lines:
line = line.strip()
k, v = line.split('=')
k = k.strip(' ')
v = v.strip(' ')
if not k in self._valid_keys:
raise RuntimeError("Unknown config value %s" % k)
value_type = self._valid_keys[k]["type"]
if value_type == "text":
value = v
elif value_type == "boolean":
value = eval(v)
elif value_type == "integer":
value = int(v)
self._config_hash[k] = value
self._is_loaded = True
self.emit('configuration-loaded')
except Exception, e:
print e
return self._is_loaded
def save(self):
config_file = open(self._config_file_path, 'w')
for k in self._config_hash.keys():
v = self._config_hash[k]
l = "%s = %s\n" % (k, v)
config_file.write(l)
config_file.close()
self.emit('configuration-saved')
def dump_keys(self):
print "\n\nDumping keys\n\n"
for k in self._config_hash.keys():
v = self._config_hash[k]
l = "%s = %s\n" % (k, v)
print l
def test_save_load(test_config_file):
keys = {}
keys["nick"] = {"type": "text"}
keys["account_id"] = {"type": "text"}
keys["server"] = {"type": "text"}
keys["port"] = {"type": "text"}
keys["password"] = {"type": "text"}
keys["register"] = {"type": "text"}
c = ConfigFile(test_config_file)
c.set_valid_keys(keys)
c.set("nick", "rgs")
c.set("account_id", "rgs@andromeda")
c.set("server", "andromeda")
c.set("port", 5223)
c.set("password", "97c74fa0dc3b39b8c87f119fa53cced2b7040786")
c.set("register", True)
c.save()
c = ConfigFile(test_config_file)
c.set_valid_keys(keys)
c.load()
c.dump_keys()
def _configuration_saved_cb(config_file_obj):
print "_configuration_saved_cb called"
config_file_obj.dump_keys()
def _configuration_loaded_cb(config_file_obj):
print "_configuration_loaded_cb called"
config_file_obj.dump_keys()
def test_signals(test_config_file):
keys = {}
keys["nick"] = {"type": "text"}
keys["account_id"] = {"type": "text"}
keys["server"] = {"type": "text"}
keys["port"] = {"type": "text"}
keys["password"] = {"type": "text"}
keys["register"] = {"type": "text"}
c = ConfigFile(test_config_file)
c.connect('configuration-saved', _configuration_saved_cb)
c.set_valid_keys(keys)
c.set("nick", "rgs")
c.set("account_id", "rgs@andromeda")
c.set("server", "andromeda")
c.set("port", 5223)
c.set("password", "97c74fa0dc3b39b8c87f119fa53cced2b7040786")
c.set("register", True)
c.save()
c = ConfigFile(test_config_file)
c.connect('configuration-loaded', _configuration_loaded_cb)
c.set_valid_keys(keys)
c.load()
if __name__ == "__main__":
test_save_load("/tmp/configfile.0001")
test_signals("/tmp/configfile.0002")
| ridfrustum/lettuce | refs/heads/master | tests/functional/invalid_module_name/terrain.py | 57 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce import world
world.invalid_passed = True
| gymglish/pelican | refs/heads/master | pelican/tests/test_generators.py | 17 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import locale
import os
from codecs import open
from shutil import rmtree
from tempfile import mkdtemp
from pelican.generators import (ArticlesGenerator, Generator, PagesGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.tests.support import get_settings, unittest
from pelican.writers import Writer
try:
from unittest.mock import MagicMock
except ImportError:
try:
from mock import MagicMock
except ImportError:
MagicMock = False
CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'content')
class TestGenerator(unittest.TestCase):
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
self.settings = get_settings()
self.settings['READERS'] = {'asc': None}
self.generator = Generator(self.settings.copy(), self.settings,
CUR_DIR, self.settings['THEME'], None)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_include_path(self):
self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'}
filename = os.path.join(CUR_DIR, 'content', 'article.rst')
include_path = self.generator._include_path
self.assertTrue(include_path(filename))
self.assertTrue(include_path(filename, extensions=('rst',)))
self.assertFalse(include_path(filename, extensions=('md',)))
ignored_file = os.path.join(CUR_DIR, 'content', 'ignored1.rst')
self.assertFalse(include_path(ignored_file))
def test_get_files_exclude(self):
"""Test that Generator.get_files() properly excludes directories.
"""
# We use our own Generator so we can give it our own content path
generator = Generator(
context=self.settings.copy(),
settings=self.settings,
path=os.path.join(CUR_DIR, 'nested_content'),
theme=self.settings['THEME'], output_path=None)
filepaths = generator.get_files(paths=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(
expected_files - found_files,
"get_files() failed to find one or more files")
# Test string as `paths` argument rather than list
filepaths = generator.get_files(paths='maindir')
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(
expected_files - found_files,
"get_files() failed to find one or more files")
filepaths = generator.get_files(paths=[''], exclude=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn(
'maindir.md', found_files,
"get_files() failed to exclude a top-level directory")
self.assertNotIn(
'subdir.md', found_files,
"get_files() failed to exclude a subdir of an excluded directory")
filepaths = generator.get_files(
paths=[''],
exclude=[os.path.join('maindir', 'subdir')])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn(
'subdir.md', found_files,
"get_files() failed to exclude a subdirectory")
filepaths = generator.get_files(paths=[''], exclude=['subdir'])
found_files = {os.path.basename(f) for f in filepaths}
self.assertIn(
'subdir.md', found_files,
"get_files() excluded a subdirectory by name, ignoring its path")
class TestArticlesGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
settings['CACHE_CONTENT'] = False
cls.generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
cls.generator.generate_context()
cls.articles = cls.distill_articles(cls.generator.articles)
def setUp(self):
self.temp_cache = mkdtemp(prefix='pelican_cache.')
def tearDown(self):
rmtree(self.temp_cache)
@staticmethod
def distill_articles(articles):
return [[article.title, article.status, article.category.name,
article.template] for article in articles]
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_generate_feeds(self):
settings = get_settings()
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
writer = MagicMock()
generator.generate_feeds(writer)
writer.write_feed.assert_called_with([], settings,
'feeds/all.atom.xml')
generator = ArticlesGenerator(
context=settings, settings=get_settings(FEED_ALL_ATOM=None),
path=None, theme=settings['THEME'], output_path=None)
writer = MagicMock()
generator.generate_feeds(writer)
self.assertFalse(writer.write_feed.called)
def test_generate_context(self):
articles_expected = [
['Article title', 'published', 'Default', 'article'],
['Article with markdown and summary metadata multi', 'published',
'Default', 'article'],
['Article with markdown and summary metadata single', 'published',
'Default', 'article'],
['Article with markdown containing footnotes', 'published',
'Default', 'article'],
['Article with template', 'published', 'Default', 'custom'],
['Rst with filename metadata', 'published', 'yeah', 'article'],
['Test Markdown extensions', 'published', 'Default', 'article'],
['Test markdown File', 'published', 'test', 'article'],
['Test md File', 'published', 'test', 'article'],
['Test mdown File', 'published', 'test', 'article'],
['Test metadata duplicates', 'published', 'test', 'article'],
['Test mkd File', 'published', 'test', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['Article with Nonconformant HTML meta tags', 'published',
'Default', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'Default', 'article'],
['This is an article with category !', 'published', 'yeah',
'article'],
['This is an article with multiple authors!', 'published',
'Default', 'article'],
['This is an article with multiple authors!', 'published',
'Default', 'article'],
['This is an article with multiple authors in list format!',
'published', 'Default', 'article'],
['This is an article with multiple authors in lastname, '
'firstname format!', 'published', 'Default', 'article'],
['This is an article without category !', 'published', 'Default',
'article'],
['This is an article without category !', 'published',
'TestCategory', 'article'],
['An Article With Code Block To Test Typogrify Ignore',
'published', 'Default', 'article'],
['マックOS X 10.8でパイソンとVirtualenvをインストールと設定',
'published', '指導書', 'article'],
]
self.assertEqual(sorted(articles_expected), sorted(self.articles))
def test_generate_categories(self):
# test for name
# categories are grouped by slug; if two categories have the same slug
        # but different names, they will be grouped together, and the first
        # one in process order will define the name for that category
categories = [cat.name for cat, _ in self.generator.categories]
categories_alternatives = (
sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書']),
sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in self.generator.categories]
categories_expected = ['default', 'testcategory', 'yeah', 'test',
'zhi-dao-shu']
self.assertEqual(sorted(categories), sorted(categories_expected))
def test_do_not_use_folder_as_category(self):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['USE_FOLDER_AS_CATEGORY'] = False
settings['CACHE_PATH'] = self.temp_cache
settings['READERS'] = {'asc': None}
settings['filenames'] = {}
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
# test for name
# categories are grouped by slug; if two categories have the same slug
        # but different names, they will be grouped together, and the first
        # one in process order will define the name for that category
categories = [cat.name for cat, _ in generator.categories]
categories_alternatives = (
sorted(['Default', 'Yeah', 'test', '指導書']),
sorted(['Default', 'yeah', 'test', '指導書']),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in generator.categories]
categories_expected = ['default', 'yeah', 'test', 'zhi-dao-shu']
self.assertEqual(sorted(categories), sorted(categories_expected))
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_direct_templates_save_as_default(self):
settings = get_settings(filenames={})
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
write = MagicMock()
generator.generate_direct_templates(write)
write.assert_called_with("archives.html",
generator.get_template("archives"), settings,
blog=True, paginated={}, page_name='archives')
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_direct_templates_save_as_modified(self):
settings = get_settings()
settings['DIRECT_TEMPLATES'] = ['archives']
settings['ARCHIVES_SAVE_AS'] = 'archives/index.html'
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
write = MagicMock()
generator.generate_direct_templates(write)
write.assert_called_with("archives/index.html",
generator.get_template("archives"), settings,
blog=True, paginated={},
page_name='archives/index')
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_direct_templates_save_as_false(self):
settings = get_settings()
settings['DIRECT_TEMPLATES'] = ['archives']
settings['ARCHIVES_SAVE_AS'] = False
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
write = MagicMock()
generator.generate_direct_templates(write)
self.assertEqual(write.call_count, 0)
def test_per_article_template(self):
"""
        Articles with a custom template get that template, while standard
        articles without one fall back to the default 'article' template
"""
custom_template = ['Article with template', 'published', 'Default',
'custom']
standard_template = ['This is a super article !', 'published', 'Yeah',
'article']
self.assertIn(custom_template, self.articles)
self.assertIn(standard_template, self.articles)
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_period_in_timeperiod_archive(self):
"""
Test that the context of a generated period_archive is passed
'period' : a tuple of year, month, day according to the time period
"""
settings = get_settings(filenames={})
settings['YEAR_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/index.html'
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970]
self.assertEqual(len(dates), 1)
# among other things it must have at least been called with this
settings["period"] = (1970,)
write.assert_called_with("posts/1970/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
del settings["period"]
settings['MONTH_ARCHIVE_SAVE_AS'] = \
'posts/{date:%Y}/{date:%b}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates
if d.date.year == 1970 and d.date.month == 1]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January")
# among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
del settings["period"]
settings['DAY_ARCHIVE_SAVE_AS'] = \
'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [
d for d in generator.dates if
d.date.year == 1970 and
d.date.month == 1 and
d.date.day == 1
]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January", 1)
# among other things it must have at least been called with this
write.assert_called_with("posts/1970/Jan/01/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
def test_nonexistent_template(self):
"""Attempt to load a non-existent template"""
settings = get_settings(filenames={})
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
self.assertRaises(Exception, generator.get_template, "not_a_template")
def test_generate_authors(self):
"""Check authors generation."""
authors = [author.name for author, _ in self.generator.authors]
authors_expected = sorted(
['Alexis Métaireau', 'Author, First', 'Author, Second',
'First Author', 'Second Author'])
self.assertEqual(sorted(authors), authors_expected)
# test for slug
authors = [author.slug for author, _ in self.generator.authors]
authors_expected = ['alexis-metaireau', 'author-first',
'author-second', 'first-author', 'second-author']
self.assertEqual(sorted(authors), sorted(authors_expected))
def test_standard_metadata_in_default_metadata(self):
settings = get_settings(filenames={})
settings['CACHE_CONTENT'] = False
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['DEFAULT_METADATA'] = (('author', 'Blogger'),
# category will be ignored in favor of
# DEFAULT_CATEGORY
('category', 'Random'),
('tags', 'general, untagged'))
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
authors = sorted([author.name for author, _ in generator.authors])
authors_expected = sorted(['Alexis Métaireau', 'Blogger',
'Author, First', 'Author, Second',
'First Author', 'Second Author'])
self.assertEqual(authors, authors_expected)
categories = sorted([category.name
for category, _ in generator.categories])
categories_expected = [
sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']),
sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書'])]
self.assertIn(categories, categories_expected)
tags = sorted([tag.name for tag in generator.tags])
tags_expected = sorted(['bar', 'foo', 'foobar', 'general', 'untagged',
'パイソン', 'マック'])
self.assertEqual(tags, tags_expected)
def test_article_order_by(self):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['ARTICLE_ORDER_BY'] = 'title'
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
expected = [
'An Article With Code Block To Test Typogrify Ignore',
'Article title',
'Article with Nonconformant HTML meta tags',
'Article with markdown and summary metadata multi',
'Article with markdown and summary metadata single',
'Article with markdown containing footnotes',
'Article with template',
'Rst with filename metadata',
'Test Markdown extensions',
'Test markdown File',
'Test md File',
'Test mdown File',
'Test metadata duplicates',
'Test mkd File',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is an article with category !',
('This is an article with multiple authors in lastname, '
'firstname format!'),
'This is an article with multiple authors in list format!',
'This is an article with multiple authors!',
'This is an article with multiple authors!',
'This is an article without category !',
'This is an article without category !',
'マックOS X 10.8でパイソンとVirtualenvをインストールと設定']
articles = [article.title for article in generator.articles]
self.assertEqual(articles, expected)
# reversed title
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['ARTICLE_ORDER_BY'] = 'reversed-title'
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
articles = [article.title for article in generator.articles]
self.assertEqual(articles, list(reversed(expected)))
class TestPageGenerator(unittest.TestCase):
    # Note: every time you want to test for a new field, make sure the test
    # pages in "TestPages" all have that field, add it to the list distilled
    # in distill_pages, and then update the assertEqual in
    # test_generate_context to match the expected output.
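    # For example (an illustrative sketch, not part of the original tests):
    # to additionally verify each page's slug, distill_pages would return
    #     [page.title, page.status, page.template, page.slug]
    # and every expected list below would gain a matching slug entry.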
def setUp(self):
self.temp_cache = mkdtemp(prefix='pelican_cache.')
def tearDown(self):
rmtree(self.temp_cache)
def distill_pages(self, pages):
return [[page.title, page.status, page.template] for page in pages]
def test_generate_context(self):
settings = get_settings(filenames={})
settings['CACHE_PATH'] = self.temp_cache
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['DEFAULT_DATE'] = (1970, 1, 1)
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
hidden_pages = self.distill_pages(generator.hidden_pages)
pages_expected = [
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['This is a test page with a preset template', 'published',
'custom'],
['Page with a bunch of links', 'published', 'page'],
['A Page (Test) for sorting', 'published', 'page'],
]
hidden_pages_expected = [
['This is a test hidden page', 'hidden', 'page'],
['This is a markdown test hidden page', 'hidden', 'page'],
['This is a test hidden page with a custom template', 'hidden',
'custom']
]
self.assertEqual(sorted(pages_expected), sorted(pages))
self.assertEqual(
sorted(pages_expected),
sorted(self.distill_pages(generator.context['pages'])))
self.assertEqual(sorted(hidden_pages_expected), sorted(hidden_pages))
self.assertEqual(
sorted(hidden_pages_expected),
sorted(self.distill_pages(generator.context['hidden_pages'])))
def test_generate_sorted(self):
settings = get_settings(filenames={})
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1)
# default sort (filename)
pages_expected_sorted_by_filename = [
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['A Page (Test) for sorting', 'published', 'page'],
['Page with a bunch of links', 'published', 'page'],
['This is a test page with a preset template', 'published',
'custom'],
]
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_filename, pages)
# sort by title
pages_expected_sorted_by_title = [
['A Page (Test) for sorting', 'published', 'page'],
['Page with a bunch of links', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['This is a test page', 'published', 'page'],
['This is a test page with a preset template', 'published',
'custom'],
]
settings['PAGE_ORDER_BY'] = 'title'
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
# sort by title reversed
pages_expected_sorted_by_title = [
['This is a test page with a preset template', 'published',
'custom'],
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['Page with a bunch of links', 'published', 'page'],
['A Page (Test) for sorting', 'published', 'page'],
]
settings['PAGE_ORDER_BY'] = 'reversed-title'
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
def test_tag_and_category_links_on_generated_pages(self):
"""
Test to ensure links of the form {tag}tagname and {category}catname
are generated correctly on pages
"""
settings = get_settings(filenames={})
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1)
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages_by_title = {p.title: p.content for p in generator.pages}
test_content = pages_by_title['Page with a bunch of links']
self.assertIn('<a href="/category/yeah.html">', test_content)
self.assertIn('<a href="/tag/matsuku.html">', test_content)
class TestTemplatePagesGenerator(unittest.TestCase):
TEMPLATE_CONTENT = "foo: {{ foo }}"
def setUp(self):
self.temp_content = mkdtemp(prefix='pelicantests.')
self.temp_output = mkdtemp(prefix='pelicantests.')
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_generate_output(self):
settings = get_settings()
settings['STATIC_PATHS'] = ['static']
settings['TEMPLATE_PAGES'] = {
'template/source.html': 'generated/file.html'
}
generator = TemplatePagesGenerator(
context={'foo': 'bar'}, settings=settings,
path=self.temp_content, theme='', output_path=self.temp_output)
# create a dummy template file
template_dir = os.path.join(self.temp_content, 'template')
template_path = os.path.join(template_dir, 'source.html')
os.makedirs(template_dir)
with open(template_path, 'w') as template_file:
template_file.write(self.TEMPLATE_CONTENT)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with open(output_path, 'r') as output_file:
self.assertEqual(output_file.read(), 'foo: bar')
class TestStaticGenerator(unittest.TestCase):
def setUp(self):
self.content_path = os.path.join(CUR_DIR, 'mixed_content')
def test_static_excludes(self):
"""Test that StaticGenerator respects STATIC_EXCLUDES.
"""
settings = get_settings(
STATIC_EXCLUDES=['subdir'],
PATH=self.content_path,
STATIC_PATHS=[''],
filenames={})
context = settings.copy()
StaticGenerator(
context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
self.assertNotIn(
'subdir_fake_image.jpg', staticnames,
"StaticGenerator processed a file in a STATIC_EXCLUDES directory")
self.assertIn(
'fake_image.jpg', staticnames,
"StaticGenerator skipped a file that it should have included")
def test_static_exclude_sources(self):
"""Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES.
"""
settings = get_settings(
STATIC_EXCLUDE_SOURCES=True,
PATH=self.content_path,
PAGE_PATHS=[''],
STATIC_PATHS=[''],
CACHE_CONTENT=False,
filenames={})
context = settings.copy()
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(
context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
self.assertFalse(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file")
settings.update(STATIC_EXCLUDE_SOURCES=False)
context = settings.copy()
context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(
context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
self.assertTrue(
any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=False failed to include a markdown file")
|
ChanChiChoi/scikit-learn
|
refs/heads/master
|
sklearn/metrics/regression.py
|
175
|
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
        None is accepted for backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
    multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like, or
        just the corresponding argument if ``multioutput`` is a
        valid keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
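# Illustrative sketch (not part of the original module): _check_reg_targets
# normalizes 1d inputs to column vectors and passes a keyword ``multioutput``
# through unchanged, e.g.
#   >>> _check_reg_targets([3, -0.5, 2], [2.5, 0.0, 2], 'uniform_average')[0]
#   'continuous'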
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
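# Note (illustrative, not in the original source): a root-mean-squared error
# can be obtained by composing this function with ``np.sqrt``, e.g.
#   rmse = np.sqrt(mean_squared_error(y_true, y_pred))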
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
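# Note (illustrative, not in the original source): unlike the other metrics
# in this module, multioutput input raises here, e.g.
#   median_absolute_error([[0.5, 1], [-1, 1]], [[0, 2], [-1, 2]])  # ValueError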
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        The default value corresponds to 'variance_weighted', but it
        will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
    # y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
|
citrix-openstack-build/ceilometer
|
refs/heads/master
|
ceilometer/openstack/common/db/sqlalchemy/session.py
|
3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with, at a minimum, the following kwargs:
sql_connection, sqlite_db
Example:
session.set_defaults(
sql_connection="sqlite:///var/lib/ceilometer/sqlite.db",
sqlite_db="/var/lib/ceilometer/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples:
def get_foo(context, foo):
return model_query(context, models.Foo).\
filter_by(foo=foo).\
first()
def update_foo(context, id, newfoo):
model_query(context, models.Foo).\
filter_by(id=id).\
update({'foo': newfoo})
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. If the connection is dropped before this is possible, the
database will implicitly rollback the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = model_query(context, models.Foo, session).\
filter_by(id=foo_id).\
first()
model_query(context, models.Bar, session).\
filter_by(id=foo_ref['bar_id']).\
update({'bar': newbar})
Note: update_bar is a trivially simple example of using "with session.begin".
While create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so:
def update_bar(context, foo_id, newbar):
subq = model_query(context, models.Foo.id).\
filter_by(id=foo_id).\
limit(1).\
subquery()
model_query(context, models.Bar).\
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
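  For example (an illustrative sketch only, using the legacy with_lockmode()
  query API assumed by this module; not part of the original text):
      rows = model_query(context, models.Foo).\
                  filter_by(foo=foo).\
                  count()
      if rows == 1:
          foo_ref = model_query(context, models.Foo).\
                        filter_by(foo=foo).\
                        with_lockmode('update').\
                        first()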
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this cannot be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example:
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
model.soft_delete() and query.soft_delete().
  The model.soft_delete() method works with a single, already-fetched entry.
  query.soft_delete() makes only one db request for all entries that
  correspond to the query.
* In almost all cases you should use query.soft_delete(). Some examples:
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = model_query(BarModel).\
find(some_condition).\
soft_delete(synchronize_session=True)
# Here synchronize_session is required, because we
            # don't know what is going on in the outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
  However, if you need to work with all entries that correspond to a query
  and then soft delete them, you should use the query.soft_delete() method:
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = model_query(BarModel, session=session).\
find(some_condition)
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import os.path
import re
import time
from eventlet import greenthread
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from ceilometer.openstack.common.db import exception
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log as logging
from ceilometer.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='ceilometer.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE')],
secret=True),
cfg.StrOpt('slave_connection',
default='',
help='The SQLAlchemy connection string used to connect to the '
'slave database',
secret=True),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
    Foreign key constraints are disabled by default in SQLite, so they
    are enabled here for every database connection.
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False,
sqlite_fk=False, slave_session=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
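# Illustrative only (not part of the original module): for sqlite, e.g.
#   _DUP_KEY_RE_DB["sqlite"].match(
#       "(IntegrityError) column name is not unique").group(1)
# yields " name ", which the caller below strips and splits into column names.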
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
In this function will be raised DBDuplicateEntry exception if integrity
error wrap unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2",
        # where `t` is the table name and `c1`, `c2` are the columns
        # in the UniqueConstraint.
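        #                 e.g. (hypothetical name, for illustration):
        #                 "uniq_user0name0email" -> ["name", "email"]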
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
        # note(boris-42): We should catch unique constraint violations and
        # wrap them in our own DBDuplicateEntry exception. A unique
        # constraint violation is wrapped in an IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs, so we must do this. Also, some tables (for example
            # instance_types) have more than one unique constraint. This
            # means we should get the names of the columns whose values
            # violate the unique constraint from the error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
_wrap.func_name = f.func_name
return _wrap
def get_engine(sqlite_fk=False, slave_engine=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri,
sqlite_fk=sqlite_fk)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _greenthread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
    Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
greenthread.sleep(0)
def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL connections checked out of the pool are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
try:
dbapi_conn.cursor().execute('select 1')
except dbapi_conn.OperationalError as ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
LOG.warn(_('Got mysql server has gone away: %s'), ex)
raise sqla_exc.DisconnectionError("Database server went away")
else:
raise
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
conn_err_codes = ('2002', '2003', '2006')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for file, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if file.endswith('session.py') and method == '_do_query':
continue
if file.endswith('api.py') and method == 'wrapper':
continue
if file.endswith('utils.py') and method == '_inner':
continue
if file.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if file.endswith('db/api.py'):
continue
# only trace inside ceilometer
index = file.rfind('ceilometer')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername
|
coldeasy/python-driver
|
refs/heads/master
|
tests/integration/cqlengine/model/test_equality_operations.py
|
1
|
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import drop_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine import columns
class TestModel(Model):
id = columns.UUID(primary_key=True, default=lambda:uuid4())
count = columns.Integer()
text = columns.Text(required=False)
class TestEqualityOperators(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestEqualityOperators, cls).setUpClass()
sync_table(TestModel)
def setUp(self):
super(TestEqualityOperators, self).setUp()
self.t0 = TestModel.create(count=5, text='words')
self.t1 = TestModel.create(count=5, text='words')
@classmethod
def tearDownClass(cls):
super(TestEqualityOperators, cls).tearDownClass()
drop_table(TestModel)
def test_an_instance_evaluates_as_equal_to_itself(self):
"""
"""
assert self.t0 == self.t0
def test_two_instances_referencing_the_same_rows_and_different_values_evaluate_not_equal(self):
"""
"""
t0 = TestModel.get(id=self.t0.id)
t0.text = 'bleh'
assert t0 != self.t0
def test_two_instances_referencing_the_same_rows_and_values_evaluate_equal(self):
"""
"""
t0 = TestModel.get(id=self.t0.id)
assert t0 == self.t0
def test_two_instances_referencing_different_rows_evaluate_to_not_equal(self):
"""
"""
assert self.t0 != self.t1
|
dannyboi104/SickRage
|
refs/heads/master
|
lib/pynma/pynma.py
|
48
|
#!/usr/bin/python
from xml.dom.minidom import parseString
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
__version__ = "1.0"
API_SERVER = 'www.notifymyandroid.com'
ADD_PATH = '/publicapi/notify'
USER_AGENT="PyNMA/v%s"%__version__
def uniq_preserve(seq): # Dave Kirby
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def uniq(seq):
# Not order preserving
return list({}.fromkeys(seq).keys())
class PyNMA(object):
"""PyNMA(apikey=[], developerkey=None)
takes 2 optional arguments:
- (opt) apykey: might me a string containing 1 key or an array of keys
- (opt) developerkey: where you can store your developer key
"""
    def __init__(self, apikey=[], developerkey=None):
        self._developerkey = None
        self.developerkey(developerkey)
        # always initialize the key list, even when no key is passed,
        # so addkey()/delkey() work on a fresh instance
        self._apikey = []
        if apikey:
            if type(apikey) == str:
                apikey = [apikey]
            self._apikey = uniq(apikey)
def addkey(self, key):
"Add a key (register ?)"
if type(key) == str:
if not key in self._apikey:
self._apikey.append(key)
elif type(key) == list:
for k in key:
if not k in self._apikey:
self._apikey.append(k)
    def delkey(self, key):
        "Remove a key or a list of keys (unregister)"
        if type(key) == str:
            if key in self._apikey:
                self._apikey.remove(key)
        elif type(key) == list:
            for k in key:
                if k in self._apikey:
                    self._apikey.remove(k)
def developerkey(self, developerkey):
"Sets the developer key (and check it has the good length)"
if type(developerkey) == str and len(developerkey) == 48:
self._developerkey = developerkey
def push(self, application="", event="", description="", url="", contenttype=None, priority=0, batch_mode=False, html=False):
"""Pushes a message on the registered API keys.
takes 5 arguments:
- (req) application: application name [256]
- (req) event: event name [1000]
- (req) description: description [10000]
- (opt) url: url [512]
- (opt) contenttype: Content Type (act: None (plain text) or text/html)
- (opt) priority: from -2 (lowest) to 2 (highest) (def:0)
- (opt) batch_mode: push to all keys at once (def:False)
- (opt) html: shortcut for contenttype=text/html
Warning: using batch_mode will return error only if all API keys are bad
cf: http://nma.usk.bz/api.php
"""
datas = {
'application': application[:256].encode('utf8'),
            'event': event[:1000].encode('utf8'),
'description': description[:10000].encode('utf8'),
'priority': priority
}
if url:
datas['url'] = url[:512]
        if contenttype == "text/html" or html:  # currently the only accepted content type
datas['content-type'] = "text/html"
if self._developerkey:
datas['developerkey'] = self._developerkey
results = {}
if not batch_mode:
for key in self._apikey:
datas['apikey'] = key
res = self.callapi('POST', ADD_PATH, datas)
results[key] = res
else:
datas['apikey'] = ",".join(self._apikey)
res = self.callapi('POST', ADD_PATH, datas)
results[datas['apikey']] = res
return results
def callapi(self, method, path, args):
headers = { 'User-Agent': USER_AGENT }
if method == "POST":
headers['Content-type'] = "application/x-www-form-urlencoded"
http_handler = HTTPSConnection(API_SERVER)
http_handler.request(method, path, urlencode(args), headers)
resp = http_handler.getresponse()
try:
            res = self._parse_response(resp.read())
except Exception as e:
res = {'type': "pynmaerror",
'code': 600,
'message': str(e)
}
return res
    def _parse_response(self, response):
root = parseString(response).firstChild
for elem in root.childNodes:
if elem.nodeType == elem.TEXT_NODE: continue
if elem.tagName == 'success':
res = dict(list(elem.attributes.items()))
res['message'] = ""
res['type'] = elem.tagName
return res
if elem.tagName == 'error':
res = dict(list(elem.attributes.items()))
res['message'] = elem.firstChild.nodeValue
res['type'] = elem.tagName
return res
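# Illustrative usage sketch (not part of the original module); the API key
# below is a placeholder:
#
#     nma = PyNMA(apikey="0123456789abcdef0123456789abcdef0123456789abcdef")
#     results = nma.push(application="MyApp", event="Backup finished",
#                        description="Nightly backup completed OK", priority=1)
#     for key, res in results.items():
#         print key, res.get('type'), res.get('message')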
|
AOSPU/external_chromium_org_tools_gyp
|
refs/heads/android-5.0/py3
|
test/lib/TestCommon.py
|
307
|
"""
TestCommon.py: a testing framework for commands and scripts
with commonly useful error handling
The TestCommon module provides a simple, high-level interface for writing
tests of executable commands and scripts, especially commands and scripts
that interact with the file system. All methods throw exceptions and
exit on failure, with useful error messages. This makes a number of
explicit checks unnecessary, making the test scripts themselves simpler
to write and easier to read.
The TestCommon class is a subclass of the TestCmd class. In essence,
TestCommon is a wrapper that handles common TestCmd error conditions in
useful ways. You can use TestCommon directly, or subclass it for your
program and add additional (or override) methods to tailor it to your
program's specific needs. Alternatively, the TestCommon class serves
as a useful example of how to define your own TestCmd subclass.
As a subclass of TestCmd, TestCommon provides access to all of the
variables and methods from the TestCmd module. Consequently, you can
use any variable or method documented in the TestCmd module without
having to explicitly import TestCmd.
A TestCommon environment object is created via the usual invocation:
import TestCommon
test = TestCommon.TestCommon()
You can use all of the TestCmd keyword arguments when instantiating a
TestCommon object; see the TestCmd documentation for details.
Here is an overview of the methods and keyword arguments that are
provided by the TestCommon class:
test.must_be_writable('file1', ['file2', ...])
test.must_contain('file', 'required text\n')
test.must_contain_all_lines(output, lines, ['title', find])
test.must_contain_any_line(output, lines, ['title', find])
test.must_exist('file1', ['file2', ...])
test.must_match('file', "expected contents\n")
test.must_not_be_writable('file1', ['file2', ...])
test.must_not_contain('file', 'banned text\n')
test.must_not_contain_any_line(output, lines, ['title', find])
test.must_not_exist('file1', ['file2', ...])
test.run(options = "options to be prepended to arguments",
stdout = "expected standard output from the program",
stderr = "expected error output from the program",
status = expected_status,
match = match_function)
The TestCommon module also provides the following variables
TestCommon.python_executable
TestCommon.exe_suffix
TestCommon.obj_suffix
TestCommon.shobj_prefix
TestCommon.shobj_suffix
TestCommon.lib_prefix
TestCommon.lib_suffix
TestCommon.dll_prefix
TestCommon.dll_suffix
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCommon.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import copy
import os
import os.path
import stat
import string
import sys
import types
import UserList
from TestCmd import *
from TestCmd import __all__
__all__.extend([ 'TestCommon',
'exe_suffix',
'obj_suffix',
'shobj_prefix',
'shobj_suffix',
'lib_prefix',
'lib_suffix',
'dll_prefix',
'dll_suffix',
])
# Variables that describe the prefixes and suffixes on this system.
if sys.platform == 'win32':
exe_suffix = '.exe'
obj_suffix = '.obj'
shobj_suffix = '.obj'
shobj_prefix = ''
lib_prefix = ''
lib_suffix = '.lib'
dll_prefix = ''
dll_suffix = '.dll'
elif sys.platform == 'cygwin':
exe_suffix = '.exe'
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = ''
dll_suffix = '.dll'
elif string.find(sys.platform, 'irix') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.o'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
elif string.find(sys.platform, 'darwin') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.dylib'
elif string.find(sys.platform, 'sunos') != -1:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = 'so_'
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
    dll_suffix   = '.so'
else:
exe_suffix = ''
obj_suffix = '.o'
shobj_suffix = '.os'
shobj_prefix = ''
lib_prefix = 'lib'
lib_suffix = '.a'
dll_prefix = 'lib'
dll_suffix = '.so'
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
def is_writable(f):
mode = os.stat(f)[stat.ST_MODE]
return mode & stat.S_IWUSR
def separate_files(flist):
existing = []
missing = []
for f in flist:
if os.path.exists(f):
existing.append(f)
else:
missing.append(f)
return existing, missing
def _failed(self, status = 0):
if self.status is None or status is None:
return None
try:
return _status(self) not in status
except TypeError:
# status wasn't an iterable
return _status(self) != status
def _status(self):
return self.status
class TestCommon(TestCmd):
# Additional methods from the Perl Test::Cmd::Common module
# that we may wish to add in the future:
#
# $test->subdir('subdir', ...);
#
# $test->copy('src_file', 'dst_file');
def __init__(self, **kw):
"""Initialize a new TestCommon instance. This involves just
calling the base class initialization, and then changing directory
to the workdir.
"""
apply(TestCmd.__init__, [self], kw)
os.chdir(self.workdir)
def must_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
not writable.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing, missing = separate_files(files)
unwritable = filter(lambda x, iw=is_writable: not iw(x), existing)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
if unwritable:
print "Unwritable files: `%s'" % string.join(unwritable, "', `")
self.fail_test(missing + unwritable)
def must_contain(self, file, required, mode = 'rb'):
"""Ensures that the specified file contains the required text.
"""
file_contents = self.read(file, mode)
contains = (string.find(file_contents, required) != -1)
if not contains:
print "File `%s' does not contain required string." % file
print self.banner('Required string ')
print required
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(not contains)
def must_contain_all_lines(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains all of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
        function, of the form "find(line, output)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
missing = []
for line in lines:
if not find(output, line):
missing.append(line)
if missing:
if title is None:
title = 'output'
sys.stdout.write("Missing expected lines from %s:\n" % title)
for line in missing:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
contains at least one of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
        function, of the form "find(line, output)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
for line in lines:
if find(output, line):
return
if title is None:
title = 'output'
sys.stdout.write("Missing any expected line from %s:\n" % title)
for line in lines:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_contain_lines(self, lines, output, title=None):
# Deprecated; retain for backwards compatibility.
return self.must_contain_all_lines(output, lines, title)
def must_exist(self, *files):
"""Ensures that the specified file(s) must exist. An individual
file be specified as a list of directory names, in which case the
pathname will be constructed by concatenating them. Exits FAILED
if any of the files does not exist.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
missing = filter(lambda x: not os.path.exists(x), files)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
self.fail_test(missing)
def must_match(self, file, expect, mode = 'rb'):
"""Matches the contents of the specified file (first argument)
against the expected contents (second argument). The expected
contents are a list of lines or a string which will be split
on newlines.
"""
file_contents = self.read(file, mode)
try:
self.fail_test(not self.match(file_contents, expect))
except KeyboardInterrupt:
raise
except:
print "Unexpected contents of `%s'" % file
self.diff(expect, file_contents, 'contents ')
raise
def must_not_contain(self, file, banned, mode = 'rb'):
"""Ensures that the specified file doesn't contain the banned text.
"""
file_contents = self.read(file, mode)
contains = (string.find(file_contents, banned) != -1)
if contains:
print "File `%s' contains banned string." % file
print self.banner('Banned string ')
print banned
print self.banner('%s contents ' % file)
print file_contents
self.fail_test(contains)
def must_not_contain_any_line(self, output, lines, title=None, find=None):
"""Ensures that the specified output string (first argument)
does not contain any of the specified lines (second argument).
An optional third argument can be used to describe the type
of output being searched, and only shows up in failure output.
An optional fourth argument can be used to supply a different
        function, of the form "find(line, output)", to use when searching
for lines in the output.
"""
if find is None:
find = lambda o, l: string.find(o, l) != -1
unexpected = []
for line in lines:
if find(output, line):
unexpected.append(line)
if unexpected:
if title is None:
title = 'output'
sys.stdout.write("Unexpected lines in %s:\n" % title)
for line in unexpected:
sys.stdout.write(' ' + repr(line) + '\n')
sys.stdout.write(self.banner(title + ' '))
sys.stdout.write(output)
self.fail_test()
def must_not_contain_lines(self, lines, output, title=None):
return self.must_not_contain_any_line(output, lines, title)
def must_not_exist(self, *files):
"""Ensures that the specified file(s) must not exist.
An individual file be specified as a list of directory names, in
which case the pathname will be constructed by concatenating them.
Exits FAILED if any of the files exists.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing = filter(os.path.exists, files)
if existing:
print "Unexpected files exist: `%s'" % string.join(existing, "', `")
self.fail_test(existing)
def must_not_be_writable(self, *files):
"""Ensures that the specified file(s) exist and are not writable.
An individual file can be specified as a list of directory names,
in which case the pathname will be constructed by concatenating
them. Exits FAILED if any of the files does not exist or is
writable.
"""
files = map(lambda x: is_List(x) and apply(os.path.join, x) or x, files)
existing, missing = separate_files(files)
writable = filter(is_writable, existing)
if missing:
print "Missing files: `%s'" % string.join(missing, "', `")
if writable:
print "Writable files: `%s'" % string.join(writable, "', `")
self.fail_test(missing + writable)
def _complete(self, actual_stdout, expected_stdout,
actual_stderr, expected_stderr, status, match):
"""
Post-processes running a subcommand, checking for failure
status and displaying output appropriately.
"""
if _failed(self, status):
expect = ''
if status != 0:
expect = " (expected %s)" % str(status)
print "%s returned %s%s" % (self.program, str(_status(self)), expect)
print self.banner('STDOUT ')
print actual_stdout
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if not expected_stdout is None and not match(actual_stdout, expected_stdout):
self.diff(expected_stdout, actual_stdout, 'STDOUT ')
if actual_stderr:
print self.banner('STDERR ')
print actual_stderr
self.fail_test()
if not expected_stderr is None and not match(actual_stderr, expected_stderr):
print self.banner('STDOUT ')
print actual_stdout
self.diff(expected_stderr, actual_stderr, 'STDERR ')
self.fail_test()
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
This handles the "options" keyword argument and exceptions.
"""
options = kw.pop('options', None)
if options:
if arguments is None:
arguments = options
else:
arguments = options + " " + arguments
try:
return apply(TestCmd.start,
(self, program, interpreter, arguments, universal_newlines),
kw)
except KeyboardInterrupt:
raise
except Exception, e:
print self.banner('STDOUT ')
try:
print self.stdout()
except IndexError:
pass
print self.banner('STDERR ')
try:
print self.stderr()
except IndexError:
pass
cmd_args = self.command_args(program, interpreter, arguments)
sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
raise e
def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument. Additional arguments are similar
to those of the run() method:
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
"""
apply(TestCmd.finish, (self, popen,), kw)
match = kw.get('match', self.match)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def run(self, options = None, arguments = None,
stdout = None, stderr = '', status = 0, **kw):
"""Runs the program under test, checking that the test succeeded.
The arguments are the same as the base TestCmd.run() method,
with the addition of:
options Extra options that get appended to the beginning
of the arguments.
stdout The expected standard output from
the command. A value of None means
don't test standard output.
stderr The expected error output from
the command. A value of None means
don't test error output.
status The expected exit status from the
command. A value of None means don't
test exit status.
By default, this expects a successful exit (status = 0), does
not test standard output (stdout = None), and expects that error
output is empty (stderr = "").
"""
if options:
if arguments is None:
arguments = options
else:
arguments = options + " " + arguments
kw['arguments'] = arguments
match = kw.pop('match', self.match)
apply(TestCmd.run, [self], kw)
self._complete(self.stdout(), stdout,
self.stderr(), stderr, status, match)
def skip_test(self, message="Skipping test.\n"):
"""Skips a test.
Proper test-skipping behavior is dependent on the external
TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
In either case, we print the specified message as an indication
that the substance of the test was skipped.
(This was originally added to support development under Aegis.
Technically, skipping a test is a NO RESULT, but Aegis would
treat that as a test failure and prevent the change from going to
        the next step. Since we didn't want to force anyone using Aegis
to have to install absolutely every tool used by the tests, we
would actually report to Aegis that a skipped test has PASSED
so that the workflow isn't held up.)
"""
if message:
sys.stdout.write(message)
sys.stdout.flush()
pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
if pass_skips in [None, 0, '0']:
# skip=1 means skip this function when showing where this
# result came from. They only care about the line where the
# script called test.skip_test(), not the line number where
# we call test.no_result().
self.no_result(skip=1)
else:
# We're under the development directory for this change,
# so this is an Aegis invocation; pass the test (exit 0).
self.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gsnbng/erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/employee_onboarding_activity/employee_onboarding_activity.py
|
18
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class EmployeeOnboardingActivity(Document):
pass
|
web30s/odoo-9.0c-20160402
|
refs/heads/master
|
hello/templates/openerp/addons/sale_stock/company.py
|
44
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import fields, models
class company(models.Model):
_inherit = 'res.company'
security_lead = fields.Float('Sales Safety Days', required=True, default = 0.0,
help="Margin of error for dates promised to customers. "\
"Products will be scheduled for procurement and delivery "\
"that many days earlier than the actual promised date, to "\
"cope with unexpected delays in the supply chain.")
|
yidongxiainl/lammps
|
refs/heads/master
|
bench/KEPLER/build.py
|
35
|
#!/usr/local/bin/python
# Syntax: build.py target1 target2 ...
# targets:
# cpu, opt, omp,
# gpu/double, gpu/mixed, gpu/single,
# cuda/double, cuda/mixed, cuda/single,
# intel/cpu, intel/phi,
# kokkos/omp, kokkos/phi, kokkos/cuda
# gpu = gpu/double + gpu/mixed + gpu/single
# cuda = cuda/double + cuda/mixed + cuda/single
# intel = intel/cpu + intel/phi
# kokkos = kokkos/omp + kokkos/phi + kokkos/cuda
# all = cpu + opt + omp + gpu + cuda + intel + kokkos
# create executables for different packages
# MUST set lmpdir to path of LAMMPS home directory
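# example invocation (illustrative): python build.py cpu omp gpu/double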
import sys,commands,os
lmpdir = "~/lammps"
# build LAMMPS
# copy makefile into src/MAKE as Makefile.foo, then remove it
def build_lammps(makefile,pkg):
print "Building LAMMPS with %s and %s packages ..." % (makefile,pkg)
commands.getoutput("cp %s %s/src/MAKE/Makefile.foo" % (makefile,lmpdir))
cwd = os.getcwd()
os.chdir(os.path.expanduser(lmpdir + "/src"))
str = "make clean-foo"
txt = commands.getoutput(str)
str = "make no-all"
txt = commands.getoutput(str)
for package in pkg:
str = "make yes-%s" % package
txt = commands.getoutput(str)
print txt
str = "make -j 16 foo"
txt = commands.getoutput(str)
os.remove("MAKE/Makefile.foo")
os.chdir(cwd)
# build GPU library in LAMMPS
# copy makefile into lib/gpu as Makefile.foo, then remove it
def build_gpu(makefile):
print "Building GPU lib with %s ..." % makefile
commands.getoutput("cp %s %s/lib/gpu/Makefile.foo" % (makefile,lmpdir))
cwd = os.getcwd()
os.chdir(os.path.expanduser(lmpdir + "/lib/gpu"))
str = "make -f Makefile.foo clean"
txt = commands.getoutput(str)
str = "make -j 16 -f Makefile.foo"
txt = commands.getoutput(str)
os.remove("Makefile.foo")
os.chdir(cwd)
# build CUDA library in LAMMPS
# set precision and arch explicitly as options to make in lib/cuda
def build_cuda(precision,arch):
print "Building USER-CUDA lib with %s and arch sm_%d ..." % (precision,arch)
cwd = os.getcwd()
os.chdir(os.path.expanduser(lmpdir + "/lib/cuda"))
str = "make clean"
txt = commands.getoutput(str)
if precision == "double": pflag = 2
elif precision == "mixed": pflag = 4
elif precision == "single": pflag = 1
str = "make -j 16 precision=%d arch=%s" % (pflag,arch)
txt = commands.getoutput(str)
os.chdir(cwd)
# main program
# convert target keywords into target flags
cpu = opt = omp = 0
gpu = gpu_double = gpu_mixed = gpu_single = 0
cuda = cuda_double = cuda_mixed = cuda_single = 0
intel = intel_cpu = intel_phi = 0
kokkos = kokkos_omp = kokkos_phi = kokkos_cuda = 0
targets = sys.argv[1:]
for target in targets:
if target == "cpu": cpu = 1
elif target == "opt": opt = 1
elif target == "omp": omp = 1
elif target == "gpu/double": gpu_double = 1
elif target == "gpu/mixed": gpu_mixed = 1
elif target == "gpu/single": gpu_single = 1
elif target == "gpu": gpu = 1
elif target == "cuda/double": cuda_double = 1
elif target == "cuda/mixed": cuda_mixed = 1
elif target == "cuda/single": cuda_single = 1
elif target == "cuda": cuda = 1
elif target == "intel/cpu": intel_cpu = 1
elif target == "intel/phi": intel_phi = 1
elif target == "intel": intel = 1
elif target == "kokkos/omp": kokkos_omp = 1
elif target == "kokkos/phi": kokkos_phi = 1
elif target == "kokkos/cuda": kokkos_cuda = 1
elif target == "kokkos": kokkos = 1
elif target == "all": cpu = omp = gpu = cuda = intel = kokkos = 1
else: print "Target",target,"is unknown"
if gpu: gpu_double = gpu_mixed = gpu_single = 1
if cuda: cuda_double = cuda_mixed = cuda_single = 1
if intel: intel_cpu = intel_phi = 1
if kokkos: kokkos_omp = kokkos_phi = kokkos_cuda = 1
# CPU
if cpu:
build_lammps(makefile = "Makefile.cpu", pkg = [])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cpu" % lmpdir)
# OPT
if opt:
build_lammps(makefile = "Makefile.opt", pkg = ["opt"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_opt" % lmpdir)
# OMP
if omp:
build_lammps(makefile = "Makefile.omp", pkg = ["user-omp"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_omp" % lmpdir)
# GPU, 3 precisions
if gpu_double:
build_gpu(makefile = "Makefile.gpu.double")
build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_double" % lmpdir)
if gpu_mixed:
build_gpu(makefile = "Makefile.gpu.mixed")
build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_mixed" % lmpdir)
if gpu_single:
build_gpu(makefile = "Makefile.gpu.single")
build_lammps(makefile = "Makefile.gpu", pkg = ["gpu"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_gpu_single" % lmpdir)
# CUDA, 3 precisions
if cuda_double:
build_cuda(precision = "double", arch = 35)
build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_double" % lmpdir)
if cuda_mixed:
build_cuda(precision = "mixed", arch = 35)
build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_mixed" % lmpdir)
if cuda_single:
build_cuda(precision = "single", arch = 35)
build_lammps(makefile = "Makefile.cuda", pkg = ["kspace","user-cuda"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_cuda_single" % lmpdir)
# INTEL, CPU and Phi
if intel_cpu:
build_lammps(makefile = "Makefile.intel.cpu", pkg = ["user-intel"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_intel_cpu" % lmpdir)
if intel_phi:
build_lammps(makefile = "Makefile.intel.phi", pkg = ["user-intel","user-omp"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_intel_phi" % lmpdir)
# KOKKOS, all variants
if kokkos_omp:
build_lammps(makefile = "Makefile.kokkos.omp", pkg = ["kokkos"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_omp" % lmpdir)
if kokkos_phi:
build_lammps(makefile = "Makefile.kokkos.phi", pkg = ["kokkos"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_phi" % lmpdir)
if kokkos_cuda:
build_lammps(makefile = "Makefile.kokkos.cuda", pkg = ["kokkos"])
print commands.getoutput("mv %s/src/lmp_foo ./lmp_kokkos_cuda" % lmpdir)
|
CollabQ/CollabQ
|
refs/heads/master
|
vendor/django/contrib/gis/tests/relatedapp/tests_mysql.py
|
45
|
from tests import *
|
tethysplatform/TethysCluster
|
refs/heads/master
|
tethyscluster/commands/terminate.py
|
2
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of TethysCluster.
#
# TethysCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TethysCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TethysCluster. If not, see <http://www.gnu.org/licenses/>.
from tethyscluster import exception
from tethyscluster.logger import log
from completers import ClusterCompleter
class CmdTerminate(ClusterCompleter):
"""
terminate [options] <cluster_tag> ...
Terminate a running or stopped cluster
Example:
$ tethyscluster terminate mycluster
This will terminate a currently running or stopped cluster tagged
"mycluster".
All nodes will be terminated, all spot requests (if any) will be
cancelled, and the cluster's security group will be removed. If the
cluster uses EBS-backed nodes then each node's root volume will be
deleted. If the cluster uses "cluster compute" instance types the
cluster's placement group will also be removed.
"""
names = ['terminate']
def addopts(self, parser):
parser.add_option("-c", "--confirm", dest="confirm",
action="store_true", default=False,
help="Do not prompt for confirmation, "
"just terminate the cluster")
parser.add_option("-f", "--force", dest="force", action="store_true",
default=False, help="Terminate cluster regardless "
"of errors if possible ")
def _terminate_cluster(self, cl):
if not self.opts.confirm:
action = 'Terminate'
if cl.is_ebs_cluster():
action = 'Terminate EBS'
resp = raw_input(
"%s cluster %s (y/n)? " % (action, cl.cluster_tag))
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
cl.terminate_cluster()
def _terminate_manually(self, cl):
if not self.opts.confirm:
resp = raw_input("Terminate cluster %s (y/n)? " % cl.cluster_tag)
if resp not in ['y', 'Y', 'yes']:
log.info("Aborting...")
return
insts = cl.cluster_group.instances()
for inst in insts:
log.info("Terminating %s" % (inst.id,))
inst.terminate()
cl.terminate_cluster(force=True)
def terminate(self, cluster_name, force=False):
if force:
log.warn("Ignoring cluster settings due to --force option")
try:
cl = self.cm.get_cluster(cluster_name, load_receipt=not force,
require_keys=not force)
if force:
self._terminate_manually(cl)
else:
self._terminate_cluster(cl)
except exception.ClusterDoesNotExist:
raise
except Exception:
log.error("Failed to terminate cluster!", exc_info=True)
if not force:
log.error("Use -f to forcefully terminate the cluster")
raise
def execute(self, args):
if not args:
self.parser.error("please specify a cluster")
for cluster_name in args:
try:
self.terminate(cluster_name, force=self.opts.force)
except EOFError:
print 'Interrupted, exiting...'
return
|
kuiwei/edx-platform
|
refs/heads/master
|
common/lib/html_to_text.py
|
244
|
"""Provides a function to convert html to plaintext."""
import logging
from subprocess import Popen, PIPE
log = logging.getLogger(__name__)
def html_to_text(html_message):
"""
Converts an html message to plaintext.
Currently uses lynx in a subprocess; should be refactored to
use something more pythonic.
"""
process = Popen(
['lynx', '-stdin', '-display_charset=UTF-8', '-assume_charset=UTF-8', '-dump'],
stdin=PIPE,
stdout=PIPE
)
# use lynx to get plaintext
(plaintext, err_from_stderr) = process.communicate(
input=html_message.encode('utf-8')
)
if err_from_stderr:
log.info(err_from_stderr)
return plaintext
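# Illustrative usage (not part of the original module); requires the
# 'lynx' binary to be installed and on PATH:
#
#     text = html_to_text(u"<p>Hello <b>world</b></p>")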
|
ferringb/raygun4py
|
refs/heads/master
|
python3/tests/test_raygunprovider.py
|
2
|
import unittest, sys
from raygun4py import raygunprovider
from raygun4py import utilities
class TestRaygunSender(unittest.TestCase):
def setUp(self):
self.sender = raygunprovider.RaygunSender('invalidapikey')
self.handler = raygunprovider.RaygunHandler('testkey', 'v1.0')
def test_apikey(self):
self.assertEqual(self.sender.apiKey, 'invalidapikey')
def test_handler_apikey(self):
self.assertEqual(self.handler.sender.apiKey, 'testkey')
def test_handler_version(self):
self.assertEqual(self.handler.version, 'v1.0')
def test_sending_403_with_invalid_key(self):
try:
raise Exception('test')
except Exception as e:
info = sys.exc_info()
http_result = self.sender.send_exception(exc_info=info)
self.assertEqual(http_result[0], 403)
def test_ignore_exceptions(self):
ex = ['Exception']
self.sender.ignore_exceptions(ex)
self.assertEqual(self.sender.ignoredExceptions, ex)
def test_filter_keys_set(self):
keys = ['credit_card']
self.sender.filter_keys(keys)
self.assertEqual(self.sender.filteredKeys, keys)
def test_filter_keys_filters_error(self):
keys = ['identifier']
self.sender.filter_keys(keys)
self.sender.set_user({ 'identifier': 'foo' })
self.assertEqual(utilities.filter_keys(keys, self.sender.user)['identifier'], '<filtered>')
def test_set_transmitLocalVariables(self):
self.sender = raygunprovider.RaygunSender('foo', config={ 'transmitLocalVariables': False })
self.assertFalse(self.sender.transmitLocalVariables)
def test_default_transmitLocalVariables(self):
self.sender = raygunprovider.RaygunSender('foo')
self.assertTrue(self.sender.transmitLocalVariables)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
GioneeDevTeam/android_kernel_gionee_msm8974
|
refs/heads/cm-10.1
|
scripts/build-all.py
|
305
|
#! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
|
jumpstarter-io/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/access_and_security/floating_ips/tests.py
|
1
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from horizon.workflows import views
INDEX_URL = reverse('horizon:project:access_and_security:index')
NAMESPACE = "horizon:project:access_and_security:floating_ips"
class FloatingIpViewTests(test.TestCase):
def test_associate(self):
self.mox.StubOutWithMock(api.network, 'floating_ip_target_list')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
url = reverse('%s:associate' % NAMESPACE)
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertTrue(self.floating_ips.first() not in choices)
def test_associate_post(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_associate')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'floating_ip_target_list')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_associate_post_with_redirect(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_associate')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'floating_ip_target_list')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
next = reverse("horizon:project:instances:index")
res = self.client.post("%s?next=%s" % (url, next), form_data)
self.assertRedirectsNoFollow(res, next)
def test_associate_post_with_exception(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_associate')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'floating_ip_target_list')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_disassociate_post(self):
floating_ip = self.floating_ips.first()
server = self.servers.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_get')
self.mox.StubOutWithMock(api.network, 'floating_ip_disassociate')
self.mox.StubOutWithMock(api.nova, 'server_list')
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_disassociate_post_with_exception(self):
floating_ip = self.floating_ips.first()
server = self.servers.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_get')
self.mox.StubOutWithMock(api.network, 'floating_ip_disassociate')
self.mox.StubOutWithMock(api.nova, 'server_list')
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id,
server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertRedirectsNoFollow(res, INDEX_URL)
class FloatingIpNeutronViewTests(FloatingIpViewTests):
def setUp(self):
super(FloatingIpViewTests, self).setUp()
self._floating_ips_orig = self.floating_ips
self.floating_ips = self.floating_ips_uuid
def tearDown(self):
self.floating_ips = self._floating_ips_orig
super(FloatingIpViewTests, self).tearDown()
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.cinder: ('tenant_quota_get', 'volume_list',
'volume_snapshot_list',),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'tenant_quota_get')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
api.cinder.volume_list(IsA(http.HttpRequest)) \
.AndReturn(self.volumes.list())
api.cinder.volume_snapshot_list(IsA(http.HttpRequest)) \
.AndReturn(self.snapshots.list())
api.cinder.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.cinder_quotas.first())
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
|
mayfieldrobotics/rosbridge_suite
|
refs/heads/develop
|
rosbridge_library/src/rosbridge_library/capabilities/fragmentation.py
|
12
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from rosbridge_library.capability import Capability
import math
class Fragmentation(Capability):
""" The Fragmentation capability doesn't define any incoming operation
handlers, but provides methods to fragment outgoing messages """
fragmentation_seed = 0
def __init__(self, protocol):
# Call superclass constructor
Capability.__init__(self, protocol)
def fragment(self, message, fragment_size, mid=None):
""" Serializes the provided message, then splits the serialized
message according to fragment_size, then sends the fragments.
If the size of the message is less than the fragment size, then
the original message is returned rather than a single fragment
Since fragmentation is typically only used for very large messages,
this method returns a generator for fragments rather than a list
Keyword Arguments
message -- the message dict object to be fragmented
fragment_size -- the max size for the fragments
mid -- (optional) if provided, the fragment messages
will be given this id. Otherwise an id will be auto-generated.
Returns a generator of message dict objects representing the fragments
"""
# All fragmented messages need an ID so they can be reconstructed
if mid is None:
mid = self.fragmentation_seed
self.fragmentation_seed = self.fragmentation_seed + 1
serialized = self.protocol.serialize(message, mid)
if serialized is None:
return []
message_length = len(serialized)
if message_length <= fragment_size:
return [message]
msg_id = message.get("id", None)
expected_duration = int(math.ceil(math.ceil(message_length / float(fragment_size))) * self.protocol.delay_between_messages)
log_msg = "sending " + str(int(math.ceil(message_length / float(fragment_size)))) + " parts [fragment size: " + str(fragment_size) +"; expected duration: ~" + str(expected_duration) + "s]"
self.protocol.log("info", log_msg)
return self._fragment_generator(serialized, fragment_size, mid)
def _fragment_generator(self, msg, size, mid):
""" Returns a generator of fragment messages """
total = ((len(msg)-1) / size) + 1
n = 0
for i in range(0, len(msg), size):
fragment = msg[i:i+size]
yield self._create_fragment(fragment, n, total, mid)
n = n + 1
def _create_fragment(self, fragment, num, total, mid):
""" Given a string fragment of the original message, creates
the appropriate fragment message """
return {
"op": "fragment",
"id": mid,
"data": fragment,
"num": num,
"total": total
}
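
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of how fragment() slices a serialized message,
# assuming a stub protocol; _StubProtocol and its attributes are hypothetical
# stand-ins for a real rosbridge protocol instance.
if __name__ == '__main__':
    class _StubProtocol(object):
        delay_between_messages = 0.0

        def serialize(self, message, mid):
            # Pretend the message serializes to a string longer than the
            # fragment size used below.
            return '{"op": "demo", "data": "' + 'x' * 100 + '"}'

        def log(self, level, message):
            print('[%s] %s' % (level, message))

    frag = Fragmentation(_StubProtocol())
    for part in frag.fragment({'op': 'demo'}, fragment_size=32):
        # Each part is {"op": "fragment", "id": ..., "data": ..., "num": n,
        # "total": t}; reassembly concatenates "data" in "num" order.
        print('%d/%d: %d bytes' % (part['num'], part['total'], len(part['data'])))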
|
Astyan-42/skepticalscience
|
refs/heads/master
|
skepticalsciencewebsite/faq/tests/test_models.py
|
1
|
import mock
from django.test import TestCase
from faq.models import Topic, QandA
class TopicTestCase(TestCase):
def test__str__(self):
topic = mock.Mock(spec=Topic)
self.assertEqual(Topic.__str__(topic), topic.name)
class QandATestCase(TestCase):
def test__str__(self):
qanda = mock.Mock(spec=QandA)
self.assertEqual(QandA.__str__(qanda), qanda.question)
|
frankito9999/Ecommerce-OAuth-Stripe-Bitcoin
|
refs/heads/master
|
node_modules/laravel-elixir/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
|
505
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
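
A tiny illustrative excerpt of such a file (shortened, with a made-up object
key; not copied from any real project) might look like:

  // !$*UTF8*$!
  {
    objects = {
      0123456789ABCDEF01234567 = {isa = PBXProject; ... };
    };
    rootObject = 0123456789ABCDEF01234567 /* Project object */;
  }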
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
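# For example (illustrative): 'path/to/file_name.c' may be printed unquoted,
# while 'has space', '___x', and the empty string must all be quoted.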
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
  return re.sub(r'\$\((.*?)\)', r'${\1}', input_string)
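# For example (illustrative), ConvertVariablesToShellSyntax('$(SRCROOT)/foo')
# returns '${SRCROOT}/foo'.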
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
                 default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
        project file structure is built. At that point, ComputeIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError, 'Strong dict for key ' + key + ' in ' + \
self.__class__.__name__
else:
that._properties[key] = value.copy()
else:
raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError, \
self.__class__.__name__ + ' must implement Name'
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
      their objects. By adding the length, it's far less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
      # 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
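
  # Illustrative note on the xor-folding above: viewing the SHA-1 digest as
  # five 32-bit words d0..d4, the resulting 96-bit ID is the three words
  # (d0 ^ d3, d1 ^ d4, d2), printed as 24 uppercase hexadecimal characters.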
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError, \
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name())
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
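
  # For example (illustrative), _EncodeComment('a */ b') returns
  # '/* a (*)/ b */'.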
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
    # Use quotation marks when any character outside of A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), / (slash), and _ (underscore) is present.
    # Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\Uxxxx", where xxxx is the Unicode code
    # point in hexadecimal. For example, character 14 (^N SO) is encoded as
    # "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError, property + ' not in ' + self.__class__.__name__
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError, \
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError, "Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError, key + ' not in ' + self.__class__.__name__
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
if not isinstance(value, property_type):
raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError, self.__class__.__name__ + ' requires ' + property
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
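
# A minimal sketch of the schema convention described above (XCDemo is a
# hypothetical class, not part of this module): a subclass only needs to
# extend _schema.
#
#   class XCDemo(XCObject):
#     _schema = XCObject._schema.copy()
#     _schema.update({
#       'name':     [0, str, 0, 1, 'untitled'],  # required str with a default
#       'children': [1, XCObject, 1, 0],         # strong list, optional
#     })
#
# _SetDefaultsFromSchema then fills in 'untitled' for objects constructed
# without a name, and UpdateProperties validates every assignment against
# these entries.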
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
    a/b/f1 and a and b become collapsed into a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
    other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
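
  # For example (illustrative), an element with sourceTree 'SDKROOT' and path
  # 'usr/lib' yields '$(SDKROOT)/usr/lib'; an element with the default
  # '<group>' sourceTree and no path yields None.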
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
    # It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy, the hashable is enhanced with the names of all of its
    # children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError, 'Found multiple children with path ' + child_path
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path)
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
    # etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
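
  # For example (illustrative), AddOrGetFileByPath('a/b/en.lproj/Main.nib',
  # hierarchical=True) creates PBXGroups for a and b, a PBXVariantGroup named
  # Main.nib under b, and a PBXFileReference variant named 'en' with path
  # 'en.lproj/Main.nib', returning the PBXVariantGroup.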
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'ttf': 'file',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError, name
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError, 'Variant values for ' + key
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError, \
self.__class__.__name__ + ' must implement FileGroup'
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError, 'Found multiple build files with path ' + path
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError, 'Found multiple build files for ' + \
xcfilelikeelement.Name()
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
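  # Illustrative usage (added, not part of the original module): a caller
  # holding a concrete phase can simply do
  #   phase.AddFile('foo.cc')
  # which creates (or reuses) a PBXFileReference for foo.cc in the group
  # chosen by FileGroup and attaches a PBXBuildFile for it to the phase.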
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
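  # For example, '$(BUILT_PRODUCTS_DIR)/Frameworks' yields match group 1
  # 'BUILT_PRODUCTS_DIR' and match group 3 'Frameworks', while
  # '$(BUILT_PRODUCTS_DIR)' alone yields None for match group 3.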
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 10, # Frameworks
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError, 'Can\'t use path %s in a %s' % \
(path, self.__class__.__name__)
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
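  # Illustrative examples (added, not part of the original module):
  #   SetDestination('$(BUILT_PRODUCTS_DIR)/PlugIns') sets
  #     dstSubfolderSpec 16 and dstPath 'PlugIns'.
  #   SetDestination('/usr/local/lib') sets dstSubfolderSpec 0 and
  #     dstPath 'usr/local/lib'.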
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
  #  suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
}
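  # Illustrative example (added): a target with productType
  # 'com.apple.product-type.library.static' and productName 'foo' gets a
  # productReference whose path is 'libfoo.a' ('lib' prefix, '.a' suffix).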
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
# Extension override.
suffix = '.' + force_extension
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and this function is intended as an aid to the *Phase
        # helpers below that rely on that invariant.  Loop over the entire
        # list of phases and assert if more than one of the desired type is
        # found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
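  # Illustrative (added): '$(INTERMEDIATE_DIR)/foo.c' lands in the
  # Intermediates group (hierarchical), while a plain relative path such as
  # 'src/foo.c' falls through to the Source group.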
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists.  Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
return [product_group, project_ref]
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y: CompareProducts(x, y, remote_products))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 45],
'rootObject': [0, PBXProject, 1, 1],
})
def SetXcodeVersion(self, version):
version_to_object_version = {
'2.4': 45,
'3.0': 45,
'3.1': 45,
'3.2': 46,
}
if not version in version_to_object_version:
supported_str = ', '.join(sorted(version_to_object_version.keys()))
raise Exception(
'Unsupported Xcode version %s (supported: %s)' %
( version, supported_str ) )
compatibility_version = 'Xcode %s' % version
self._properties['rootObject'].SetProperty('compatibilityVersion',
compatibility_version)
    self.SetProperty('objectVersion', version_to_object_version[version])
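  # Illustrative (added): SetXcodeVersion('3.2') sets the root object's
  # compatibilityVersion to 'Xcode 3.2' and this file's objectVersion to 46.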
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
# loop do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
|
kevinthesun/mxnet
|
refs/heads/master
|
example/rcnn/rcnn/symbol/proposal.py
|
41
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Proposal Operator transforms anchor coordinates into ROI coordinates using the
predicted classification probabilities, the predicted bounding-box regressions,
and the image size and scale information.
"""
import mxnet as mx
import numpy as np
import numpy.random as npr
from distutils.util import strtobool
from rcnn.logger import logger
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.generate_anchor import generate_anchors
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class ProposalOperator(mx.operator.CustomOp):
def __init__(self, feat_stride, scales, ratios, output_score,
rpn_pre_nms_top_n, rpn_post_nms_top_n, threshold, rpn_min_size):
super(ProposalOperator, self).__init__()
self._feat_stride = feat_stride
self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',')
self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
self._num_anchors = self._anchors.shape[0]
self._output_score = output_score
self._rpn_pre_nms_top_n = rpn_pre_nms_top_n
self._rpn_post_nms_top_n = rpn_post_nms_top_n
self._threshold = threshold
self._rpn_min_size = rpn_min_size
logger.debug('feat_stride: %s' % self._feat_stride)
logger.debug('anchors:\n%s' % self._anchors)
def forward(self, is_train, req, in_data, out_data, aux):
nms = gpu_nms_wrapper(self._threshold, in_data[0].context.device_id)
batch_size = in_data[0].shape[0]
if batch_size > 1:
raise ValueError("Sorry, multiple images each device is not implemented")
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
pre_nms_topN = self._rpn_pre_nms_top_n
post_nms_topN = self._rpn_post_nms_top_n
min_size = self._rpn_min_size
# the first set of anchors are background probabilities
# keep the second part
scores = in_data[0].asnumpy()[:, self._num_anchors:, :, :]
bbox_deltas = in_data[1].asnumpy()
im_info = in_data[2].asnumpy()[0, :]
logger.debug('im_info: %s' % im_info)
# 1. Generate proposals from bbox_deltas and shifted anchors
# use real image size instead of padded feature map sizes
height, width = int(im_info[0] / self._feat_stride), int(im_info[1] / self._feat_stride)
logger.debug('score map size: (%d, %d)' % (scores.shape[2], scores.shape[3]))
        logger.debug('residual: (%d, %d)' % (scores.shape[2] - height, scores.shape[3] - width))
# Enumerate all shifts
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
anchors = self._anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
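        # Illustrative (added): with feat_stride 16 and a 600x800 input,
        # height, width = 37, 50, so K = 1850 shift positions; with the
        # default 3 scales x 3 ratios, A = 9 and K * A = 16650 anchors.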
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
#
# bbox deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = self._clip_pad(bbox_deltas, (height, width))
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = self._clip_pad(scores, (height, width))
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via bbox transformations
proposals = bbox_pred(anchors, bbox_deltas)
# 2. clip predicted boxes to image
proposals = clip_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTE: convert min_size to input image scale stored in im_info[2])
keep = self._filter_boxes(proposals, min_size * im_info[2])
proposals = proposals[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
det = np.hstack((proposals, scores)).astype(np.float32)
keep = nms(det)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
# pad to ensure output size remains unchanged
if len(keep) < post_nms_topN:
pad = npr.choice(keep, size=post_nms_topN - len(keep))
keep = np.hstack((keep, pad))
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois array
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
self.assign(out_data[0], req[0], blob)
if self._output_score:
self.assign(out_data[1], req[1], scores.astype(np.float32, copy=False))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
self.assign(in_grad[2], req[2], 0)
@staticmethod
def _filter_boxes(boxes, min_size):
""" Remove all boxes with any side smaller than min_size """
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
@staticmethod
def _clip_pad(tensor, pad_shape):
"""
Clip boxes of the pad area.
:param tensor: [n, c, H, W]
:param pad_shape: [h, w]
:return: [n, c, h, w]
"""
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor
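    # Illustrative (added): a (1, 18, 38, 50) score map whose true size is
    # (36, 47) is clipped to (1, 18, 36, 47); tensors already at the target
    # size pass through unchanged.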
@mx.operator.register("proposal")
class ProposalProp(mx.operator.CustomOpProp):
def __init__(self, feat_stride='16', scales='(8, 16, 32)', ratios='(0.5, 1, 2)', output_score='False',
rpn_pre_nms_top_n='6000', rpn_post_nms_top_n='300', threshold='0.3', rpn_min_size='16'):
super(ProposalProp, self).__init__(need_top_grad=False)
self._feat_stride = int(feat_stride)
self._scales = scales
self._ratios = ratios
self._output_score = strtobool(output_score)
self._rpn_pre_nms_top_n = int(rpn_pre_nms_top_n)
self._rpn_post_nms_top_n = int(rpn_post_nms_top_n)
self._threshold = float(threshold)
self._rpn_min_size = int(rpn_min_size)
def list_arguments(self):
return ['cls_prob', 'bbox_pred', 'im_info']
def list_outputs(self):
if self._output_score:
return ['output', 'score']
else:
return ['output']
def infer_shape(self, in_shape):
cls_prob_shape = in_shape[0]
bbox_pred_shape = in_shape[1]
        assert cls_prob_shape[0] == bbox_pred_shape[0], 'batch size does not match between cls_prob and bbox_pred'
batch_size = cls_prob_shape[0]
im_info_shape = (batch_size, 3)
output_shape = (self._rpn_post_nms_top_n, 5)
score_shape = (self._rpn_post_nms_top_n, 1)
if self._output_score:
return [cls_prob_shape, bbox_pred_shape, im_info_shape], [output_shape, score_shape]
else:
return [cls_prob_shape, bbox_pred_shape, im_info_shape], [output_shape]
def create_operator(self, ctx, shapes, dtypes):
return ProposalOperator(self._feat_stride, self._scales, self._ratios, self._output_score,
self._rpn_pre_nms_top_n, self._rpn_post_nms_top_n, self._threshold, self._rpn_min_size)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
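# Minimal usage sketch (added, not part of the original module).  The symbol
# names below are illustrative; the parameters reach ProposalProp as strings.
#
#   rois = mx.symbol.Custom(
#       cls_prob=rpn_cls_prob, bbox_pred=rpn_bbox_pred, im_info=im_info,
#       op_type='proposal', feat_stride='16', scales='(8, 16, 32)',
#       ratios='(0.5, 1, 2)', rpn_pre_nms_top_n='6000',
#       rpn_post_nms_top_n='300', threshold='0.7', rpn_min_size='16')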
|
franek/weboob
|
refs/heads/master
|
modules/redmine/__init__.py
|
5
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .backend import RedmineBackend
__all__ = ['RedmineBackend']
|
timpalpant/calibre
|
refs/heads/master
|
src/calibre/ebooks/conversion/plugins/__init__.py
|
96
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
|
bzbarsky/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
|
235
|
def test_quick(setup):
pass
|
daiyinger/esp-idf
|
refs/heads/master
|
components/idf_test/integration_test/TestCaseScript/UDPStress/UDPMultiSTASendRecv.py
|
15
|
from TCAction import TCActionBase
from NativeLog import NativeLog
import time
import random
import string
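# Stress scenario: SSC1 runs as softAP, SSC2..SSCn join as stations, and SSC2 exchanges
# UDP packets with every other station while the inter-send delay is gradually reduced.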
class TestCase(TCActionBase.CommonTCActionBase):
def __init__(self, test_case, test_env, timeout=45, log_path=TCActionBase.LOG_PATH):
TCActionBase.CommonTCActionBase.__init__(self, test_case, test_env, timeout=timeout, log_path=log_path)
# load param from excel
cmd_set = test_case["cmd set"]
for i in range(1, len(cmd_set)):
if cmd_set[i][0] != "dummy":
cmd_string = "self." + cmd_set[i][0]
exec cmd_string
self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
pass
def execute(self):
TCActionBase.TCActionBase.execute(self)
self.result_cntx.start()
try:
# configurable params
send_len = self.send_len
test_time = self.test_time * 60
server_echo = self.server_echo
sta_number = self.sta_number
send_delay = self.send_delay
# configurable params
except StandardError, e:
NativeLog.add_trace_critical("Error configuration for TCPTransparent script, error is %s" % e)
raise StandardError("Error configuration")
# step0 reboot
for i in range(sta_number + 1):
checker_stings = ["P SSC%d C ready!!!" % (i + 1)]
test_action_string = ["SSCC SSC%d restore" % (i + 1)]
fail_string = "Fail, Fail to restore"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
# turn off recv print
for i in range(sta_number + 1):
checker_stings = ["P SSC%d C +RECVPRINT:0" % (i + 1)]
test_action_string = ["SSCC SSC%d soc -R -o 0" % (i + 1)]
fail_string = "Fail, Fail to turn off recv print"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
# step1, set softAP mode on SSC1
checker_stings = ["R SSC1 C +MODE:OK"]
test_action_string = ["SSCC SSC1 op -S -o 2"]
fail_string = "Fail, Fail to set mode on SSC1"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
# step2, set STA mode on SSC2-SSCn
for i in range(sta_number):
checker_stings = ["R SSC%d C +MODE:OK" % (i + 2)]
test_action_string = ["SSCC SSC%d op -S -o 1" % (i + 2)]
fail_string = "Fail, Fail to set mode on SSC%d" % (i + 2)
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
# step3, set ssid/password on SSC1
ssid = "".join([random.choice(string.lowercase) for m in range(10)])
password = "".join([random.choice(string.lowercase) for m in range(10)])
udp_port = random.randint(10000, 20000)
checker_stings = ["R SSC1 C +SAP:OK"]
test_action_string = ["SSCC SSC1 ap -S -s %s -p %s -n 10 -t 0 -m 10" % (ssid, password)]
fail_string = "Fail, Fail to set ssid/password on SSC1"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
# step4, all STA join SSC1(soft AP)
for i in range(sta_number):
checker_stings = ["R SSC%d C +JAP:CONNECTED,%s" % (i + 2, ssid)]
test_action_string = ["SSCC SSC%d ap -C -s %s -p %s" % (i + 2, ssid, password)]
fail_string = "Fail, Fail to connect to SSC1"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string, check_time=450) is False:
return
# step5, get all the STA ip
for i in range(sta_number):
checker_stings = ["R SSC%d A <SSC%d_IP>:\+STAIP:192.168.4.(\d+)" % (i + 2, i + 2)]
test_action_string = ["SSCC SSC%d ip -Q" % (i + 2)]
fail_string = "Fail, Fail to get SSC%d ip" % (i + 2)
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
# else:
# print "SSC%d ip is:" % (i + 2), self.get_parameter("SSC%d_IP" % (i + 2))
# step6, create UDP socket on all targets
for i in range(sta_number):
checker_stings = ["R SSC%d A <sock%d>:\+BIND:(\d+),OK" % (i + 2, i + 2)]
test_action_string = ["SSCC SSC%d soc -B -t UDP -p %s" % (i + 2, udp_port + i + 2)]
fail_string = "Fail, SSC%d Fail to bind" % (i + 2)
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
return
start_time = time.time()
# step7, do send/recv, SSC2<---->other STAs
while time.time() - start_time < test_time:
checker_stings = []
test_action_string = []
if server_echo is True:
# SSC2 send packets to SSC3-SSCn
for i in range(sta_number - 1):
ip = "192.168.4." + self.get_parameter("SSC%d_IP" % (i + 3))
test_action_string.append(
"SSC SSC2 soc -S -s <sock%d> -i %s -p %s -l %d -n 1000 -j %d" % (
i + 3, ip, udp_port + i + 3, send_len, send_delay))
checker_stings.append(
"P SSC2 RE \+SEND:%s,OK NC CLOSED NC ERROR" % self.get_parameter("sock%d" % (i + 3)))
# SSC3-SSCn send packets to SSC2
ssc2_ip = "192.168.4." + self.get_parameter("SSC2_IP")
for i in range(sta_number - 1):
test_action_string.append(
"SSC SSC%d soc -S -s <sock%d> -i %s -p %s -l %d -n 1000 -j %d" % (
i + 3, i + 3, ssc2_ip, udp_port + 2, send_len, send_delay))
checker_stings.append(
"P SSC%d RE \+SEND:%s,OK NC CLOSED NC ERROR" % (i + 3, self.get_parameter("sock%d" % (i + 3))))
fail_string = "Fail, Failed to send/recv data"
if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string,
check_freq=1, check_time=300) is False:
break
            # reduce the send delay by 10 ms per round while it remains above 20 ms
if send_delay > 20:
send_delay -= 10
NativeLog.add_trace_critical("time escape: %s" % (time.time() - start_time))
if (time.time() - start_time) >= test_time:
self.result_cntx.set_result("Succeed")
else:
self.result_cntx.set_result("Failed")
# finally, execute done
def result_check(self, port_name, data):
TCActionBase.CommonTCActionBase.result_check(self, port_name, data)
self.result_cntx.append_data(port_name, data)
def main():
pass
if __name__ == '__main__':
main()
|
samatdav/zulip
|
refs/heads/master
|
zerver/lib/rest.py
|
3
|
from __future__ import absolute_import
from typing import Any, Dict
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from zerver.decorator import authenticated_json_view, authenticated_rest_api_view, \
process_as_post
from zerver.lib.response import json_method_not_allowed, json_unauthorized
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.conf import settings
METHODS = ('GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH')
FLAGS = ('override_api_url_scheme',)  # trailing comma: one-element tuple, not a string
@csrf_exempt
def rest_dispatch(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
"""Dispatch to a REST API endpoint.
Unauthenticated endpoints should not use this, as authentication is verified
in the following ways:
* for paths beginning with /api, HTTP Basic auth
* for paths beginning with /json (used by the web client), the session token
This calls the function named in kwargs[request.method], if that request
method is supported, and after wrapping that function to:
* protect against CSRF (if the user is already authenticated through
a Django session)
* authenticate via an API key (otherwise)
* coerce PUT/PATCH/DELETE into having POST-like semantics for
retrieving variables
Any keyword args that are *not* HTTP methods are passed through to the
target function.
Never make a urls.py pattern put user input into a variable called GET, POST,
etc, as that is where we route HTTP verbs to target functions.
"""
supported_methods = {} # type: Dict[str, Any]
# duplicate kwargs so we can mutate the original as we go
for arg in list(kwargs):
if arg in METHODS:
supported_methods[arg] = kwargs[arg]
del kwargs[arg]
if request.method == 'OPTIONS':
response = HttpResponse(status=204) # No content
response['Allow'] = ', '.join(supported_methods.keys())
response['Content-Length'] = "0"
return response
# Override requested method if magic method=??? parameter exists
method_to_use = request.method
if request.POST and 'method' in request.POST:
method_to_use = request.POST['method']
if method_to_use == "SOCKET" and "zulip.emulated_method" in request.META:
method_to_use = request.META["zulip.emulated_method"]
if method_to_use in supported_methods:
entry = supported_methods[method_to_use]
if isinstance(entry, tuple):
target_function, view_flags = entry
target_function = import_string(target_function)
else:
target_function = import_string(supported_methods[method_to_use])
view_flags = set()
# Set request._query for update_activity_user(), which is called
# by some of the later wrappers.
request._query = target_function.__name__
# We want to support authentication by both cookies (web client)
# and API keys (API clients). In the former case, we want to
# do a check to ensure that CSRF etc is honored, but in the latter
# we can skip all of that.
#
# Security implications of this portion of the code are minimal,
# as we should worst-case fail closed if we miscategorise a request.
    # For some special views (e.g. serving a file that has been
# uploaded), we support using the same url for web and API clients.
if ('override_api_url_scheme' in view_flags and
request.META.get('HTTP_AUTHORIZATION', None) is not None):
        # This request uses API-based authentication.
target_function = authenticated_rest_api_view()(target_function)
# /json views (web client) validate with a session token (cookie)
elif not request.path.startswith("/api") and request.user.is_authenticated():
# Authenticated via sessions framework, only CSRF check needed
target_function = csrf_protect(authenticated_json_view(target_function))
# most clients (mobile, bots, etc) use HTTP Basic Auth and REST calls, where instead of
# username:password, we use email:apiKey
elif request.META.get('HTTP_AUTHORIZATION', None):
# Wrap function with decorator to authenticate the user before
# proceeding
target_function = authenticated_rest_api_view()(target_function)
# Pick a way to tell user they're not authed based on how the request was made
else:
# If this looks like a request from a top-level page in a
# browser, send the user to the login page
if 'text/html' in request.META.get('HTTP_ACCEPT', ''):
return HttpResponseRedirect('%s/?next=%s' % (settings.HOME_NOT_LOGGED_IN, request.path))
# Ask for basic auth (email:apiKey)
elif request.path.startswith("/api"):
return json_unauthorized(_("Not logged in: API authentication or user session required"))
# Session cookie expired, notify the client
else:
return json_unauthorized(_("Not logged in: API authentication or user session required"),
www_authenticate='session')
if request.method not in ["GET", "POST"]:
# process_as_post needs to be the outer decorator, because
# otherwise we might access and thus cache a value for
# request.REQUEST.
target_function = process_as_post(target_function)
return target_function(request, **kwargs)
return json_method_not_allowed(list(supported_methods.keys()))
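# A hypothetical urls.py entry routing to rest_dispatch (view paths are illustrative); each
# HTTP method maps to a dotted view path that import_string() can resolve:
#   url(r'^users$', rest_dispatch,
#       {'GET': 'zerver.views.users.get_members_backend',
#        'POST': 'zerver.views.users.create_user_backend'}),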
|
xiandiancloud/edxplaltfom-xusong
|
refs/heads/master
|
lms/envs/bok_choy.py
|
12
|
"""
Settings for bok choy tests
"""
import os
from path import path
CONFIG_ROOT = path(__file__).abspath().dirname() # pylint: disable=E1120
TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root"
########################## Prod-like settings ###################################
# These should be as close as possible to the settings we use in production.
# As in prod, we read in environment and auth variables from JSON files.
# Unlike in prod, we use the JSON files stored in this repo.
# This is a convenience for ensuring (a) that we can consistently find the files
# and (b) that the files are the same in Jenkins as in local dev.
os.environ['SERVICE_VARIANT'] = 'bok_choy'
os.environ['CONFIG_ROOT'] = CONFIG_ROOT
from .aws import * # pylint: disable=W0401, W0614
######################### Testing overrides ####################################
# Needed for the reset database management command
INSTALLED_APPS += ('django_extensions',)
# Redirect to the test_root folder within the repo
GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath()
LOG_DIR = (TEST_ROOT / "log").abspath()
# Configure modulestore to use the test folder within the repo
update_module_store_settings(
MODULESTORE,
module_store_options={
'fs_root': (TEST_ROOT / "data").abspath(), # pylint: disable=E1120
},
xml_store_options={
'data_dir': (TEST_ROOT / "data").abspath(),
},
)
# Configure the LMS to use our stub XQueue implementation
XQUEUE_INTERFACE['url'] = 'http://localhost:8040'
# Configure the LMS to use our stub ORA implementation
OPEN_ENDED_GRADING_INTERFACE['url'] = 'http://localhost:8041/'
# Enable django-pipeline and staticfiles
STATIC_ROOT = (TEST_ROOT / "staticfiles").abspath()
# Silence noisy logs
import logging
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('edxmako.shortcuts', logging.ERROR),
('dd.dogapi', logging.ERROR),
('edx.discussion', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Unfortunately, we need to use debug mode to serve staticfiles
DEBUG = True
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_PORT = 9080
YOUTUBE['API'] = "127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['TEST_URL'] = "127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
|
robb-romans/robbmisc
|
refs/heads/master
|
scripts/upgrade-pip3-packages.py
|
1
|
#!/usr/bin/env python3
## Update installed pip packages
## From: https://stackoverflow.com/questions/2720014/upgrading-all-packages-with-pip
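## Note: pip.get_installed_distributions() is an internal API that pip 10 removed,
## so this script assumes an older pip.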
import pip
from subprocess import call
for dist in pip.get_installed_distributions():
if 'site-packages' in dist.location:
try:
call(['pip', 'install', '-U', dist.key])
except Exception as exc:
print(exc)
|
fahhem/openhtf
|
refs/heads/master
|
openhtf/util/atomic_write.py
|
5
|
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for automic_write a new file."""
import os
import tempfile
from contextlib import contextmanager
@contextmanager
def atomic_write(filename, filesync=False):
""" atomically write a file (using a temporary file).
filename: the file to be written
filesync: flush the file to disk
"""
tmpf = tempfile.NamedTemporaryFile(delete=False)
try:
with open(tmpf.name, 'w') as curfile:
yield curfile
if filesync:
curfile.flush()
os.fsync(curfile.fileno())
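    # os.rename is atomic on POSIX when source and destination are on the same
    # filesystem, so readers never observe a partially written file.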
os.rename(tmpf.name, filename)
finally:
try:
os.remove(tmpf.name)
except (IOError, OSError):
pass
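# Example usage (illustrative):
#   with atomic_write('/tmp/out.txt', filesync=True) as f:
#       f.write('some data')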
|
s40523236/2016fallcp_hw
|
refs/heads/gh-pages
|
plugin/liquid_tags/test_flickr.py
|
278
|
from . import flickr
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import os
import pytest
import re
PLUGIN_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(PLUGIN_DIR, 'test_data')
@pytest.mark.parametrize('input,expected', [
('18873146680 large "test 1"',
dict(photo_id='18873146680',
size='large',
alt='test 1')),
('18873146680 large \'test 1\'',
dict(photo_id='18873146680',
size='large',
alt='test 1')),
('18873143536360 medium "test number two"',
dict(photo_id='18873143536360',
size='medium',
alt='test number two')),
('18873143536360 small "test number 3"',
dict(photo_id='18873143536360',
size='small',
alt='test number 3')),
('18873143536360 "test 4"',
dict(photo_id='18873143536360',
size=None,
alt='test 4')),
('18873143536360',
dict(photo_id='18873143536360',
size=None,
alt=None)),
('123456 small',
dict(photo_id='123456',
size='small',
alt=None))
])
def test_regex(input, expected):
assert re.match(flickr.PARSE_SYNTAX, input).groupdict() == expected
@pytest.mark.parametrize('input,expected', [
(['1', 'server1', '1', 'secret1', 'small'],
'https://farm1.staticflickr.com/server1/1_secret1_n.jpg'),
(['2', 'server2', '2', 'secret2', 'medium'],
'https://farm2.staticflickr.com/server2/2_secret2_c.jpg'),
(['3', 'server3', '3', 'secret3', 'large'],
'https://farm3.staticflickr.com/server3/3_secret3_b.jpg')
])
def test_source_url(input, expected):
assert flickr.source_url(
input[0], input[1], input[2], input[3], input[4]) == expected
@patch('liquid_tags.flickr.urlopen')
def test_generate_html(mock_urlopen):
# mock the return to deliver the flickr.json file instead
with open(TEST_DATA_DIR + '/flickr.json', 'rb') as f:
mock_urlopen.return_value.read.return_value = f.read()
attrs = dict(
photo_id='1234567',
size='large',
alt='this is a test'
)
expected = ('<a href="https://www.flickr.com/photos/'
'marvinxsteadfast/18841055371/">'
'<img src="https://farm6.staticflickr.com/5552/1234567_'
'17ac287217_b.jpg" alt="this is a test"></a>')
assert flickr.generate_html(attrs, 'abcdef') == expected
|
rightpeter/shadowsocks
|
refs/heads/master
|
tests/graceful_cli.py
|
977
|
#!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001
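# Connect to the server through a local SOCKS5 proxy (assumed to listen on port 1081),
# then hold the connection open for 30s so a graceful server shutdown can be exercised.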
if __name__ == '__main__':
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
s.connect((SERVER_IP, SERVER_PORT))
s.send(b'test')
time.sleep(30)
s.close()
|
wevote/WebAppPublic
|
refs/heads/master
|
election/views_admin.py
|
1
|
# election/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import election_remote_retrieve, elections_import_from_master_server, elections_sync_out_list_for_api
from .models import Election
from .serializers import ElectionSerializer
from admin_tools.views import redirect_to_sign_in_page
from ballot.models import BallotReturnedListManager
from candidate.models import CandidateCampaignListManager
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception, handle_record_not_found_exception, \
handle_record_not_saved_exception
from import_export_google_civic.controllers import retrieve_one_ballot_from_google_civic_api, \
store_one_ballot_from_google_civic_api
import json
from office.models import ContestOfficeListManager
from polling_location.models import PollingLocation
from position.models import PositionListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, get_voter_device_id, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_election_id_integer
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def election_all_ballots_retrieve_view(request, election_local_id=0):
"""
Reach out to Google and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
2) Cycle through a portion of those polling locations, enough that we are caching all of the possible ballot items
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
try:
if positive_value_exists(election_local_id):
election_on_stage = Election.objects.get(id=election_local_id)
else:
election_on_stage = Election.objects.get(google_civic_election_id=google_civic_election_id)
election_local_id = election_on_stage.id
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not retrieve ballot data. More than one election found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.DoesNotExist:
messages.add_message(request, messages.ERROR, 'Could not retrieve ballot data. Election could not be found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
state = election_on_stage.get_election_state()
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state)
polling_location_count = polling_location_count_query.count()
polling_location_list = PollingLocation.objects.all()
polling_location_list = polling_location_list.filter(state__iexact=state)
# We used to have a limit of 500 ballots to pull per election, but now retrieve all
# Ordering by "location_name" creates a bit of (locational) random order
polling_location_list = polling_location_list.order_by('location_name') # [:500]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
election_name=election_on_stage.election_name,
state=state))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations returned for the state \'{state}\'. (error 2)'.format(
election_name=election_on_stage.election_name,
state=state))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
ballots_retrieved = 0
ballots_not_retrieved = 0
ballots_with_contests_retrieved = 0
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # We retrieve 10% of the total polling locations, which should give us coverage of the entire election
# number_of_polling_locations_to_retrieve = int(.1 * polling_location_count)
for polling_location in polling_location_list:
success = False
# Get the address for this polling place, and then retrieve the ballot from Google Civic API
text_for_map_search = polling_location.get_text_for_map_search()
one_ballot_results = retrieve_one_ballot_from_google_civic_api(
text_for_map_search, election_on_stage.google_civic_election_id)
if one_ballot_results['success']:
one_ballot_json = one_ballot_results['structured_json']
store_one_ballot_results = store_one_ballot_from_google_civic_api(one_ballot_json, 0,
polling_location.we_vote_id)
if store_one_ballot_results['success']:
success = True
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
if one_ballot_results['contests_retrieved']:
ballots_with_contests_retrieved += 1
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # Break out of this loop, assuming we have a minimum number of ballots with contests retrieved
# # If we don't achieve the minimum number of ballots_with_contests_retrieved, break out at the emergency level
# emergency = (ballots_retrieved + ballots_not_retrieved) >= (3 * number_of_polling_locations_to_retrieve)
# if ((ballots_retrieved + ballots_not_retrieved) >= number_of_polling_locations_to_retrieve and
# ballots_with_contests_retrieved > 20) or emergency:
# break
if ballots_retrieved > 0:
total_retrieved = ballots_retrieved + ballots_not_retrieved
messages.add_message(request, messages.INFO,
'Ballot data retrieved from Google Civic for the {election_name}. '
'(ballots retrieved: {ballots_retrieved} '
'(with contests: {ballots_with_contests_retrieved}), '
'not retrieved: {ballots_not_retrieved}, '
'total: {total})'.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
ballots_with_contests_retrieved=ballots_with_contests_retrieved,
election_name=election_on_stage.election_name,
total=total_retrieved))
else:
messages.add_message(request, messages.ERROR,
'Ballot data NOT retrieved from Google Civic for the {election_name}.'
' (not retrieved: {ballots_not_retrieved})'.format(
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_on_stage.election_name))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
@login_required
def election_edit_view(request, election_local_id):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_local_id = convert_to_int(election_local_id)
election_on_stage_found = False
election_on_stage = Election()
if positive_value_exists(election_local_id):
try:
election_on_stage = Election.objects.get(id=election_local_id)
election_on_stage_found = True
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Election.DoesNotExist:
# This is fine, create new
pass
else:
        # If here, we are creating a new election
pass
if election_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'election': election_on_stage,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, "election/election_edit.html", template_values)
@login_required()
def election_edit_process_view(request):
"""
Process the new or edit election forms
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
election_local_id = convert_to_int(request.POST.get('election_local_id', 0))
election_name = request.POST.get('election_name', False)
election_day_text = request.POST.get('election_day_text', False)
state_code = request.POST.get('state_code', False)
election_on_stage = Election()
election_changed = False
# Check to see if this election is already being used anywhere
election_on_stage_found = False
try:
election_query = Election.objects.filter(id=election_local_id)
if len(election_query):
election_on_stage = election_query[0]
election_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if election_on_stage_found:
if convert_to_int(election_on_stage.google_civic_election_id) < 1000000:
# If here, this is an election created by Google Civic and we limit what fields to update
# Update
if state_code is not False:
election_on_stage.state_code = state_code
election_changed = True
if election_changed:
election_on_stage.save()
messages.add_message(request, messages.INFO, 'Google Civic-created election updated.')
else:
# If here, this is a We Vote created election
# Update
if election_name is not False:
election_on_stage.election_name = election_name
election_changed = True
if election_day_text is not False:
election_on_stage.election_day_text = election_day_text
election_changed = True
if state_code is not False:
election_on_stage.state_code = state_code
election_changed = True
if election_changed:
election_on_stage.save()
messages.add_message(request, messages.INFO, 'We Vote-created election updated.')
else:
# Create new
next_local_election_id_integer = fetch_next_we_vote_election_id_integer()
election_on_stage = Election(
google_civic_election_id=next_local_election_id_integer,
election_name=election_name,
election_day_text=election_day_text,
state_code=state_code,
)
election_on_stage.save()
messages.add_message(request, messages.INFO, 'New election saved.')
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not save election.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
@login_required()
def election_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_list_query = Election.objects.all()
election_list_query = election_list_query.order_by('election_day_text').reverse()
election_list = election_list_query
template_values = {
'messages_on_stage': messages_on_stage,
'election_list': election_list,
}
return render(request, 'election/election_list.html', template_values)
@login_required()
def election_remote_retrieve_view(request):
"""
Reach out to Google and retrieve the latest list of available elections
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
results = election_remote_retrieve()
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
messages.add_message(request, messages.INFO, 'Upcoming elections retrieved from Google Civic.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
@login_required()
def election_summary_view(request, election_local_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_local_id = convert_to_int(election_local_id)
election_on_stage_found = False
election_on_stage = Election()
try:
election_on_stage = Election.objects.get(id=election_local_id)
election_on_stage_found = True
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Election.DoesNotExist:
# This is fine, proceed anyways
pass
if election_on_stage_found:
ballot_returned_list_manager = BallotReturnedListManager()
ballot_returned_list_results = ballot_returned_list_manager.retrieve_ballot_returned_list_for_election(
election_on_stage.google_civic_election_id)
if ballot_returned_list_results['success']:
ballot_returned_list = ballot_returned_list_results['ballot_returned_list']
else:
ballot_returned_list = []
template_values = {
'messages_on_stage': messages_on_stage,
'election': election_on_stage,
'ballot_returned_list': ballot_returned_list,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'election/election_summary.html', template_values)
# TODO Which of these two do we standardize on?
class ElectionsSyncOutView(APIView):
"""
Export raw voter data to JSON format
"""
def get(self, request): # Removed: , format=None
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
results = elections_sync_out_list_for_api(voter_device_id)
if 'success' not in results:
json_data = results['json_data']
return HttpResponse(json.dumps(json_data), content_type='application/json')
elif not results['success']:
json_data = results['json_data']
return HttpResponse(json.dumps(json_data), content_type='application/json')
else:
election_list = results['election_list']
serializer = ElectionSerializer(election_list, many=True)
return Response(serializer.data)
# This page does not need to be protected.
class ExportElectionDataView(APIView):
def get(self, request, format=None):
election_list = Election.objects.all()
serializer = ElectionSerializer(election_list, many=True)
return Response(serializer.data)
@login_required
def elections_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = elections_import_from_master_server()
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Elections import completed. '
'Saved: {saved}, Updated: {updated}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required()
def election_migration_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
we_vote_election = Election()
office_list_manager = ContestOfficeListManager()
candidate_list_manager = CandidateCampaignListManager()
position_list_manager = PositionListManager()
we_vote_election_office_list = []
google_civic_election_office_list = []
results = election_manager.retrieve_we_vote_elections()
we_vote_election_list = results['election_list']
state_code_list = []
for election in we_vote_election_list:
if election.state_code not in state_code_list:
state_code_list.append(election.state_code)
google_civic_election = Election()
results = election_manager.retrieve_google_civic_elections_in_state_list(state_code_list)
google_civic_election_list = results['election_list']
we_vote_election_id = convert_to_int(request.GET.get('we_vote_election_id', 0))
if not positive_value_exists(we_vote_election_id):
we_vote_election_id = convert_to_int(request.POST.get('we_vote_election_id', 0))
if positive_value_exists(we_vote_election_id):
results = election_manager.retrieve_election(we_vote_election_id)
if results['election_found']:
we_vote_election = results['election']
return_list_of_objects = True
results = office_list_manager.retrieve_all_offices_for_upcoming_election(we_vote_election_id,
return_list_of_objects)
if results['office_list_found']:
we_vote_election_office_list = results['office_list_objects']
# Go through each office and attach a list of candidates under this office
we_vote_election_office_list_new = []
for one_office in we_vote_election_office_list:
candidate_results = candidate_list_manager.retrieve_all_candidates_for_office(0, one_office.we_vote_id)
if candidate_results['candidate_list_found']:
candidate_list = candidate_results['candidate_list']
new_candidate_list = []
# Go through candidate_list and find the number of positions saved for each candidate
for candidate in candidate_list:
retrieve_public_positions = True # The alternate is positions for friends-only
position_list = position_list_manager.retrieve_all_positions_for_candidate_campaign(
retrieve_public_positions, 0, candidate.we_vote_id)
candidate.position_count = len(position_list) # This is wasteful (instead of using count), but ok
# Now find the candidates from the Google Civic Election that we might want to transfer data to
new_candidate_list.append(candidate)
one_office.candidate_list = new_candidate_list
else:
one_office.candidate_list = []
we_vote_election_office_list_new.append(one_office)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
if not positive_value_exists(google_civic_election_id):
google_civic_election_id = convert_to_int(request.POST.get('google_civic_election_id', 0))
if positive_value_exists(google_civic_election_id):
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
google_civic_election = results['election']
return_list_of_objects = True
results = office_list_manager.retrieve_all_offices_for_upcoming_election(google_civic_election_id,
return_list_of_objects)
if results['office_list_found']:
google_civic_election_office_list = results['office_list_objects']
    # We want to transfer data from each We Vote office to its matching Google Civic office, e.g.:
transfer_array = {}
transfer_array['wv01off1461'] = "wv02off269"
template_values = {
'messages_on_stage': messages_on_stage,
'we_vote_election': we_vote_election,
'we_vote_election_id': we_vote_election_id,
'we_vote_election_list': we_vote_election_list,
'we_vote_election_office_list': we_vote_election_office_list_new,
'google_civic_election': google_civic_election,
'google_civic_election_id': google_civic_election_id,
'google_civic_election_list': google_civic_election_list,
'google_civic_election_office_list': google_civic_election_office_list,
}
return render(request, 'election/election_migration.html', template_values)
|
glouppe/scikit-learn
|
refs/heads/master
|
examples/classification/plot_lda.py
|
142
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage (here the Ledoit-Wolf estimator, ``shrinkage='auto'``)
improves classification when the number of training samples is small
relative to the number of features.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
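# For each feature count, average test accuracy over n_averages random draws to compare
# LDA with Ledoit-Wolf shrinkage (shrinkage='auto') against plain empirical-covariance LDA.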
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
|
CUCWD/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/plugins/plugin_signals.py
|
16
|
from logging import getLogger
from . import constants, registry, utils
log = getLogger(__name__)
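# Wire up signal receivers declared by plugin apps: for each plugin exposing a signals
# config for this project type, import its receiver functions and connect them.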
def connect_receivers(project_type):
for signals_module, signals_config in _iter_plugins(project_type):
for signal, receiver_func, receiver_config in _iter_receivers(signals_module, signals_config):
signal.connect(
receiver_func,
sender=_get_sender(receiver_config),
dispatch_uid=_get_dispatch_uuid(receiver_config, receiver_func),
)
def _iter_receivers(signals_module, signals_config):
for receiver_config in signals_config.get(constants.PluginSignals.RECEIVERS, []):
receiver_func = utils.import_attr_in_module(
signals_module,
receiver_config[constants.PluginSignals.RECEIVER_FUNC_NAME],
)
signal = utils.import_attr(receiver_config[constants.PluginSignals.SIGNAL_PATH])
yield signal, receiver_func, receiver_config
def _iter_plugins(project_type):
for app_config in registry.get_app_configs(project_type):
signals_config = _get_config(app_config, project_type)
if signals_config is None:
log.debug(u'Plugin Apps [Signals]: Did NOT find %s for %s', app_config.name, project_type)
continue
signals_module_path = utils.get_module_path(app_config, signals_config, constants.PluginSignals)
signals_module = utils.import_module(signals_module_path)
log.debug(
u'Plugin Apps [Signals]: Found %s with %d receiver(s) for %s',
app_config.name,
len(signals_config.get(constants.PluginSignals.RECEIVERS, [])),
project_type,
)
yield signals_module, signals_config
def _get_config(app_config, project_type):
plugin_config = getattr(app_config, constants.PLUGIN_APP_CLASS_ATTRIBUTE_NAME, {})
signals_config = plugin_config.get(constants.PluginSignals.CONFIG, {})
return signals_config.get(project_type)
def _get_sender(receiver_config):
sender_path = receiver_config.get(constants.PluginSignals.SENDER_PATH)
if sender_path:
sender = utils.import_attr(sender_path)
return sender
def _get_dispatch_uuid(receiver_config, receiver_func):
dispatch_uid = receiver_config.get(constants.PluginSignals.DISPATCH_UID)
if dispatch_uid is None:
dispatch_uid = u'{}.{}'.format(receiver_func.__module__, receiver_func.__name__)
return dispatch_uid
|
bat-serjo/vivisect
|
refs/heads/master
|
vivisect/codegraph.py
|
3
|
'''
Various codeflow oriented graph constructs.
'''
import envi
import visgraph.graphcore as v_graphcore
from vivisect.const import *
class CallGraph(v_graphcore.HierGraph):
'''
A graph which represents procedural branches.
'''
def __init__(self):
v_graphcore.Graph.__init__(self)
def getFunctionNode(self, va):
node = self.getNode(va)
if node is None:
node = self.addNode(nid=va)
return node
def getCallEdge(self, f1va, f2va):
f1 = self.getFunctionNode(f1va)
# deconflict call graph edges...
for edge in self.getRefsFrom(f1):
if edge[2] == f2va:
return edge
f2 = self.getFunctionNode(f2va)
return self.addEdge(f1,f2)
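# CodeBlockGraph incrementally builds a basic-block graph by following code branches from
# entry points, splitting an existing block whenever a branch lands in its middle.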
class CodeBlockGraph(v_graphcore.HierGraph):
def __init__(self, vw):
v_graphcore.Graph.__init__(self)
self.vw = vw
self.nodevas = {}
def addEntryPoint(self, va):
node = self.getNode(va)
if node is not None:
return node
        # an entry point, by definition, gets its own node
enode = self.getCodeBlockNode(va)
done = set()
todo = [ va, ]
while todo:
va = todo.pop()
if va in done:
continue
done.add(va)
branches = self._getCodeBranches(va)
tdone = set()
for tova,bflags in branches:
if tova in tdone:
continue
tdone.add(tova)
node = self.getNodeByVa(va)
if self._addCodeBranch(node,va,tova,bflags):
todo.append( tova )
return enode
def _getCodeBranches(self, va):
loc = self.vw.getLocation(va)
if loc is None or loc[L_LTYPE] != LOC_OP:
return []
lva,lsize,ltype,ltinfo = loc
xrefs = self.vw.getXrefsFrom(va, rtype=REF_CODE)
crefs = [ (xto,xflags) for (xfrom,xto,xtype,xflags) in xrefs ]
        # If any of our other branches are conditional, so is our fallthrough
if not ltinfo & envi.IF_NOFALL:
bflags = envi.BR_FALL
if any([ (x[3] & envi.BR_COND) for x in xrefs]):
bflags |= envi.BR_COND
crefs.append( (lva+lsize, bflags) )
return crefs
def _addCodeBranch(self, node, va, brva, bflags):
if self.isCodeBlockNode(brva):
self.addCodeBlockEdge(node,va,brva)
return True
if bflags & envi.BR_FALL and not bflags & envi.BR_COND:
self.addVaToNode(node,brva)
return True
if bflags & envi.BR_DEREF:
# FIXME handle these
return False
n2node = self.addCodeBlockEdge(node,va,brva)
if bflags & envi.BR_PROC:
self.setNodeProp(n2node,'isfunc',True)
return True
def isCodeBlockNode(self, va):
return self.getNode(va) is not None
def getCodeBlockBounds(self, node):
cbva = node[0]
lastva = node[1]['valist'][-1]
cbsize = (lastva - cbva) + 1
return cbva,cbsize
def getCodeBlockNode(self, va):
'''
Create or retrieve a codeblock node for the given va.
NOTE: If the given va is already present within another
node, this API will *split* the other node.
'''
# is it already a cb node?
node = self.getNode(va)
if node is not None:
return node
# is it part of another block already?
node = self.getNodeByVa(va)
newnode = self.addNode(nid=va,cbva=va,valist=())
self.addVaToNode(newnode,va)
if node is None:
return newnode
# we need to split an existing node... neato...
valist = node[1]['valist']
vaidx = valist.index(va)
vabeg = valist[:vaidx]
vaend = valist[vaidx:]
lastva = vabeg[-1]
newlastva = vaend[-1]
self.setNodeVaList(node, vabeg)
self.setNodeVaList(newnode, vaend)
# steal all his outbound codeflow edges
for edge in self.getRefsFrom(node):
codeflow = edge[3].get('codeflow')
if codeflow is None:
continue
self.addCodeBlockEdge(newnode, codeflow[0], codeflow[1])
self.delEdge(edge)
# add an outbound to us...
self.addCodeBlockEdge(node, lastva, va)
return newnode
def addCodeBlockEdge(self, node1, va1, va2):
vatup = (va1,va2)
edges = self.getEdgesByProp('codeflow',vatup)
if len(edges):
return edges[0]
node2 = self.getCodeBlockNode(va2)
edge = self.addEdge(node1, node2)
self.setEdgeProp(edge, 'va1', va1)
self.setEdgeProp(edge, 'va2', va2)
self.setEdgeProp(edge, 'codeflow', vatup)
#w1 = node1[1].get('weight',0)
#w2 = node2[1].get('weight',0)
# track weights in real time ( per func? )
#self.setNodeProp(node2,'weight',max(w2,w1+1))
return node2
def addVaToNode(self, node, va):
self.nodevas[va] = node
valist = node[1]['valist']
self.setNodeProp(node,'valist',valist + (va,))
def setNodeVaList(self, node, valist):
[ self.nodevas.pop(va,None) for va in node[1]['valist'] ]
[ self.nodevas.__setitem__(va,node) for va in valist ]
self.setNodeProp(node,'valist',valist)
def getNodeByVa(self, va):
return self.nodevas.get(va)
class FuncBlockGraph(CodeBlockGraph):
def __init__(self, vw, fva):
CodeBlockGraph.__init__(self,vw)
root = self.addEntryPoint(fva)
self.setHierRootNode(root)
def _getCodeBranches(self, va):
return [ x for x in CodeBlockGraph._getCodeBranches(self,va) if not x[1] & envi.BR_PROC ]
|
pgagne/robottelo
|
refs/heads/master
|
tests/foreman/ui/test_lifecycleenvironment.py
|
2
|
"""Test class for Lifecycle Environment UI
:Requirement: Lifecycleenvironment
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: LifecycleEnvironments
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from nailgun import entities
from navmazing import NavigationTriesExceeded
from pytest import raises
from airgun.session import Session
from robottelo.api.utils import create_role_permissions
from robottelo.constants import (
CUSTOM_MODULE_STREAM_REPO_2,
ENVIRONMENT,
FAKE_0_CUSTOM_PACKAGE,
FAKE_0_CUSTOM_PACKAGE_NAME,
FAKE_0_PUPPET_REPO,
FAKE_0_YUM_REPO,
FAKE_1_CUSTOM_PACKAGE,
FAKE_1_CUSTOM_PACKAGE_NAME,
FAKE_2_CUSTOM_PACKAGE,
FAKE_3_CUSTOM_PACKAGE_NAME,
REPO_TYPE,
)
from robottelo.datafactory import gen_string
from robottelo.decorators import (
fixture,
tier2,
tier3,
upgrade,
)
@fixture(scope='module')
def module_org():
return entities.Organization().create()
@upgrade
@tier2
def test_positive_end_to_end(session):
"""Perform end to end testing for lifecycle environment component
:id: b2293de9-7a71-462e-b988-321b07c01642
:expectedresults: All expected CRUD actions finished successfully
:CaseLevel: Integration
:CaseImportance: High
"""
lce_name = gen_string('alpha')
new_lce_name = gen_string('alpha')
label = gen_string('alpha')
description = gen_string('alpha')
with session:
# Create new lce
session.lifecycleenvironment.create({
'name': lce_name,
'label': label,
'description': description
})
lce_values = session.lifecycleenvironment.read(lce_name)
assert lce_values['details']['name'] == lce_name
assert lce_values['details']['label'] == label
assert lce_values['details']['description'] == description
assert lce_values['details']['unauthenticated_pull'] == 'No'
# Update lce with new name
session.lifecycleenvironment.update(lce_name, {'details.name': new_lce_name})
lce_values = session.lifecycleenvironment.read_all()
assert new_lce_name in lce_values['lce']
assert lce_name not in lce_values['lce']
# Delete lce
session.lifecycleenvironment.delete(new_lce_name)
lce_values = session.lifecycleenvironment.read_all()
assert new_lce_name not in lce_values['lce']
@upgrade
@tier2
def test_positive_create_chain(session):
"""Create Content Environment in a chain
:id: ed3d2c88-ef0a-4a1a-9f11-5bdb2119fc18
:expectedresults: Environment is created
:CaseLevel: Integration
"""
lce_path_name = gen_string('alpha')
lce_name = gen_string('alpha')
with session:
session.lifecycleenvironment.create(
values={'name': lce_path_name}
)
session.lifecycleenvironment.create(
values={'name': lce_name},
prior_entity_name=lce_path_name,
)
lce_values = session.lifecycleenvironment.read_all()
assert lce_name in lce_values['lce']
assert lce_path_name in lce_values['lce'][lce_name]
@tier2
@upgrade
def test_positive_add_puppet_module(session, module_org):
"""Promote content view with puppet module to a new environment
:id: 12bed99d-8f96-48ca-843a-b77e123e8e2e
:steps:
1. Create Product/puppet repo and sync it
2. Create CV and add puppet module from created repo
3. Publish and promote CV to new environment
:expectedresults: Puppet modules can be listed successfully from lifecycle
environment interface
:BZ: 1408264
:CaseLevel: Integration
"""
puppet_module = 'httpd'
product = entities.Product(organization=module_org).create()
repo = entities.Repository(
product=product,
content_type=REPO_TYPE['puppet'],
url=FAKE_0_PUPPET_REPO
).create()
repo.sync()
lce = entities.LifecycleEnvironment(organization=module_org).create()
cv = entities.ContentView(organization=module_org).create()
with session:
session.contentview.add_puppet_module(cv.name, puppet_module)
session.contentview.publish(cv.name)
result = session.contentview.promote(cv.name, 'Version 1.0', lce.name)
assert 'Promoted to {}'.format(lce.name) in result['Status']
lce = session.lifecycleenvironment.search_puppet_module(
lce.name,
puppet_module,
cv_name=cv.name
)
assert lce[0]['Name'] == puppet_module
@tier3
def test_positive_search_lce_content_view_packages_by_full_name(
session, module_org):
"""Search Lifecycle Environment content view packages by full name
Note: if package full name looks like "bear-4.1-1.noarch",
    e.g. name-version-release-arch, the package name is "bear"
:id: fad05fe9-b673-4384-b65a-926d4a0d2598
:customerscenario: true
:steps:
1. Create a product with a repository synchronized
- The repository must contain at least two package names P1 and
P2
- P1 has only one package
- P2 has two packages
2. Create a content view with the repository and publish it
3. Go to Lifecycle Environment > Library > Packages
4. Select the content view
5. Search by packages using full names
    :expectedresults: only the searched packages were found
:BZ: 1432155
:CaseLevel: System
"""
packages = [
{'name': FAKE_0_CUSTOM_PACKAGE_NAME,
'full_names': [FAKE_0_CUSTOM_PACKAGE]},
{'name': FAKE_1_CUSTOM_PACKAGE_NAME,
'full_names': [FAKE_1_CUSTOM_PACKAGE, FAKE_2_CUSTOM_PACKAGE]},
]
product = entities.Product(organization=module_org).create()
repository = entities.Repository(
product=product, url=FAKE_0_YUM_REPO).create()
repository.sync()
content_view = entities.ContentView(
organization=module_org, repository=[repository]).create()
content_view.publish()
with session:
for package in packages:
for package_full_name in package['full_names']:
result = session.lifecycleenvironment.search_package(
ENVIRONMENT, package_full_name, cv_name=content_view.name)
assert len(result) == 1
assert result[0]['Name'] == package['name']
@tier3
def test_positive_search_lce_content_view_packages_by_name(
session, module_org):
"""Search Lifecycle Environment content view packages by name
Note: if package full name looks like "bear-4.1-1.noarch",
    e.g. name-version-release-arch, the package name is "bear"
:id: f8dec2a8-8971-44ad-a4d5-1eb5d2eb62f6
:customerscenario: true
:steps:
1. Create a product with a repository synchronized
- The repository must contain at least two package names P1 and
P2
- P1 has only one package
- P2 has two packages
2. Create a content view with the repository and publish it
3. Go to Lifecycle Environment > Library > Packages
4. Select the content view
5. Search by package names
    :expectedresults: only the searched packages were found
:BZ: 1432155
:CaseLevel: System
"""
packages = [
{'name': FAKE_0_CUSTOM_PACKAGE_NAME,
'packages_count': 1},
{'name': FAKE_1_CUSTOM_PACKAGE_NAME,
'packages_count': 2},
]
product = entities.Product(organization=module_org).create()
repository = entities.Repository(
product=product, url=FAKE_0_YUM_REPO).create()
repository.sync()
content_view = entities.ContentView(
organization=module_org, repository=[repository]).create()
content_view.publish()
with session:
for package in packages:
result = session.lifecycleenvironment.search_package(
ENVIRONMENT, package['name'], cv_name=content_view.name)
assert len(result) == package['packages_count']
for entry in result:
assert entry['Name'].startswith(package['name'])
@tier3
def test_positive_search_lce_content_view_module_streams_by_name(
session, module_org):
"""Search Lifecycle Environment content view module streams by name
:id: e67893b2-a56e-4eac-87e6-63be897ba912
:customerscenario: true
:steps:
1. Create a product with a repository synchronized
- The repository must contain at least two module stream names P1 and
P2
- P1 has two module streams
- P2 has three module streams
2. Create a content view with the repository and publish it
3. Go to Lifecycle Environment > Library > ModuleStreams
4. Select the content view
5. Search by module stream names
    :expectedresults: only the searched module streams were found
:CaseLevel: System
"""
module_streams = [
{
'name': FAKE_1_CUSTOM_PACKAGE_NAME,
'streams_count': 2
},
{
'name': FAKE_3_CUSTOM_PACKAGE_NAME,
'streams_count': 3
},
]
product = entities.Product(organization=module_org).create()
repository = entities.Repository(
product=product, url=CUSTOM_MODULE_STREAM_REPO_2).create()
repository.sync()
content_view = entities.ContentView(
organization=module_org, repository=[repository]).create()
content_view.publish()
with session:
for module in module_streams:
result = session.lifecycleenvironment.search_module_stream(
ENVIRONMENT, module['name'], cv_name=content_view.name)
assert len(result) == module['streams_count']
for entry in result:
assert entry['Name'].startswith(module['name'])
@tier2
@upgrade
def test_positive_custom_user_view_lce(session, test_name):
"""As a custom user attempt to view a lifecycle environment created
by admin user
:id: 768b647b-c530-4eca-9caa-38cf8622f36d
:BZ: 1420511
:Steps:
As an admin user:
            1. Create an additional lifecycle environment other than Library
2. Create a user without administrator privileges
            3. Create a role with the following permissions:
* (Miscellaneous): access_dashboard
* Lifecycle Environment:
* edit_lifecycle_environments
* promote_or_remove_content_views_to_environment
* view_lifecycle_environments
* Location: view_locations
* Organization: view_organizations
4. Assign the created role to the custom user
As a custom user:
1. Log in
2. Navigate to Content -> Lifecycle Environments
:expectedresults: The additional lifecycle environment is viewable and
accessible by the custom user.
:CaseLevel: Integration
"""
role_name = gen_string('alpha')
lce_name = gen_string('alpha')
user_login = gen_string('alpha')
user_password = gen_string('alpha')
org = entities.Organization().create()
role = entities.Role(name=role_name).create()
permissions_types_names = {
None: ['access_dashboard'],
'Organization': ['view_organizations'],
'Location': ['view_locations'],
'Katello::KTEnvironment': [
'view_lifecycle_environments',
'edit_lifecycle_environments',
'promote_or_remove_content_views_to_environments'
]
}
create_role_permissions(role, permissions_types_names)
entities.User(
default_organization=org,
organization=[org],
role=[role],
login=user_login,
password=user_password
).create()
# create a life cycle environment as admin user and ensure it's visible
with session:
session.organization.select(org.name)
session.lifecycleenvironment.create(values={'name': lce_name})
lce_values = session.lifecycleenvironment.read_all()
assert lce_name in lce_values['lce']
# ensure the created user also can find the created lifecycle environment link
with Session(test_name, user_login, user_password) as non_admin_session:
# to ensure that the created user has only the assigned
# permissions, check that hosts menu tab does not exist
with raises(NavigationTriesExceeded):
assert not non_admin_session.host.read_all()
        # assert that the user can view the lce created by the admin user
lce_values = non_admin_session.lifecycleenvironment.read_all()
assert lce_name in lce_values['lce']
|
sencha/chromium-spacewalk
|
refs/heads/master
|
tools/perf/page_sets/page_cycler/alexa_us.py
|
34
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class AlexaUsPage(page_module.Page):
def __init__(self, url, page_set):
super(AlexaUsPage, self).__init__(url=url, page_set=page_set)
class AlexaUsPageSet(page_set_module.PageSet):
""" Alexa US page_cycler benchmark """
def __init__(self):
super(AlexaUsPageSet, self).__init__(
# pylint: disable=C0301
serving_dirs=set(['../../../../data/page_cycler/alexa_us']))
urls_list = [
# pylint: disable=C0301
'file://../../../../data/page_cycler/alexa_us/accountservices.passport.net/',
'file://../../../../data/page_cycler/alexa_us/sfbay.craigslist.org/',
'file://../../../../data/page_cycler/alexa_us/www.amazon.com/',
'file://../../../../data/page_cycler/alexa_us/www.aol.com/',
'file://../../../../data/page_cycler/alexa_us/www.bbc.co.uk/',
'file://../../../../data/page_cycler/alexa_us/www.blogger.com/',
'file://../../../../data/page_cycler/alexa_us/www.cnn.com/',
'file://../../../../data/page_cycler/alexa_us/www.ebay.com/',
'file://../../../../data/page_cycler/alexa_us/www.flickr.com/',
'file://../../../../data/page_cycler/alexa_us/www.friendster.com/',
'file://../../../../data/page_cycler/alexa_us/www.go.com/',
'file://../../../../data/page_cycler/alexa_us/www.google.com/',
'file://../../../../data/page_cycler/alexa_us/www.imdb.com/',
'file://../../../../data/page_cycler/alexa_us/www.megaupload.com/',
'file://../../../../data/page_cycler/alexa_us/www.msn.com/',
'file://../../../../data/page_cycler/alexa_us/www.myspace.com/',
'file://../../../../data/page_cycler/alexa_us/www.orkut.com/',
'file://../../../../data/page_cycler/alexa_us/www.wikipedia.org/',
'file://../../../../data/page_cycler/alexa_us/www.xanga.com/',
'file://../../../../data/page_cycler/alexa_us/www.youtube.com/'
]
for url in urls_list:
self.AddPage(AlexaUsPage(url, self))
|
hlt-mt/tensorflow
|
refs/heads/master
|
tensorflow/tools/docker/simple_console.py
|
52
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
DebrahR/project2
|
refs/heads/master
|
server/lib/werkzeug/local.py
|
310
|
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident, importing it from wherever this Python provides it.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
    release data held by proxies that way; one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function, but the correct way is to pop the
    item from the stack after use. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
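    # The remaining special methods simply forward the operator protocol to
    # the proxied object; lambdas keep this long, mechanical list compact.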
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
|
ngokevin/zamboni
|
refs/heads/master
|
mkt/site/management/commands/clean_redis.py
|
9
|
import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
def keys():
ks = slave.keys()
log.info('There are %s keys to clean up.' % len(ks))
ks = iter(ks)
while 1:
buffer = []
for _ in xrange(CHUNK):
try:
buffer.append(ks.next())
except StopIteration:
yield buffer
return
yield buffer
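    # keys() yields the key list in CHUNK-sized slices; e.g. five keys with
    # CHUNK = 3 would arrive as ['k1', 'k2', 'k3'] then ['k4', 'k5']
    # (illustrative values only).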
tmp = tempfile.NamedTemporaryFile(delete=False)
for ks in keys():
tmp.write('\n'.join(ks))
tmp.close()
# It's hard to get Python to clean up the memory from slave.keys(), so
# we'll let the OS do it. You have to pass sys.executable both as the
# thing to run and so argv[0] is set properly.
os.execl(sys.executable, sys.executable, sys.argv[0],
sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
tmp = open(filename)
total = [1, 0]
p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
total[0] = int(p.communicate()[0].strip().split()[0])
def file_keys():
while 1:
buffer = []
for _ in xrange(CHUNK):
line = tmp.readline()
if line:
buffer.append(line.strip())
else:
yield buffer
return
yield buffer
num = 0
for ks in file_keys():
pipe = slave.pipeline()
for k in ks:
pipe.scard(k)
try:
drop = [k for k, size in zip(ks, pipe.execute())
if 0 < size < MIN or size > MAX]
except RedisError:
continue
num += len(ks)
percent = round(float(num) / total[0] * 100, 1) if total[0] else 0
total[1] += len(drop)
log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
pipe = master.pipeline()
for k in drop:
pipe.expire(k, EXPIRE)
try:
pipe.execute()
except RedisError:
continue
time.sleep(1) # Poor man's rate limiting.
if total[0]:
log.info('Dropped %s keys [%.1f%%].' %
(total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
help = "Clean up the redis used by cache machine."
def handle(self, *args, **kw):
try:
master = redisutils.connections['cache']
slave = redisutils.connections['cache_slave']
except Exception:
log.error('Could not connect to redis.', exc_info=True)
return
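        # A filename argument means this is the re-exec'd child spawned by
        # vacuum(); process the key list it wrote, then delete the file.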
if args:
filename = args[0]
try:
cleanup(master, slave, filename)
finally:
os.unlink(filename)
else:
vacuum(master, slave)
|
kostaspl/SpiderMonkey38
|
refs/heads/tmpbr
|
media/webrtc/trunk/tools/gyp/pylib/gyp/msvs_emulation.py
|
29
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
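  # End-to-end examples (illustrative only, not from the original source):
  #   QuoteForRspFile('a"b')  returns  "a\"b"   (n=0 backslashes -> one added)
  #   QuoteForRspFile('100%') returns  "100%%"  (percents doubled, see below)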
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
  # %'s also need to be doubled, otherwise they're interpreted as batch
  # positional arguments. The doubled % collapses back to a single literal %
  # by the time the program sees it, so text that merely looks like an
  # environment variable reference (e.g. %PATH%) is passed through
  # unexpanded rather than being substituted by the shell.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
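  # For example (illustrative, not part of the original source):
  #   _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'b'])  ->  1
  #   _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'x'])  ->  None (default)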
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
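  # e.g. (illustrative) _DoRemapping('true', {'true': '/GL'}) -> '/GL', and
  # _DoRemapping(['0', '9'], {'0': 'd'}) -> ['d'] (unmapped items dropped).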
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
self.dxsdk_dir = _FindDirectXInstallation()
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
self.wdk_dir = os.environ.get('WDK_DIR')
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = self.GetTargetPlatform(config)
target_platform = {'x86': 'Win32'}.get(target_platform, target_platform)
replacements = {
'$(VSInstallDir)': self.vs_version.Path(),
'$(VCInstallDir)': os.path.join(self.vs_version.Path(), 'VC') + '\\',
'$(OutDir)\\': base_to_build + '\\' if base_to_build else '',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(InputPath)': '${source}',
'$(InputName)': '${root}',
'$(ProjectName)': self.spec['target_name'],
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
}
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
replacements['$(DXSDK_DIR)'] = self.dxsdk_dir if self.dxsdk_dir else ''
replacements['$(WDK_DIR)'] = self.wdk_dir if self.wdk_dir else ''
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
return [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetTargetPlatform(self, config):
target_platform = self.msvs_target_platform.get(config, '')
if not target_platform:
target_platform = 'Win32'
return {'Win32': 'x86'}.get(target_platform, target_platform)
def _RealConfig(self, config):
target_platform = self.GetTargetPlatform(config)
if target_platform == 'x64' and not config.endswith('_x64'):
config += '_x64'
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
config = self._RealConfig(config)
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
config = self._RealConfig(config)
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._RealConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._RealConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._RealConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._RealConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('AdditionalOptions', prefix='')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def GetPrecompiledHeader(self, config, gyp_to_build_path):
"""Returns an object that handles the generation of precompiled header
build steps."""
config = self._RealConfig(config)
return _PchHelper(self, config, gyp_to_build_path)
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._RealConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._RealConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._RealConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._RealConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._RealConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('AdditionalOptions')
return libflags
def _GetDefFileAsLdflags(self, spec, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = ''
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
ldflags.append('/DEF:"%s"' % gyp_to_build_path(def_files[0]))
elif len(def_files) > 1:
raise Exception("Multiple .def files")
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, is_executable):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._RealConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(self.spec, ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
ld('AdditionalOptions', prefix='')
ld('SubSystem', map={'1': 'CONSOLE', '2': 'WINDOWS'}, prefix='/SUBSYSTEM:')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration', map={'1': '/LTCG'})
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
# TODO(scottmg): These too.
ldflags.extend(('kernel32.lib', 'user32.lib', 'gdi32.lib', 'winspool.lib',
'comdlg32.lib', 'advapi32.lib', 'shell32.lib', 'ole32.lib',
'oleaut32.lib', 'uuid.lib', 'odbc32.lib', 'DelayImp.lib'))
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest_file = self._GetLdManifestFlags(
config, manifest_base_name, is_executable and not have_def_file)
ldflags.extend(manifest_flags)
manifest_files = self._GetAdditionalManifestFiles(config, gyp_to_build_path)
manifest_files.append(intermediate_manifest_file)
return ldflags, manifest_files
def _GetLdManifestFlags(self, config, name, allow_isolation):
"""Returns the set of flags that need to be added to the link to generate
a default manifest, as well as the name of the generated file."""
# Add manifest flags that mirror the defaults in VS. Chromium dev builds
# do not currently use any non-default settings, but we could parse
# VCManifestTool blocks if Chromium or other projects need them in the
# future. Of particular note, we do not yet support EmbedManifest because
# it complicates incremental linking.
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
'''/MANIFESTUAC:"level='asInvoker' uiAccess='false'"'''
]
if allow_isolation:
flags.append('/ALLOWISOLATION')
return flags, output_name
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if (self._Setting(
('VCManifestTool', 'EmbedManifest'), config, default='') == 'true'):
print 'gyp/msvs_emulation.py: "EmbedManifest: true" not yet supported.'
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._RealConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._RealConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def HasExplicitIdlRules(self, spec):
"""Determine if there's an explicit rule for idl files. When there isn't we
need to generate implicit rules to build MIDL .idl files."""
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
return True
return False
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._RealConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
flags = ['/char', 'signed', '/env', 'win32', '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(self, settings, config, gyp_to_build_path):
self.settings = settings
self.config = config
self.gyp_to_build_path = gyp_to_build_path
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def _PchSource(self):
"""Get the source file that is built once to compile the pch data."""
return self.gyp_to_build_path(
self.settings.msvs_precompiled_source[self.config])
def _PchOutput(self):
"""Get the name of the output of the compiled pch data."""
return '${pchprefix}.' + self._PchHeader() + '.pch'
def GetObjDependencies(self, sources, objs):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
if not self._PchHeader():
return []
source = self._PchSource()
assert source
pch_ext = os.path.splitext(self._PchSource())[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self._PchOutput())]
return []
def GetPchBuildCommands(self):
"""Returns [(path_to_pch, language_flag, language, header)].
    |path_to_pch| and |header| are relative to the build directory."""
header = self._PchHeader()
source = self._PchSource()
if not source or not header:
return []
ext = os.path.splitext(source)[1]
lang = 'c' if ext == '.c' else 'cc'
return [(self._PchOutput(), '/Yc' + header, lang, source)]
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
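  # e.g. (illustrative) ExpandMacros('$(OutDir)\\foo', {'$(OutDir)': 'out'})
  # returns 'out\\foo'; keys are replaced by plain string substitution.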
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
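  # e.g. (illustrative) {'A': '1'} is formatted as 'A=1\x00\x00' -- each
  # key=value pair is NUL-terminated and the block ends with an extra NUL.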
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path."""
vs = GetVSVersion(generator_flags)
for arch in ('x86', 'x64'):
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
|
kutenai/django
|
refs/heads/master
|
tests/requests/test_data_upload_settings.py
|
12
|
from io import BytesIO
from django.core.exceptions import RequestDataTooBig, TooManyFieldsSent
from django.core.handlers.wsgi import WSGIRequest
from django.test import SimpleTestCase
from django.test.client import FakePayload
TOO_MANY_FIELDS_MSG = 'The number of GET/POST parameters exceeded settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
TOO_MUCH_DATA_MSG = 'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
class DataUploadMaxMemorySizeFormPostTests(SimpleTestCase):
def setUp(self):
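        # ';' acts as a field separator like '&' in this payload, giving
        # three fields and a total length of 13 bytes including the CRLF.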
payload = FakePayload('a=1&a=2;a=3\r\n')
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=12):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request._load_post_and_files()
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=13):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request._load_post_and_files()
class DataUploadMaxMemorySizeMultipartPostTests(SimpleTestCase):
def setUp(self):
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''
]))
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=10):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request._load_post_and_files()
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=11):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request._load_post_and_files()
def test_file_passes(self):
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="file1"; filename="test.file"',
'',
'value',
'--boundary--'
''
]))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=1):
request._load_post_and_files()
self.assertIn('file1', request.FILES, "Upload file not present")
class DataUploadMaxMemorySizeGetTests(SimpleTestCase):
def setUp(self):
self.request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': BytesIO(b''),
'CONTENT_LENGTH': 3,
})
def test_data_upload_max_memory_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=2):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request.body
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=3):
self.request.body
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request.body
class DataUploadMaxNumberOfFieldsGet(SimpleTestCase):
def test_get_max_fields_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=1):
with self.assertRaisesMessage(TooManyFieldsSent, TOO_MANY_FIELDS_MSG):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': BytesIO(b''),
'QUERY_STRING': 'a=1&a=2;a=3',
})
request.GET['a']
def test_get_max_fields_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=3):
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'wsgi.input': BytesIO(b''),
'QUERY_STRING': 'a=1&a=2;a=3',
})
request.GET['a']
class DataUploadMaxNumberOfFieldsMultipartPost(SimpleTestCase):
def setUp(self):
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name1"',
'',
'value1',
'--boundary',
'Content-Disposition: form-data; name="name2"',
'',
'value2',
'--boundary--'
''
]))
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_number_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=1):
with self.assertRaisesMessage(TooManyFieldsSent, TOO_MANY_FIELDS_MSG):
self.request._load_post_and_files()
def test_number_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=2):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=None):
self.request._load_post_and_files()
class DataUploadMaxNumberOfFieldsFormPost(SimpleTestCase):
def setUp(self):
payload = FakePayload("\r\n".join(['a=1&a=2;a=3', '']))
self.request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload,
})
def test_number_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=2):
with self.assertRaisesMessage(TooManyFieldsSent, TOO_MANY_FIELDS_MSG):
self.request._load_post_and_files()
def test_number_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=3):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_NUMBER_FIELDS=None):
self.request._load_post_and_files()
|
deathping1994/treeherder
|
refs/heads/master
|
tests/log_parser/test_job_artifact_builder.py
|
10
|
from tests import test_utils
from treeherder.log_parser.artifactbuildercollection import ArtifactBuilderCollection
from treeherder.log_parser.artifactbuilders import BuildbotJobArtifactBuilder
from ..sampledata import SampleData
def do_test(log):
"""
Test a single log with the ``JobArtifactBuilder``.
``log`` - the url prefix of the log to test. Also searches for the
result file with the same prefix.
"""
url = "file://{0}".format(
SampleData().get_log_path("{0}.txt.gz".format(log)))
exp = test_utils.load_exp("{0}.jobartifact.json".format(log))
builder = BuildbotJobArtifactBuilder(url)
lpc = ArtifactBuilderCollection(url, builders=builder)
lpc.parse()
act = lpc.artifacts[builder.name]
# we can't compare the "logurl" field, because it's a fully qualified url,
# so it will be different depending on the config it's run in.
assert "logurl" in act
del(act["logurl"])
# leaving the logurl in the exp files so they are a good example of the
# expected structure.
del(exp["logurl"])
# assert act == exp, diff(exp, act)
# if you want to gather results for a new test, use this
assert len(act) == len(exp)
for index, artifact in act.items():
assert artifact == exp[index]
# assert act == exp#, json.dumps(act, indent=4)
def test_crashtest_passing(initial_data):
"""Process a job with a single log reference."""
do_test("mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50")
def test_opt_test_failing(initial_data):
"""Process log with printlines and errors"""
do_test("mozilla-central_mountainlion_test-mochitest-2-bm80-tests1-macosx-build138")
def test_build_failing(initial_data):
"""Process a job with a single log reference."""
do_test("mozilla-central-macosx64-debug-bm65-build1-build15")
def test_mochitest_debug_passing(initial_data):
"""Process a job with a single log reference."""
do_test("mozilla-central_mountainlion-debug_test-mochitest-2-bm80-tests1-macosx-build93")
def test_mochitest_pass(initial_data):
"""Process a job with a single log reference."""
do_test("mozilla-central_mountainlion_test-mochitest-2-bm77-tests1-macosx-build141")
def test_mochitest_fail(initial_data):
"""Process a job with a single log reference."""
do_test("mozilla-esr17_xp_test_pgo-mochitest-browser-chrome-bm74-tests1-windows-build12")
def test_mochitest_process_crash(initial_data):
"""Test a mochitest log that has PROCESS-CRASH """
do_test("mozilla-inbound_ubuntu64_vm-debug_test-mochitest-other-bm53-tests1-linux-build122")
def test_jetpack_fail(initial_data):
"""Process a job with a single log reference."""
do_test("ux_ubuntu32_vm_test-jetpack-bm67-tests1-linux-build16")
|
MaheshIBM/keystone
|
refs/heads/master
|
keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
|
16
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
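    # Relax the NOT NULL constraint first, then rename the column.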
request_token_table = sql.Table('request_token', meta, autoload=True)
request_token_table.c.requested_roles.alter(nullable=True)
request_token_table.c.requested_roles.alter(name="role_ids")
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.requested_roles.alter(name="role_ids")
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
request_token_table = sql.Table('request_token', meta, autoload=True)
request_token_table.c.role_ids.alter(nullable=False)
request_token_table.c.role_ids.alter(name="requested_roles")
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.role_ids.alter(name="requested_roles")
|
clinc/models
|
refs/heads/master
|
im2txt/im2txt/train.py
|
30
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from im2txt import configuration
from im2txt import show_and_tell_model
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
"File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
"Path to a pretrained inception_v3 model.")
tf.flags.DEFINE_string("train_dir", "",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
"Whether to train inception submodel variables.")
tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 1,
"Frequency at which loss and global step are logged.")
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
assert FLAGS.input_file_pattern, "--input_file_pattern is required"
assert FLAGS.train_dir, "--train_dir is required"
model_config = configuration.ModelConfig()
model_config.input_file_pattern = FLAGS.input_file_pattern
model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
training_config = configuration.TrainingConfig()
# Create training directory.
train_dir = FLAGS.train_dir
if not tf.gfile.IsDirectory(train_dir):
tf.logging.info("Creating training directory: %s", train_dir)
tf.gfile.MakeDirs(train_dir)
# Build the TensorFlow graph.
g = tf.Graph()
with g.as_default():
# Build the model.
model = show_and_tell_model.ShowAndTellModel(
model_config, mode="train", train_inception=FLAGS.train_inception)
model.build()
# Set up the learning rate.
learning_rate_decay_fn = None
if FLAGS.train_inception:
learning_rate = tf.constant(training_config.train_inception_learning_rate)
else:
learning_rate = tf.constant(training_config.initial_learning_rate)
if training_config.learning_rate_decay_factor > 0:
num_batches_per_epoch = (training_config.num_examples_per_epoch /
model_config.batch_size)
decay_steps = int(num_batches_per_epoch *
training_config.num_epochs_per_decay)
def _learning_rate_decay_fn(learning_rate, global_step):
return tf.train.exponential_decay(
learning_rate,
global_step,
decay_steps=decay_steps,
decay_rate=training_config.learning_rate_decay_factor,
staircase=True)
learning_rate_decay_fn = _learning_rate_decay_fn
# Set up the training ops.
train_op = tf.contrib.layers.optimize_loss(
loss=model.total_loss,
global_step=model.global_step,
learning_rate=learning_rate,
optimizer=training_config.optimizer,
clip_gradients=training_config.clip_gradients,
learning_rate_decay_fn=learning_rate_decay_fn)
# Set up the Saver for saving and restoring model checkpoints.
saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)
# Run training.
tf.contrib.slim.learning.train(
train_op,
train_dir,
log_every_n_steps=FLAGS.log_every_n_steps,
graph=g,
global_step=model.global_step,
number_of_steps=FLAGS.number_of_steps,
init_fn=model.init_fn,
saver=saver)
if __name__ == "__main__":
tf.app.run()
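# Example invocation (a sketch; all paths and the shard pattern are
# placeholders, and the flags are the DEFINE_* declarations above):
#
#   python train.py \
#     --input_file_pattern="${DATA_DIR}/train-?????-of-00256" \
#     --inception_checkpoint_file="${CKPT_DIR}/inception_v3.ckpt" \
#     --train_dir="${MODEL_DIR}/train" \
#     --number_of_steps=1000000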
|
baggioss/hadoop-cdh3u5
|
refs/heads/master
|
contrib/hod/hodlib/Common/miniHTMLParser.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import urllib, urlparse, re
from HTMLParser import HTMLParser
class miniHTMLParser( HTMLParser ):
  # NOTE: these are class attributes, so every miniHTMLParser instance
  # shares the same queues; create a fresh parser per crawl (or move these
  # into an __init__) to avoid state leaking between instances.
  viewedQueue = []
  instQueue = []
def setBaseUrl(self, url):
self.baseUrl = url
def getNextLink( self ):
if self.instQueue == []:
return None
else:
return self.instQueue.pop(0)
def handle_starttag( self, tag, attrs ):
if tag == 'a':
newstr = urlparse.urljoin(self.baseUrl, str(attrs[0][1]))
      if re.search('mailto', newstr) is not None:
        return
      if newstr not in self.viewedQueue:
        self.instQueue.append( newstr )
        self.viewedQueue.append( newstr )
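# Usage sketch (not part of this module; the URL is a placeholder and the
# fetch uses the Python 2 urllib this file already imports):
#
#   parser = miniHTMLParser()
#   parser.setBaseUrl("http://example.com/")
#   parser.feed(urllib.urlopen("http://example.com/").read())
#   link = parser.getNextLink()
#   while link is not None:
#     parser.setBaseUrl(link)
#     parser.feed(urllib.urlopen(link).read())
#     link = parser.getNextLink()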
|
beck/django
|
refs/heads/master
|
tests/template_tests/syntax_tests/test_autoescape.py
|
337
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeTagTests(SimpleTestCase):
@setup({'autoescape-tag01': '{% autoescape off %}hello{% endautoescape %}'})
def test_autoescape_tag01(self):
output = self.engine.render_to_string('autoescape-tag01')
self.assertEqual(output, 'hello')
@setup({'autoescape-tag02': '{% autoescape off %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag02(self):
output = self.engine.render_to_string('autoescape-tag02', {'first': '<b>hello</b>'})
self.assertEqual(output, '<b>hello</b>')
@setup({'autoescape-tag03': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag03(self):
output = self.engine.render_to_string('autoescape-tag03', {'first': '<b>hello</b>'})
self.assertEqual(output, '<b>hello</b>')
# Autoescape disabling and enabling nest in a predictable way.
@setup({'autoescape-tag04': '{% autoescape off %}'
'{{ first }} {% autoescape on %}{{ first }}{% endautoescape %}{% endautoescape %}'})
def test_autoescape_tag04(self):
output = self.engine.render_to_string('autoescape-tag04', {'first': '<a>'})
self.assertEqual(output, '<a> <a>')
@setup({'autoescape-tag05': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag05(self):
output = self.engine.render_to_string('autoescape-tag05', {'first': '<b>first</b>'})
self.assertEqual(output, '<b>first</b>')
# Strings (ASCII or unicode) already marked as "safe" are not
# auto-escaped
@setup({'autoescape-tag06': '{{ first }}'})
def test_autoescape_tag06(self):
output = self.engine.render_to_string('autoescape-tag06', {'first': mark_safe('<b>first</b>')})
self.assertEqual(output, '<b>first</b>')
@setup({'autoescape-tag07': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag07(self):
output = self.engine.render_to_string('autoescape-tag07', {'first': mark_safe('<b>Apple</b>')})
self.assertEqual(output, '<b>Apple</b>')
@setup({'autoescape-tag08': r'{% autoescape on %}'
r'{{ var|default_if_none:" endquote\" hah" }}{% endautoescape %}'})
def test_autoescape_tag08(self):
"""
Literal string arguments to filters, if used in the result, are safe.
"""
output = self.engine.render_to_string('autoescape-tag08', {"var": None})
self.assertEqual(output, ' endquote" hah')
# Objects which return safe strings as their __str__ method
# won't get double-escaped.
@setup({'autoescape-tag09': r'{{ unsafe }}'})
def test_autoescape_tag09(self):
output = self.engine.render_to_string('autoescape-tag09', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'you & me')
@setup({'autoescape-tag10': r'{{ safe }}'})
def test_autoescape_tag10(self):
output = self.engine.render_to_string('autoescape-tag10', {'safe': SafeClass()})
self.assertEqual(output, 'you > me')
@setup({'autoescape-filtertag01': '{{ first }}{% filter safe %}{{ first }} x<y{% endfilter %}'})
def test_autoescape_filtertag01(self):
"""
The "safe" and "escape" filters cannot work due to internal
implementation details (fortunately, the (no)autoescape block
tags can be used in those cases)
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('autoescape-filtertag01', {'first': '<a>'})
@setup({'autoescape-ifequal01': '{% ifequal var "this & that" %}yes{% endifequal %}'})
def test_autoescape_ifequal01(self):
"""
        ifequal compares unescaped values.
"""
output = self.engine.render_to_string('autoescape-ifequal01', {'var': 'this & that'})
self.assertEqual(output, 'yes')
# Arguments to filters are 'safe' and manipulate their input unescaped.
@setup({'autoescape-filters01': '{{ var|cut:"&" }}'})
def test_autoescape_filters01(self):
output = self.engine.render_to_string('autoescape-filters01', {'var': 'this & that'})
self.assertEqual(output, 'this that')
@setup({'autoescape-filters02': '{{ var|join:" & " }}'})
def test_autoescape_filters02(self):
output = self.engine.render_to_string('autoescape-filters02', {'var': ('Tom', 'Dick', 'Harry')})
self.assertEqual(output, 'Tom & Dick & Harry')
@setup({'autoescape-literals01': '{{ "this & that" }}'})
def test_autoescape_literals01(self):
"""
Literal strings are safe.
"""
output = self.engine.render_to_string('autoescape-literals01')
self.assertEqual(output, 'this & that')
@setup({'autoescape-stringiterations01': '{% for l in var %}{{ l }},{% endfor %}'})
def test_autoescape_stringiterations01(self):
"""
Iterating over strings outputs safe characters.
"""
output = self.engine.render_to_string('autoescape-stringiterations01', {'var': 'K&R'})
self.assertEqual(output, 'K,&,R,')
@setup({'autoescape-lookup01': '{{ var.key }}'})
def test_autoescape_lookup01(self):
"""
Escape requirement survives lookup.
"""
output = self.engine.render_to_string('autoescape-lookup01', {'var': {'key': 'this & that'}})
self.assertEqual(output, 'this & that')
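# For reference, the behaviour pinned down above (a sketch; assumes a
# configured Django settings module, and the template string is ad hoc):
#
#   from django.template import engines
#   from django.utils.safestring import mark_safe
#
#   t = engines["django"].from_string("{{ v }}")
#   t.render({"v": "<b>"})              # -> "&lt;b&gt;"  (autoescaped)
#   t.render({"v": mark_safe("<b>")})   # -> "<b>"        (left alone)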
|
SFvue/sfvue3
|
refs/heads/master
|
tracks/views.py
|
7
|
from django.views.generic import TemplateView, RedirectView
from django.core.urlresolvers import reverse
class TrackHomeView(TemplateView):
template_name = 'coming_soon.html'
#class TrackHomeView(RedirectView):
# def get_redirect_url(self):
# return reverse('coming_soon')
|
FireballDWF/cloud-custodian
|
refs/heads/master
|
tools/c7n_kube/c7n_kube/resources/core/service.py
|
5
|
# Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from c7n_kube.query import QueryResourceManager, TypeInfo
from c7n_kube.provider import resources
@resources.register('service')
class Service(QueryResourceManager):
class resource_type(TypeInfo):
group = 'Core'
version = 'V1'
patch = 'patch_namespaced_service'
delete = 'delete_namespaced_service'
enum_spec = ('list_service_for_all_namespaces', 'items', None)
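# A minimal policy that would exercise this resource (a sketch; the 'k8s.'
# provider prefix is an assumption -- check the c7n_kube docs for the exact
# resource name):
#
#   policies:
#     - name: list-services
#       resource: k8s.service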
|
seibert/numba
|
refs/heads/master
|
numba/tests/test_looplifting.py
|
2
|
from io import StringIO
import numpy as np
from numba.core import types
from numba.core.compiler import compile_isolated, Flags
from numba.tests.support import TestCase, tag, MemoryLeakMixin
import unittest
looplift_flags = Flags()
looplift_flags.set("enable_pyobject")
looplift_flags.set("enable_looplift")
pyobject_looplift_flags = looplift_flags.copy()
pyobject_looplift_flags.set("enable_pyobject_looplift")
def lift1(x):
# Outer needs object mode because of np.empty()
a = np.empty(3)
for i in range(a.size):
# Inner is nopython-compliant
a[i] = x
return a
def lift2(x):
# Outer needs object mode because of np.empty()
a = np.empty((3, 4))
for i in range(a.shape[0]):
for j in range(a.shape[1]):
# Inner is nopython-compliant
a[i, j] = x
return a
def lift3(x):
# Output variable from the loop
_ = object()
a = np.arange(5, dtype=np.int64)
c = 0
for i in range(a.shape[0]):
c += a[i] * x
return c
def lift4(x):
# Output two variables from the loop
_ = object()
a = np.arange(5, dtype=np.int64)
c = 0
d = 0
for i in range(a.shape[0]):
c += a[i] * x
d += c
return c + d
def lift5(x):
_ = object()
a = np.arange(4)
for i in range(a.shape[0]):
# Inner has a break statement
if i > 2:
break
return a
def lift_gen1(x):
# Outer needs object mode because of np.empty()
a = np.empty(3)
yield 0
for i in range(a.size):
# Inner is nopython-compliant
a[i] = x
yield np.sum(a)
def lift_issue2561():
np.empty(1) # This forces objectmode because no nrt
for i in range(10):
for j in range(10):
return 1
return 2
def reject1(x):
a = np.arange(4)
for i in range(a.shape[0]):
# Inner returns a variable from outer "scope" => cannot loop-lift
return a
return a
def reject_gen1(x):
_ = object()
a = np.arange(4)
for i in range(a.shape[0]):
# Inner is a generator => cannot loop-lift
yield a[i]
def reject_gen2(x):
_ = object()
a = np.arange(3)
for i in range(a.size):
# Middle has a yield => cannot loop-lift
res = a[i] + x
for j in range(i):
# Inner is nopython-compliant, but the current algorithm isn't
# able to separate it.
res = res ** 2
yield res
def reject_npm1(x):
a = np.empty(3, dtype=np.int32)
for i in range(a.size):
# Inner uses object() => cannot loop-lift
_ = object()
a[i] = np.arange(i + 1)[i]
return a
class TestLoopLifting(MemoryLeakMixin, TestCase):
def try_lift(self, pyfunc, argtypes):
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
# One lifted loop
self.assertEqual(len(cres.lifted), 1)
return cres
def assert_lifted_native(self, cres):
# Check if we have lifted in nopython mode
jitloop = cres.lifted[0]
[loopcres] = jitloop.overloads.values()
self.assertTrue(loopcres.fndesc.native) # Lifted function is native
def check_lift_ok(self, pyfunc, argtypes, args):
"""
Check that pyfunc can loop-lift even in nopython mode.
"""
cres = self.try_lift(pyfunc, argtypes)
expected = pyfunc(*args)
got = cres.entry_point(*args)
self.assert_lifted_native(cres)
# Check return values
self.assertPreciseEqual(expected, got)
def check_lift_generator_ok(self, pyfunc, argtypes, args):
"""
Check that pyfunc (a generator function) can loop-lift even in
nopython mode.
"""
cres = self.try_lift(pyfunc, argtypes)
expected = list(pyfunc(*args))
got = list(cres.entry_point(*args))
self.assert_lifted_native(cres)
# Check return values
self.assertPreciseEqual(expected, got)
def check_no_lift(self, pyfunc, argtypes, args):
"""
Check that pyfunc can't loop-lift.
"""
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
self.assertFalse(cres.lifted)
expected = pyfunc(*args)
got = cres.entry_point(*args)
# Check return values
self.assertPreciseEqual(expected, got)
def check_no_lift_generator(self, pyfunc, argtypes, args):
"""
Check that pyfunc (a generator function) can't loop-lift.
"""
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
self.assertFalse(cres.lifted)
expected = list(pyfunc(*args))
got = list(cres.entry_point(*args))
self.assertPreciseEqual(expected, got)
def check_no_lift_nopython(self, pyfunc, argtypes, args):
"""
Check that pyfunc will fail loop-lifting if pyobject mode
is disabled inside the loop, succeed otherwise.
"""
cres = compile_isolated(pyfunc, argtypes,
flags=looplift_flags)
self.assertTrue(cres.lifted)
with self.assertTypingError():
cres.entry_point(*args)
cres = compile_isolated(pyfunc, argtypes,
flags=pyobject_looplift_flags)
self.assertTrue(cres.lifted)
expected = pyfunc(*args)
got = cres.entry_point(*args)
self.assertPreciseEqual(expected, got)
def test_lift1(self):
self.check_lift_ok(lift1, (types.intp,), (123,))
def test_lift2(self):
self.check_lift_ok(lift2, (types.intp,), (123,))
def test_lift3(self):
self.check_lift_ok(lift3, (types.intp,), (123,))
def test_lift4(self):
self.check_lift_ok(lift4, (types.intp,), (123,))
def test_lift5(self):
self.check_lift_ok(lift5, (types.intp,), (123,))
def test_lift_issue2561(self):
self.check_no_lift(lift_issue2561, (), ())
def test_lift_gen1(self):
self.check_lift_generator_ok(lift_gen1, (types.intp,), (123,))
def test_reject1(self):
self.check_no_lift(reject1, (types.intp,), (123,))
def test_reject_gen1(self):
self.check_no_lift_generator(reject_gen1, (types.intp,), (123,))
def test_reject_gen2(self):
self.check_no_lift_generator(reject_gen2, (types.intp,), (123,))
def test_reject_npm1(self):
self.check_no_lift_nopython(reject_npm1, (types.intp,), (123,))
class TestLoopLiftingAnnotate(TestCase):
def test_annotate_1(self):
"""
Verify that annotation works as expected with one lifted loop
"""
from numba import jit
# dummy function to force objmode
def bar():
pass
def foo(x):
bar() # force obj
for i in range(x.size):
x[i] += 1
return x
cfoo = jit(foo)
x = np.arange(10)
xcopy = x.copy()
r = cfoo(x)
np.testing.assert_equal(r, xcopy + 1)
buf = StringIO()
cfoo.inspect_types(file=buf)
annotation = buf.getvalue()
buf.close()
self.assertIn("The function contains lifted loops", annotation)
line = foo.__code__.co_firstlineno + 2 # 2 lines down from func head
self.assertIn("Loop at line {line}".format(line=line), annotation)
self.assertIn("Has 1 overloads", annotation)
def test_annotate_2(self):
"""
Verify that annotation works as expected with two lifted loops
"""
from numba import jit
# dummy function to force objmode
def bar():
pass
def foo(x):
bar() # force obj
# first lifted loop
for i in range(x.size):
x[i] += 1
# second lifted loop
for j in range(x.size):
x[j] *= 2
return x
cfoo = jit(foo)
x = np.arange(10)
xcopy = x.copy()
r = cfoo(x)
np.testing.assert_equal(r, (xcopy + 1) * 2)
buf = StringIO()
cfoo.inspect_types(file=buf)
annotation = buf.getvalue()
buf.close()
self.assertIn("The function contains lifted loops", annotation)
line1 = foo.__code__.co_firstlineno + 3 # 3 lines down from func head
line2 = foo.__code__.co_firstlineno + 6 # 6 lines down from func head
self.assertIn("Loop at line {line}".format(line=line1), annotation)
self.assertIn("Loop at line {line}".format(line=line2), annotation)
class TestLoopLiftingInAction(MemoryLeakMixin, TestCase):
def assert_has_lifted(self, jitted, loopcount):
lifted = jitted.overloads[jitted.signatures[0]].lifted
self.assertEqual(len(lifted), loopcount)
def test_issue_734(self):
from numba import jit, void, int32, double
@jit(void(int32, double[:]), forceobj=True)
def forloop_with_if(u, a):
if u == 0:
for i in range(a.shape[0]):
a[i] = a[i] * 2.0
else:
for i in range(a.shape[0]):
a[i] = a[i] + 1.0
for u in (0, 1):
nb_a = np.arange(10, dtype='int32')
np_a = np.arange(10, dtype='int32')
forloop_with_if(u, nb_a)
forloop_with_if.py_func(u, np_a)
self.assertPreciseEqual(nb_a, np_a)
def test_issue_812(self):
from numba import jit
@jit('f8[:](f8[:])', forceobj=True)
def test(x):
res = np.zeros(len(x))
ind = 0
for ii in range(len(x)):
ind += 1
res[ind] = x[ind]
if x[ind] >= 10:
break
# Invalid loopjitting will miss the usage of `ind` in the
# following loop.
for ii in range(ind + 1, len(x)):
res[ii] = 0
return res
x = np.array([1., 4, 2, -3, 5, 2, 10, 5, 2, 6])
np.testing.assert_equal(test.py_func(x), test(x))
def test_issue_2368(self):
from numba import jit
def lift_issue2368(a, b):
s = 0
for e in a:
s += e
h = b.__hash__()
return s, h
a = np.ones(10)
b = object()
jitted = jit(lift_issue2368)
expected = lift_issue2368(a, b)
got = jitted(a, b)
self.assertEqual(expected[0], got[0])
self.assertEqual(expected[1], got[1])
jitloop = jitted.overloads[jitted.signatures[0]].lifted[0]
[loopcres] = jitloop.overloads.values()
# assert lifted function is native
self.assertTrue(loopcres.fndesc.native)
def test_no_iteration_w_redef(self):
# redefinition of res in the loop with no use of res should not
# prevent lifting
from numba import jit
@jit(forceobj=True)
def test(n):
res = 0
for i in range(n):
res = i
return res
        # one lifted loop; with n=-1 the lifted loop body never executes
self.assertEqual(test.py_func(-1), test(-1))
self.assert_has_lifted(test, loopcount=1)
        # one lifted loop; with n=1 the lifted loop body executes
self.assertEqual(test.py_func(1), test(1))
self.assert_has_lifted(test, loopcount=1)
def test_no_iteration(self):
from numba import jit
@jit(forceobj=True)
def test(n):
res = 0
for i in range(n):
res += i
return res
# loop count = 1
self.assertEqual(test.py_func(-1), test(-1))
self.assert_has_lifted(test, loopcount=1)
# loop count = 1
self.assertEqual(test.py_func(1), test(1))
self.assert_has_lifted(test, loopcount=1)
def test_define_in_loop_body(self):
        # tests that a variable defined inside the loop and used after it is liftable
from numba import jit
@jit(forceobj=True)
def test(n):
for i in range(n):
res = i
return res
# loop count = 1
self.assertEqual(test.py_func(1), test(1))
self.assert_has_lifted(test, loopcount=1)
def test_invalid_argument(self):
"""Test a problem caused by invalid discovery of loop argument
when a variable is used afterwards but not before.
Before the fix, this will result in::
numba.ir.NotDefinedError: 'i' is not defined
"""
from numba import jit
@jit(forceobj=True)
def test(arg):
if type(arg) == np.ndarray: # force object mode
if arg.ndim == 1:
result = 0.0
j = 0
for i in range(arg.shape[0]):
pass
else:
raise Exception
else:
result = 0.0
i, j = 0, 0
return result
arg = np.arange(10)
self.assertEqual(test.py_func(arg), test(arg))
def test_conditionally_defined_in_loop(self):
from numba import jit
@jit(forceobj=True)
def test():
x = 5
y = 0
for i in range(2):
if i > 0:
x = 6
y += x
return y, x
self.assertEqual(test.py_func(), test())
self.assert_has_lifted(test, loopcount=1)
def test_stack_offset_error_when_has_no_return(self):
from numba import jit
import warnings
def pyfunc(a):
if a:
for i in range(10):
pass
with warnings.catch_warnings():
warnings.simplefilter("error")
cfunc = jit(forceobj=True)(pyfunc)
self.assertEqual(pyfunc(True), cfunc(True))
def test_variable_scope_bug(self):
"""
https://github.com/numba/numba/issues/2179
Looplifting transformation is using the wrong version of variable `h`.
"""
from numba import jit
def bar(x):
return x
def foo(x):
h = 0.
for k in range(x):
h = h + k
h = h - bar(x)
return h
cfoo = jit(foo)
self.assertEqual(foo(10), cfoo(10))
def test_recompilation_loop(self):
"""
https://github.com/numba/numba/issues/2481
"""
from numba import jit
def foo(x, y):
# slicing to make array `x` into different layout
# to cause a new compilation of the lifted loop
A = x[::y]
c = 1
for k in range(A.size):
object() # to force objectmode and looplifting
c = c * A[::-1][k] # the slice that is failing in static_getitem
return c
cfoo = jit(foo)
# First run just works
args = np.arange(10), 1
self.assertEqual(foo(*args), cfoo(*args))
# Exactly 1 lifted loop so far
self.assertEqual(len(cfoo.overloads[cfoo.signatures[0]].lifted), 1)
lifted = cfoo.overloads[cfoo.signatures[0]].lifted[0]
# The lifted loop has 1 signature
self.assertEqual(len(lifted.signatures), 1)
# Use different argument to trigger a new compilation of the lifted loop
args = np.arange(10), -1
self.assertEqual(foo(*args), cfoo(*args))
# Ensure that is really a new overload for the lifted loop
self.assertEqual(len(lifted.signatures), 2)
def test_lift_listcomp_block0(self):
def foo(X):
[y for y in (1,)]
for x in (1,):
pass
return X
        # This is not nice: if there are two or more liftable loops, one of
        # them is a list comprehension in block 0, and objmode compilation is
        # forced, then on py27 the BUILD_LIST becomes a lifting candidate with
        # an entry of block 0. That is a problem because the loop-lift prelude
        # would be written to block -1 and havoc would ensue. Therefore block 0
        # loop lifts are banned under this set of circumstances.
# check all compile and execute
from numba import jit
f = jit()(foo)
f(1)
self.assertEqual(f.overloads[f.signatures[0]].lifted, ())
f = jit(forceobj=True)(foo)
f(1)
self.assertEqual(len(f.overloads[f.signatures[0]].lifted), 1)
def test_lift_objectmode_issue_4223(self):
from numba import jit
@jit
def foo(a, b, c, d, x0, y0, n):
xs, ys = np.zeros(n), np.zeros(n)
xs[0], ys[0] = x0, y0
for i in np.arange(n-1):
xs[i+1] = np.sin(a * ys[i]) + c * np.cos(a * xs[i])
ys[i+1] = np.sin(b * xs[i]) + d * np.cos(b * ys[i])
object() # ensure object mode
return xs, ys
kwargs = dict(a=1.7, b=1.7, c=0.6, d=1.2, x0=0, y0=0, n=200)
got = foo(**kwargs)
expected = foo.py_func(**kwargs)
self.assertPreciseEqual(got[0], expected[0])
        self.assertPreciseEqual(got[1], expected[1])
[lifted] = foo.overloads[foo.signatures[0]].lifted
self.assertEqual(len(lifted.nopython_signatures), 1)
if __name__ == '__main__':
unittest.main()
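# Illustration (a sketch, not part of the suite): observing loop-lifting on a
# plain jitted function via the same attributes the tests above inspect. The
# function body is ad hoc; forceobj=True keeps the outer function in object
# mode so the nopython-compliant loop becomes a lifting candidate.
#
#   from numba import jit
#
#   @jit(forceobj=True)
#   def f(n):
#       object()            # object() keeps the outer function in objmode
#       total = 0
#       for i in range(n):  # this loop is lifted and compiled in nopython mode
#           total += i
#       return total
#
#   f(10)
#   lifted = f.overloads[f.signatures[0]].lifted  # tuple with one lifted loop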
|
arnau126/django-mysql
|
refs/heads/master
|
src/django_mysql/models/fields/lists.py
|
2
|
from django.core import checks
from django.db.models import CharField, IntegerField, Lookup, TextField
from django.utils.translation import gettext_lazy as _
from django_mysql.forms import SimpleListField
from django_mysql.models.lookups import SetContains, SetIContains
from django_mysql.models.transforms import SetLength
from django_mysql.validators import ListMaxLengthValidator
class ListFieldMixin:
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
super().__init__(**kwargs)
if self.size:
self.validators.append(ListMaxLengthValidator(int(self.size)))
def get_default(self):
default = super().get_default()
if default == "":
return []
else:
return default
def check(self, **kwargs):
errors = super().check(**kwargs)
if not isinstance(self.base_field, (CharField, IntegerField)):
errors.append(
checks.Error(
"Base field for list must be a CharField or IntegerField.",
hint=None,
obj=self,
id="django_mysql.E005",
)
)
return errors
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = "\n ".join(
"{} ({})".format(error.msg, error.id) for error in base_errors
)
errors.append(
checks.Error(
"Base field for list has errors:\n %s" % messages,
hint=None,
obj=self,
id="django_mysql.E004",
)
)
return errors
@property
def description(self):
return _("List of %(base_description)s") % {
"base_description": self.base_field.description
}
def set_attributes_from_name(self, name):
super().set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
bad_paths = (
"django_mysql.models.fields.lists." + self.__class__.__name__,
"django_mysql.models.fields." + self.__class__.__name__,
)
if path in bad_paths:
path = "django_mysql.models." + self.__class__.__name__
args.insert(0, self.base_field)
kwargs["size"] = self.size
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, str):
if not len(value):
value = []
else:
value = [self.base_field.to_python(v) for v in value.split(",")]
return value
def from_db_value(self, value, expression, connection):
if isinstance(value, str):
if not len(value):
value = []
else:
value = [self.base_field.to_python(v) for v in value.split(",")]
return value
def get_prep_value(self, value):
if isinstance(value, list):
value = [str(self.base_field.get_prep_value(v)) for v in value]
for v in value:
if "," in v:
raise ValueError(
"List members in {klass} {name} cannot contain commas".format(
klass=self.__class__.__name__, name=self.name
)
)
elif not len(v):
raise ValueError(
"The empty string cannot be stored in {klass} {name}".format(
klass=self.__class__.__name__, name=self.name
)
)
return ",".join(value)
return value
def get_lookup(self, lookup_name):
lookup = super().get_lookup(lookup_name)
if lookup:
return lookup
try:
index = int(lookup_name)
except ValueError:
pass
else:
index += 1 # MySQL uses 1-indexing
return IndexLookupFactory(index)
return lookup
def value_to_string(self, obj):
vals = self.value_from_object(obj)
return self.get_prep_value(vals)
def formfield(self, **kwargs):
defaults = {
"form_class": SimpleListField,
"base_field": self.base_field.formfield(),
"max_length": self.size,
}
defaults.update(kwargs)
return super().formfield(**defaults)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
self.base_field.model = cls
class ListCharField(ListFieldMixin, CharField):
"""
A subclass of CharField for using MySQL's handy FIND_IN_SET function with.
"""
def check(self, **kwargs):
errors = super().check(**kwargs)
        # Unfortunately this check can't really be done for IntegerFields
        # since their serialized length is unbounded.
has_base_error = any(e.id == "django_mysql.E004" for e in errors)
if (
not has_base_error
and self.max_length is not None
and isinstance(self.base_field, CharField)
and self.size
):
max_size = (
# The chars used
(self.size * (self.base_field.max_length))
# The commas
+ self.size
- 1
)
if max_size > self.max_length:
errors.append(
checks.Error(
"Field can overrun - set contains CharFields of max "
"length %s, leading to a comma-combined max length of "
"%s, which is greater than the space reserved for the "
"set - %s"
% (self.base_field.max_length, max_size, self.max_length),
hint=None,
obj=self,
id="django_mysql.E006",
)
)
return errors
class ListTextField(ListFieldMixin, TextField):
pass
ListCharField.register_lookup(SetContains)
ListTextField.register_lookup(SetContains)
ListCharField.register_lookup(SetIContains)
ListTextField.register_lookup(SetIContains)
ListCharField.register_lookup(SetLength)
ListTextField.register_lookup(SetLength)
class IndexLookup(Lookup):
def __init__(self, index, *args, **kwargs):
super().__init__(*args, **kwargs)
self.index = index
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = tuple(lhs_params) + tuple(rhs_params)
# Put rhs on the left since that's the order FIND_IN_SET uses
return "(FIND_IN_SET({}, {}) = {})".format(rhs, lhs, self.index), params
class IndexLookupFactory:
def __init__(self, index):
self.index = index
def __call__(self, *args, **kwargs):
return IndexLookup(self.index, *args, **kwargs)
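# Usage sketch (model and field names are hypothetical; this mirrors the
# documented django-mysql pattern for list fields):
#
#   from django.db import models
#   from django_mysql.models import ListCharField
#
#   class Post(models.Model):
#       tags = ListCharField(
#           base_field=models.CharField(max_length=10),
#           size=4,
#           max_length=(4 * 11),  # 4 members * (10 chars + 1 comma)
#       )
#
#   Post.objects.filter(tags__contains="django")  # SetContains -> FIND_IN_SET
#   Post.objects.filter(tags__0="first")          # IndexLookup, 1-indexed in SQL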
|
kleskjr/scipy
|
refs/heads/master
|
scipy/linalg/tests/test_matfuncs.py
|
24
|
#!/usr/bin/env python
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.matfuncs module
"""
from __future__ import division, print_function, absolute_import
import random
import warnings
import functools
import numpy as np
from numpy import array, matrix, identity, dot, sqrt, double
from numpy.testing import (TestCase, run_module_suite,
assert_array_equal, assert_array_less, assert_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_allclose, assert_, decorators)
from scipy._lib._numpy_compat import _assert_warns
import scipy.linalg
from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
expm, expm_frechet, expm_cond, norm)
from scipy.linalg.matfuncs import expm2, expm3
from scipy.linalg import _matfuncs_inv_ssq
import scipy.linalg._expm_frechet
from scipy.optimize import minimize
def _get_al_mohy_higham_2012_experiment_1():
"""
Return the test matrix from Experiment (1) of [1]_.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
"""
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.2210e-1, 3e4],
[0, 0, 0, 3.0744e-1]], dtype=float)
return A
class TestSignM(TestCase):
def test_nils(self):
a = array([[29.2, -24.2, 69.5, 49.8, 7.],
[-9.2, 5.2, -18., -16.8, -2.],
[-10., 6., -20., -18., -2.],
[-9.6, 9.6, -25.5, -15.4, -2.],
[9.8, -4.8, 18., 18.2, 2.]])
cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333],
[-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667],
[-4.08,0.56,-4.92,-7.6,0.56],
[-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667],
[4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]])
r = signm(a)
assert_array_almost_equal(r,cr)
def test_defective1(self):
a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])
r = signm(a, disp=False)
#XXX: what would be the correct result?
def test_defective2(self):
a = array((
[29.2,-24.2,69.5,49.8,7.0],
[-9.2,5.2,-18.0,-16.8,-2.0],
[-10.0,6.0,-20.0,-18.0,-2.0],
[-9.6,9.6,-25.5,-15.4,-2.0],
[9.8,-4.8,18.0,18.2,2.0]))
r = signm(a, disp=False)
#XXX: what would be the correct result?
def test_defective3(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
r = signm(a, disp=False)
#XXX: what would be the correct result?
class TestLogM(TestCase):
def test_nils(self):
a = array([[-2., 25., 0., 0., 0., 0., 0.],
[0., -3., 10., 3., 3., 3., 0.],
[0., 0., 2., 15., 3., 3., 0.],
[0., 0., 0., 0., 15., 3., 0.],
[0., 0., 0., 0., 3., 10., 0.],
[0., 0., 0., 0., 0., -2., 25.],
[0., 0., 0., 0., 0., 0., -3.]])
m = (identity(7)*3.1+0j)-a
logm(m, disp=False)
#XXX: what would be the correct result?
def test_al_mohy_higham_2012_experiment_1_logm(self):
# The logm completes the round trip successfully.
# Note that the expm leg of the round trip is badly conditioned.
A = _get_al_mohy_higham_2012_experiment_1()
A_logm, info = logm(A, disp=False)
A_round_trip = expm(A_logm)
assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)
def test_al_mohy_higham_2012_experiment_1_funm_log(self):
# The raw funm with np.log does not complete the round trip.
# Note that the expm leg of the round trip is badly conditioned.
A = _get_al_mohy_higham_2012_experiment_1()
A_funm_log, info = funm(A, np.log, disp=False)
A_round_trip = expm(A_funm_log)
assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
def test_round_trip_random_float(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
# Eigenvalues are related to the branch cut.
W = np.linalg.eigvals(M)
err_msg = 'M:{0} eivals:{1}'.format(M, W)
# Check sqrtm round trip because it is used within logm.
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
# Check logm round trip.
M_logm, info = logm(M, disp=False)
M_logm_round_trip = expm(M_logm)
assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
def test_round_trip_random_complex(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_logm, info = logm(M, disp=False)
M_round_trip = expm(M_logm)
assert_allclose(M_round_trip, M)
def test_logm_type_preservation_and_conversion(self):
# The logm matrix function should preserve the type of a matrix
# whose eigenvalues are positive with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char in complex_dtype_chars)
# check float->complex type conversion for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_logm, info = logm(A, disp=False)
assert_(A_logm.dtype.char in complex_dtype_chars)
def test_complex_spectrum_real_logm(self):
# This matrix has complex eigenvalues and real logm.
# Its output dtype depends on its input dtype.
M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
for dt in float, complex:
X = np.array(M, dtype=dt)
w = scipy.linalg.eigvals(X)
assert_(1e-2 < np.absolute(w.imag).sum())
Y, info = logm(X, disp=False)
assert_(np.issubdtype(Y.dtype, dt))
assert_allclose(expm(Y), X)
def test_real_mixed_sign_spectrum(self):
# These matrices have real eigenvalues with mixed signs.
# The output logm dtype is complex, regardless of input dtype.
for M in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]]):
for dt in float, complex:
A = np.array(M, dtype=dt)
A_logm, info = logm(A, disp=False)
assert_(np.issubdtype(A_logm.dtype, complex))
def test_exactly_singular(self):
A = np.array([[0, 0], [1j, 1j]])
B = np.asarray([[1, 1], [0, 0]])
for M in A, A.T, B, B.T:
expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
L, info = _assert_warns(expected_warning, logm, M, disp=False)
E = expm(L)
assert_allclose(E, M, atol=1e-14)
def test_nearly_singular(self):
M = np.array([[1e-100]])
expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
L, info = _assert_warns(expected_warning, logm, M, disp=False)
E = expm(L)
assert_allclose(E, M, atol=1e-14)
def test_opposite_sign_complex_eigenvalues(self):
# See gh-6113
E = [[0, 1], [-1, 0]]
L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
E = [[1j, 4], [0, -1j]]
L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
E = [[1j, 0], [0, -1j]]
L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
assert_allclose(expm(L), E, atol=1e-14)
assert_allclose(logm(E), L, atol=1e-14)
class TestSqrtM(TestCase):
def test_round_trip_random_float(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
def test_round_trip_random_complex(self):
np.random.seed(1234)
for n in range(1, 6):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_sqrtm, info = sqrtm(M, disp=False)
M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
assert_allclose(M_sqrtm_round_trip, M)
def test_bad(self):
# See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
e = 2**-5
se = sqrt(e)
a = array([[1.0,0,0,1],
[0,e,0,0],
[0,0,e,0],
[0,0,0,1]])
sa = array([[1,0,0,0.5],
[0,se,0,0],
[0,0,se,0],
[0,0,0,1]])
n = a.shape[0]
assert_array_almost_equal(dot(sa,sa),a)
# Check default sqrtm.
esa = sqrtm(a, disp=False, blocksize=n)[0]
assert_array_almost_equal(dot(esa,esa),a)
# Check sqrtm with 2x2 blocks.
esa = sqrtm(a, disp=False, blocksize=2)[0]
assert_array_almost_equal(dot(esa,esa),a)
def test_sqrtm_type_preservation_and_conversion(self):
# The sqrtm matrix function should preserve the type of a matrix
# whose eigenvalues are nonnegative with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]],
[[1, 1], [1, 1]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
# check float->complex type conversion for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(any(w.imag or w.real < 0 for w in W))
# check complex->complex
A = np.array(matrix_as_list, dtype=complex)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
# check float->complex
A = np.array(matrix_as_list, dtype=float)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(A_sqrtm.dtype.char in complex_dtype_chars)
def test_blocksizes(self):
# Make sure I do not goof up the blocksizes when they do not divide n.
np.random.seed(1234)
for n in range(1, 8):
A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
for blocksize in range(1, 10):
A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
assert_allclose(A_sqrtm_default, A_sqrtm_new)
def test_al_mohy_higham_2012_experiment_1(self):
# Matrix square root of a tricky upper triangular matrix.
A = _get_al_mohy_higham_2012_experiment_1()
A_sqrtm, info = sqrtm(A, disp=False)
A_round_trip = A_sqrtm.dot(A_sqrtm)
assert_allclose(A_round_trip, A, rtol=1e-5)
assert_allclose(np.tril(A_round_trip), np.tril(A))
def test_strict_upper_triangular(self):
# This matrix has no square root.
for dt in int, float:
A = np.array([
[0, 3, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]], dtype=dt)
A_sqrtm, info = sqrtm(A, disp=False)
assert_(np.isnan(A_sqrtm).all())
def test_weird_matrix(self):
# The square root of matrix B exists.
for dt in int, float:
A = np.array([
[0, 0, 1],
[0, 0, 0],
[0, 1, 0]], dtype=dt)
B = np.array([
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]], dtype=dt)
assert_array_equal(B, A.dot(A))
# But scipy sqrtm is not clever enough to find it.
B_sqrtm, info = sqrtm(B, disp=False)
assert_(np.isnan(B_sqrtm).all())
def test_disp(self):
from io import StringIO
np.random.seed(1234)
A = np.random.rand(3, 3)
B = sqrtm(A, disp=True)
assert_allclose(B.dot(B), A)
def test_opposite_sign_complex_eigenvalues(self):
M = [[2j, 4], [0, -2j]]
R = [[1+1j, 2], [0, 1-1j]]
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(sqrtm(M), R, atol=1e-14)
class TestFractionalMatrixPower(TestCase):
def test_round_trip_random_complex(self):
np.random.seed(1234)
for p in range(1, 5):
for n in range(1, 5):
M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_root = fractional_matrix_power(M, 1/p)
M_round_trip = np.linalg.matrix_power(M_root, p)
assert_allclose(M_round_trip, M)
def test_round_trip_random_float(self):
# This test is more annoying because it can hit the branch cut;
# this happens when the matrix has an eigenvalue
# with no imaginary component and with a real negative component,
# and it means that the principal branch does not exist.
np.random.seed(1234)
for p in range(1, 5):
for n in range(1, 5):
M_unscaled = np.random.randn(n, n)
for scale in np.logspace(-4, 4, 9):
M = M_unscaled * scale
M_root = fractional_matrix_power(M, 1/p)
M_round_trip = np.linalg.matrix_power(M_root, p)
assert_allclose(M_round_trip, M)
def test_larger_abs_fractional_matrix_powers(self):
np.random.seed(1234)
for n in (2, 3, 5):
for i in range(10):
M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
M_one_fifth = fractional_matrix_power(M, 0.2)
# Test the round trip.
M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
assert_allclose(M, M_round_trip)
# Test a large abs fractional power.
X = fractional_matrix_power(M, -5.4)
Y = np.linalg.matrix_power(M_one_fifth, -27)
assert_allclose(X, Y)
# Test another large abs fractional power.
X = fractional_matrix_power(M, 3.8)
Y = np.linalg.matrix_power(M_one_fifth, 19)
assert_allclose(X, Y)
def test_random_matrices_and_powers(self):
# Each independent iteration of this fuzz test picks random parameters.
# It tries to hit some edge cases.
np.random.seed(1234)
nsamples = 20
for i in range(nsamples):
# Sample a matrix size and a random real power.
n = random.randrange(1, 5)
p = np.random.randn()
# Sample a random real or complex matrix.
matrix_scale = np.exp(random.randrange(-4, 5))
A = np.random.randn(n, n)
if random.choice((True, False)):
A = A + 1j * np.random.randn(n, n)
A = A * matrix_scale
# Check a couple of analytically equivalent ways
# to compute the fractional matrix power.
# These can be compared because they both use the principal branch.
A_power = fractional_matrix_power(A, p)
A_logm, info = logm(A, disp=False)
A_power_expm_logm = expm(A_logm * p)
assert_allclose(A_power, A_power_expm_logm)
def test_al_mohy_higham_2012_experiment_1(self):
# Fractional powers of a tricky upper triangular matrix.
A = _get_al_mohy_higham_2012_experiment_1()
# Test remainder matrix power.
A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
A_sqrtm, info = sqrtm(A, disp=False)
A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
A_power = fractional_matrix_power(A, 0.5)
assert_array_equal(A_rem_power, A_power)
assert_allclose(A_sqrtm, A_power)
assert_allclose(A_sqrtm, A_funm_sqrt)
# Test more fractional powers.
for p in (1/2, 5/3):
A_power = fractional_matrix_power(A, p)
A_round_trip = fractional_matrix_power(A_power, 1/p)
assert_allclose(A_round_trip, A, rtol=1e-2)
assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
def test_briggs_helper_function(self):
np.random.seed(1234)
for a in np.random.randn(10) + 1j * np.random.randn(10):
for k in range(5):
x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
x_expected = a ** np.exp2(-k) - 1
assert_allclose(x_observed, x_expected)
def test_type_preservation_and_conversion(self):
# The fractional_matrix_power matrix function should preserve
# the type of a matrix whose eigenvalues
# are positive with zero imaginary part.
# Test this preservation for variously structured matrices.
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, 1]],
[[1, 0], [1, 1]],
[[2, 1], [1, 1]],
[[2, 3], [1, 2]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(not any(w.imag or w.real < 0 for w in W))
# Check various positive and negative powers
# with absolute values bigger and smaller than 1.
for p in (-2.4, -0.9, 0.2, 3.3):
# check float type preservation
A = np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char not in complex_dtype_chars)
# check complex type preservation
A = np.array(matrix_as_list, dtype=complex)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
# check float->complex for the matrix negation
A = -np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
def test_type_conversion_mixed_sign_or_complex_spectrum(self):
complex_dtype_chars = ('F', 'D', 'G')
for matrix_as_list in (
[[1, 0], [0, -1]],
[[0, 1], [1, 0]],
[[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
# check that the spectrum has the expected properties
W = scipy.linalg.eigvals(matrix_as_list)
assert_(any(w.imag or w.real < 0 for w in W))
# Check various positive and negative powers
# with absolute values bigger and smaller than 1.
for p in (-2.4, -0.9, 0.2, 3.3):
# check complex->complex
A = np.array(matrix_as_list, dtype=complex)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
# check float->complex
A = np.array(matrix_as_list, dtype=float)
A_power = fractional_matrix_power(A, p)
assert_(A_power.dtype.char in complex_dtype_chars)
@decorators.knownfailureif(True, 'Too unstable across LAPACKs.')
def test_singular(self):
# Negative fractional powers do not work with singular matrices.
for matrix_as_list in (
[[0, 0], [0, 0]],
[[1, 1], [1, 1]],
[[1, 2], [3, 6]],
[[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
# Check fractional powers both for float and for complex types.
for newtype in (float, complex):
A = np.array(matrix_as_list, dtype=newtype)
for p in (-0.7, -0.9, -2.4, -1.3):
A_power = fractional_matrix_power(A, p)
assert_(np.isnan(A_power).all())
for p in (0.2, 1.43):
A_power = fractional_matrix_power(A, p)
A_round_trip = fractional_matrix_power(A_power, 1/p)
assert_allclose(A_round_trip, A)
def test_opposite_sign_complex_eigenvalues(self):
M = [[2j, 4], [0, -2j]]
R = [[1+1j, 2], [0, 1-1j]]
assert_allclose(np.dot(R, R), M, atol=1e-14)
assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
class TestExpM(TestCase):
def test_zero(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
a = array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
assert_array_almost_equal(expm2(a),[[1,0],[0,1]])
assert_array_almost_equal(expm3(a),[[1,0],[0,1]])
def test_consistency(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
a = array([[0.,1],[-1,0]])
assert_array_almost_equal(expm(a), expm2(a))
assert_array_almost_equal(expm(a), expm3(a))
a = array([[1j,1],[-1,-2j]])
assert_array_almost_equal(expm(a), expm2(a))
assert_array_almost_equal(expm(a), expm3(a))
def test_npmatrix(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
a = matrix([[3.,0],[0,-3.]])
assert_array_almost_equal(expm(a), expm2(a))
def test_single_elt(self):
# See gh-5853
from scipy.sparse import csc_matrix
vOne = -2.02683397006j
vTwo = -2.12817566856j
mOne = csc_matrix([[vOne]], dtype='complex')
mTwo = csc_matrix([[vTwo]], dtype='complex')
outOne = expm(mOne)
outTwo = expm(mTwo)
assert_equal(type(outOne), type(mOne))
assert_equal(type(outTwo), type(mTwo))
assert_allclose(outOne[0, 0], complex(-0.44039415155949196,
-0.8978045395698304))
assert_allclose(outTwo[0, 0], complex(-0.52896401032626006,
-0.84864425749518878))
class TestExpmFrechet(TestCase):
def test_expm_frechet(self):
# a test of the basic functionality
M = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[0, 0, 1, 2],
[0, 0, 5, 6],
], dtype=float)
A = np.array([
[1, 2],
[5, 6],
], dtype=float)
E = np.array([
[3, 4],
[7, 8],
], dtype=float)
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:2, 2:]
for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}):
observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_small_norm_expm_frechet(self):
# methodically test matrices with a range of norms, for better coverage
M_original = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[0, 0, 1, 2],
[0, 0, 5, 6],
], dtype=float)
A_original = np.array([
[1, 2],
[5, 6],
], dtype=float)
E_original = np.array([
[3, 4],
[7, 8],
], dtype=float)
A_original_norm_1 = scipy.linalg.norm(A_original, 1)
selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
for ma, mb in m_neighbor_pairs:
ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
target_norm_1 = 0.5 * (ell_a + ell_b)
scale = target_norm_1 / A_original_norm_1
M = scale * M_original
A = scale * A_original
E = scale * E_original
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:2, 2:]
observed_expm, observed_frechet = expm_frechet(A, E)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_fuzz(self):
# try a bunch of crazy inputs
rfuncs = (
np.random.uniform,
np.random.normal,
np.random.standard_cauchy,
np.random.exponential)
ntests = 100
for i in range(ntests):
rfunc = random.choice(rfuncs)
target_norm_1 = random.expovariate(1.0)
n = random.randrange(2, 16)
A_original = rfunc(size=(n,n))
E_original = rfunc(size=(n,n))
A_original_norm_1 = scipy.linalg.norm(A_original, 1)
scale = target_norm_1 / A_original_norm_1
A = scale * A_original
E = scale * E_original
M = np.vstack([
np.hstack([A, E]),
np.hstack([np.zeros_like(A), A])])
expected_expm = scipy.linalg.expm(A)
expected_frechet = scipy.linalg.expm(M)[:n, n:]
observed_expm, observed_frechet = expm_frechet(A, E)
assert_allclose(expected_expm, observed_expm)
assert_allclose(expected_frechet, observed_frechet)
def test_problematic_matrix(self):
# this test case uncovered a bug which has since been fixed
A = np.array([
[1.50591997, 1.93537998],
[0.41203263, 0.23443516],
], dtype=float)
E = np.array([
[1.87864034, 2.07055038],
[1.34102727, 0.67341123],
], dtype=float)
A_norm_1 = scipy.linalg.norm(A, 1)
sps_expm, sps_frechet = expm_frechet(
A, E, method='SPS')
blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
A, E, method='blockEnlarge')
assert_allclose(sps_expm, blockEnlarge_expm)
assert_allclose(sps_frechet, blockEnlarge_frechet)
@decorators.slow
@decorators.skipif(True, 'this test is deliberately slow')
def test_medium_matrix(self):
# profile this to see the speed difference
n = 1000
A = np.random.exponential(size=(n, n))
E = np.random.exponential(size=(n, n))
sps_expm, sps_frechet = expm_frechet(
A, E, method='SPS')
blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
A, E, method='blockEnlarge')
assert_allclose(sps_expm, blockEnlarge_expm)
assert_allclose(sps_frechet, blockEnlarge_frechet)
def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
p = np.reshape(p, A.shape)
p_norm = norm(p)
perturbation = eps * p * (A_norm / p_norm)
X_prime = expm(A + perturbation)
scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
return -scaled_relative_error
def _normalized_like(A, B):
return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
def _relative_error(f, A, perturbation):
X = f(A)
X_prime = f(A + perturbation)
return norm(X_prime - X) / norm(X)
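# Background for the fuzz test below: with X = expm(A) and a perturbation of
# size ||E|| = eps * ||A||, the relative condition number kappa(A) gives the
# first-order bound
#
#     ||expm(A + E) - X|| / ||X||  <=  kappa(A) * eps + O(eps**2)
#
# _help_expm_cond_search returns the negated scaled relative error so that
# the L-BFGS-B minimization searches for the worst-case perturbation
# direction, whose error the test then compares against (1 + 2*eps)*eps*kappa.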
class TestExpmConditionNumber(TestCase):
def test_expm_cond_smoke(self):
np.random.seed(1234)
for n in range(1, 4):
A = np.random.randn(n, n)
kappa = expm_cond(A)
assert_array_less(0, kappa)
def test_expm_bad_condition_number(self):
A = np.array([
[-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
[0, -1.201010529, 9.634696872e4, -4.681048289e9],
[0, 0, -1.132893222, 9.532491830e4],
[0, 0, 0, -1.179475332],
])
kappa = expm_cond(A)
assert_array_less(1e36, kappa)
def test_univariate(self):
np.random.seed(12345)
for x in np.linspace(-5, 5, num=11):
A = np.array([[x]])
assert_allclose(expm_cond(A), abs(x))
for x in np.logspace(-2, 2, num=11):
A = np.array([[x]])
assert_allclose(expm_cond(A), abs(x))
for i in range(10):
A = np.random.randn(1, 1)
assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
@decorators.slow
def test_expm_cond_fuzz(self):
np.random.seed(12345)
eps = 1e-5
nsamples = 10
for i in range(nsamples):
n = np.random.randint(2, 5)
A = np.random.randn(n, n)
A_norm = scipy.linalg.norm(A)
X = expm(A)
X_norm = scipy.linalg.norm(X)
kappa = expm_cond(A)
# Look for the small perturbation that gives the greatest
# relative error.
f = functools.partial(_help_expm_cond_search,
A, A_norm, X, X_norm, eps)
guess = np.ones(n*n)
out = minimize(f, guess, method='L-BFGS-B')
xopt = out.x
yopt = f(xopt)
p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
p_best_relerr = _relative_error(expm, A, p_best)
assert_allclose(p_best_relerr, -yopt * eps)
# Check that the identified perturbation indeed gives greater
# relative error than random perturbations with similar norms.
for j in range(5):
p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
assert_allclose(norm(p_best), norm(p_rand))
p_rand_relerr = _relative_error(expm, A, p_rand)
assert_array_less(p_rand_relerr, p_best_relerr)
# The greatest relative error should not be much greater than
# eps times the condition number kappa.
# In the limit as eps approaches zero it should never be greater.
assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
if __name__ == "__main__":
run_module_suite()
|
robovm/robovm-studio
|
refs/heads/master
|
python/testData/hierarchy/call/Static/Parentheses/main.py
|
80
|
from file_1 import target_func
def nothing(x):
pass
target_<caret>func()
|
yamstudio/mysite
|
refs/heads/master
|
blog/apps.py
|
241
|
from django.apps import AppConfig
class BlogConfig(AppConfig):
name = 'blog'
|
santoshdeshpande/transbiz
|
refs/heads/master
|
config/urls.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("transbiz.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just
    # visit these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
ukanga/SickRage
|
refs/heads/master
|
lib/sqlalchemy/dialects/postgresql/__init__.py
|
78
|
# postgresql/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, psycopg2, pg8000, pypostgresql, zxjdbc
base.dialect = psycopg2.dialect
from .base import \
INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
INET, CIDR, UUID, BIT, MACADDR, DOUBLE_PRECISION, TIMESTAMP, TIME, \
DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
TSVECTOR
from .constraints import ExcludeConstraint
from .hstore import HSTORE, hstore
from .json import JSON, JSONElement
from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
TSTZRANGE
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR',
'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONElement'
)
|
openstack/monasca-api
|
refs/heads/master
|
monasca_api/db/alembic/versions/00597b5c8325_initial.py
|
2
|
# Copyright 2018 SUSE Linux GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration for full schema (Git revision 00597b5c8325664c2c534625525f59232d243d66).
Revision ID: 00597b5c8325
Revises: N/A
Create Date: 2018-04-12 09:09:48.212206
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '00597b5c8325'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# Enum tables (will be prepopulated with values through bulk_insert)
alarm_states = op.create_table('alarm_state',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci'
)
op.bulk_insert(alarm_states,
[{'name': 'UNDETERMINED'},
{'name': 'OK'},
{'name': 'ALARM'}])
ad_severities = op.create_table(
'alarm_definition_severity',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.bulk_insert(ad_severities,
[{'name': 'LOW'},
{'name': 'MEDIUM'},
{'name': 'HIGH'},
{'name': 'CRITICAL'}])
nm_types = op.create_table(
'notification_method_type',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.bulk_insert(nm_types,
[{'name': 'EMAIL'},
{'name': 'WEBHOOK'},
{'name': 'PAGERDUTY'}])
stream_action_types = op.create_table(
'stream_actions_action_type',
sa.Column('name',
sa.String(length=20),
nullable=False),
sa.PrimaryKeyConstraint('name'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.bulk_insert(stream_action_types,
[{'name': 'FIRE'},
{'name': 'EXPIRE'}])
op.create_table(
'alarm_definition',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('name',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Column('description',
sa.String(length=255),
nullable=True,
server_default=None),
sa.Column('expression',
sa.dialects.mysql.LONGTEXT(),
nullable=False),
sa.Column('severity',
sa.String(length=20),
nullable=False),
sa.Column('match_by',
sa.String(length=255),
nullable=True,
server_default=''),
sa.Column('actions_enabled',
sa.Boolean(),
nullable=False,
server_default='1'),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.Column('deleted_at',
sa.DateTime(),
nullable=True,
server_default=None),
sa.PrimaryKeyConstraint('id'),
sa.Index('tenant_id', 'tenant_id'),
sa.Index('deleted_at', 'deleted_at'),
sa.Index('fk_alarm_definition_severity', 'severity'),
sa.ForeignKeyConstraint(['severity'],
['alarm_definition_severity.name']),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'alarm',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('alarm_definition_id',
sa.String(length=36),
nullable=False,
server_default=''),
sa.Column('state',
sa.String(length=20),
nullable=False),
sa.Column('lifecycle_state',
sa.String(length=50, collation=False),
nullable=True,
server_default=None),
sa.Column('link',
sa.String(length=512, collation=False),
nullable=True,
server_default=None),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('state_updated_at',
sa.DateTime(),
nullable=True),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.Index('alarm_definition_id', 'alarm_definition_id'),
sa.Index('fk_alarm_alarm_state', 'state'),
sa.ForeignKeyConstraint(['alarm_definition_id'],
['alarm_definition.id'],
name='fk_alarm_definition_id',
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['state'],
['alarm_state.name'],
name='fk_alarm_alarm_state'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'notification_method',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('name',
sa.String(length=250),
nullable=True,
server_default=None),
sa.Column('type',
sa.String(length=20),
# Note: the typo below is deliberate since we need to match
# the constraint name from the SQL script where it is
# misspelled as well.
sa.ForeignKey('notification_method_type.name',
name='fk_alarm_noticication_method_type'),
nullable=False),
sa.Column('address',
sa.String(length=512),
nullable=True,
server_default=None),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'alarm_action',
sa.Column('alarm_definition_id',
sa.String(length=36),
nullable=False,),
sa.Column('alarm_state',
sa.String(length=20),
nullable=False),
sa.Column('action_id',
sa.String(length=36),
nullable=False),
sa.PrimaryKeyConstraint('alarm_definition_id', 'alarm_state',
'action_id'),
sa.ForeignKeyConstraint(['action_id'],
['notification_method.id'],
name='fk_alarm_action_notification_method_id',
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['alarm_state'],
['alarm_state.name']),
sa.ForeignKeyConstraint(['alarm_definition_id'],
['alarm_definition.id'],
ondelete='CASCADE'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'alarm_metric',
sa.Column('alarm_id',
sa.String(length=36),
nullable=False),
sa.Column('metric_definition_dimensions_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.PrimaryKeyConstraint('alarm_id', 'metric_definition_dimensions_id'),
sa.Index('alarm_id', 'alarm_id'),
sa.Index('metric_definition_dimensions_id', 'metric_definition_dimensions_id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
# For some mysterious alembic/sqlalchemy reason this foreign key constraint
# ends up missing when specified upon table creation. Hence we need to add
# it through an ALTER TABLE operation:
op.create_foreign_key('fk_alarm_id',
'alarm_metric',
'alarm',
['alarm_id'],
['id'], ondelete='CASCADE')
op.create_table(
'metric_definition',
sa.Column('id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('name',
sa.String(length=255),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('region',
sa.String(length=255),
nullable=False,
server_default=''),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'metric_definition_dimensions',
sa.Column('id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('metric_definition_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('metric_dimension_set_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.PrimaryKeyConstraint('id'),
sa.Index('metric_definition_id', 'metric_definition_id'),
sa.Index('metric_dimension_set_id', 'metric_dimension_set_id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
# mysql limits the size of a unique key to 767 bytes. The utf8mb4 charset
# requires 4 bytes to be allocated for each character while the utf8
# charset requires 3 bytes. The utf8 charset should be sufficient for any
# reasonable characters, see the definition of supplementary characters for
# what it doesn't support. Even with utf8, the unique key length would be
# 785 bytes so only a subset of the name is used. Potentially the size of
# the name should be limited to 250 characters which would resolve this
# issue.
#
# The unique key is required to allow high performance inserts without
# doing a select by using the "insert into metric_dimension ... on
    # duplicate key update dimension_set_id=dimension_set_id" syntax.
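    #
    # A sketch of the intended statement (illustrative only; the actual
    # insert is issued by the metrics persister, not by this migration):
    #
    #   INSERT INTO metric_dimension (dimension_set_id, name, value)
    #   VALUES (%s, %s, %s)
    #   ON DUPLICATE KEY UPDATE dimension_set_id = dimension_set_id;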
op.create_table(
'metric_dimension',
sa.Column('dimension_set_id',
sa.BINARY(20),
nullable=False,
server_default='\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'),
sa.Column('name',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Column('value',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Index('metric_dimension_key',
'dimension_set_id', 'name',
unique=True,
mysql_length={'name': 252}),
sa.Index('dimension_set_id', 'dimension_set_id'),
mysql_charset='utf8',
mysql_collate='utf8_unicode_ci',
mysql_comment='PRIMARY KEY (`id`)')
op.create_table(
'sub_alarm_definition',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('alarm_definition_id',
sa.String(length=36),
sa.ForeignKey('alarm_definition.id', ondelete='CASCADE',
name='fk_sub_alarm_definition'),
nullable=False,
server_default=''),
sa.Column('function',
sa.String(length=10),
nullable=False),
sa.Column('metric_name',
sa.String(length=100),
nullable=True,
server_default=None),
sa.Column('operator',
sa.String(length=5),
nullable=False),
sa.Column('threshold',
sa.dialects.mysql.DOUBLE(),
nullable=False),
sa.Column('period',
sa.Integer(),
nullable=False),
sa.Column('periods',
sa.Integer(),
nullable=False),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'sub_alarm_definition_dimension',
sa.Column('sub_alarm_definition_id',
sa.String(length=36),
sa.ForeignKey('sub_alarm_definition.id', ondelete='CASCADE',
name='fk_sub_alarm_definition_dimension'),
nullable=False,
server_default=''),
sa.Column('dimension_name',
sa.String(length=255),
nullable=False,
server_default=''),
sa.Column('value',
sa.String(length=255),
nullable=True,
server_default=None),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'sub_alarm',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('alarm_id',
sa.String(length=36),
sa.ForeignKey('alarm.id', ondelete='CASCADE',
name='fk_sub_alarm'),
nullable=False,
server_default=''),
sa.Column('sub_expression_id',
sa.String(length=36),
sa.ForeignKey('sub_alarm_definition.id',
name='fk_sub_alarm_expr'),
nullable=False,
server_default=''),
sa.Column('expression',
sa.dialects.mysql.LONGTEXT(),
nullable=False),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'schema_migrations',
sa.Column('version',
sa.String(length=255),
nullable=False),
sa.UniqueConstraint('version', name='unique_schema_migrations'),
mysql_charset='latin1')
op.create_table(
'stream_definition',
sa.Column('id',
sa.String(length=36),
nullable=False),
sa.Column('tenant_id',
sa.String(length=36),
nullable=False),
sa.Column('name',
sa.String(length=190),
nullable=False,
server_default=''),
sa.Column('description',
sa.String(length=255),
nullable=True,
server_default=None),
sa.Column('select_by',
sa.dialects.mysql.LONGTEXT(),
nullable=True,
server_default=None),
sa.Column('group_by',
sa.dialects.mysql.LONGTEXT(length=20),
nullable=True,
server_default=None),
sa.Column('fire_criteria',
sa.dialects.mysql.LONGTEXT(length=20),
nullable=True,
server_default=None),
sa.Column('expiration',
sa.dialects.mysql.INTEGER(display_width=10,
unsigned=True),
nullable=True,
server_default='0'),
sa.Column('actions_enabled',
sa.Boolean(),
nullable=False,
server_default='1'),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.Column('deleted_at',
sa.DateTime(),
nullable=True,
server_default=None),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('tenant_id', 'name', name='tenant_name'),
sa.Index('name', 'name'),
sa.Index('tenant_id', 'tenant_id'),
sa.Index('deleted_at', 'deleted_at'),
sa.Index('created_at', 'created_at'),
sa.Index('updated_at', 'updated_at'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'stream_actions',
sa.Column('stream_definition_id',
sa.String(length=36),
                  sa.ForeignKey('stream_definition.id',
                                name='fk_stream_action_stream_definition_id',
                                ondelete='CASCADE'),
nullable=False),
sa.Column('action_id',
sa.String(length=36),
sa.ForeignKey('notification_method.id',
name='fk_stream_action_notification_method_id',
ondelete='CASCADE'),
nullable=False),
sa.Column('action_type',
sa.String(length=20),
sa.ForeignKey('stream_actions_action_type.name'),
nullable=False),
sa.PrimaryKeyConstraint('stream_definition_id', 'action_id',
'action_type'),
sa.Index('stream_definition_id', 'stream_definition_id'),
sa.Index('action_type', 'action_type'),
mysql_charset='utf8mb4',
mysql_collate='utf8mb4_unicode_ci')
op.create_table(
'event_transform',
sa.Column('id',
sa.dialects.mysql.VARCHAR(length=36, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('tenant_id',
sa.dialects.mysql.VARCHAR(length=36, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('name',
sa.dialects.mysql.VARCHAR(length=64, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('description',
sa.dialects.mysql.VARCHAR(length=250, charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('specification',
sa.dialects.mysql.LONGTEXT(charset='utf8mb4',
collation='utf8mb4_unicode_ci'),
nullable=False),
sa.Column('enabled',
sa.Boolean(),
nullable=True,
server_default=None),
sa.Column('created_at',
sa.DateTime(),
nullable=False),
sa.Column('updated_at',
sa.DateTime(),
nullable=False),
sa.Column('deleted_at',
sa.DateTime(),
nullable=True,
server_default=None),
sa.PrimaryKeyConstraint('id'),
sa.Index('name', 'name'),
sa.Index('tenant_id', 'tenant_id'),
sa.Index('deleted_at', 'deleted_at'),
sa.Index('created_at', 'created_at'),
sa.Index('updated_at', 'updated_at'),
sa.UniqueConstraint('tenant_id', 'name', name='tenant_name'),
mysql_charset='utf8mb4')
def downgrade():
    # Drop tables in reverse dependency order so that foreign key
    # constraints do not block the drops.
    op.drop_table('alarm_metric')
    op.drop_table('alarm_action')
    op.drop_table('sub_alarm')
    op.drop_table('sub_alarm_definition_dimension')
    op.drop_table('sub_alarm_definition')
    op.drop_table('stream_actions')
    op.drop_table('alarm')
    op.drop_table('alarm_definition')
    op.drop_table('notification_method')
    op.drop_table('metric_definition_dimensions')
    op.drop_table('metric_definition')
    op.drop_table('metric_dimension')
    op.drop_table('schema_migrations')
    op.drop_table('stream_definition')
    op.drop_table('event_transform')
    op.drop_table('alarm_state')
    op.drop_table('alarm_definition_severity')
    op.drop_table('notification_method_type')
    op.drop_table('stream_actions_action_type')
|
MikkCZ/kitsune
|
refs/heads/master
|
kitsune/tags/tests/test_templatetags.py
|
6
|
from mock import Mock
from nose.tools import eq_
from taggit.models import Tag
from kitsune.sumo.tests import TestCase
from kitsune.tags.templatetags.jinja_helpers import tags_to_text
class TestTagsToText(TestCase):
def test_no_tags(self):
eq_('', tags_to_text([]))
def test_one_tag(self):
eq_('tag1', tags_to_text([_tag('tag1')]))
def test_two_tags(self):
eq_('tag1,tag2', tags_to_text([_tag('tag1'), _tag('tag2')]))
def test_three_tags(self):
eq_('tag1,tag2,tag3', tags_to_text(
[_tag('tag1'), _tag('tag2'), _tag('tag3')]))
def _tag(slug):
tag = Mock(spec=Tag)
tag.slug = slug
return tag
|
rmboggs/django
|
refs/heads/master
|
django/db/backends/postgresql/base.py
|
143
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.') if v.isdigit())
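# For example, a psycopg2.__version__ such as '2.7.4 (dt dec pq3 ext lto)'
# maps to the tuple (2, 7, 4) (illustrative value).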
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 4, 5):
raise ImproperlyConfigured("psycopg2_version 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import utc_tzinfo_factory # isort:skip
from .version import get_version # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
if conn_timezone_name != self.timezone_name:
cursor = self.connection.cursor()
try:
cursor.execute(self.ops.set_time_zone_sql(), [self.timezone_name])
finally:
cursor.close()
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super(DatabaseWrapper, self)._nodb_connection
try:
nodb_connection.ensure_connection()
except (DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the default database instead.",
RuntimeWarning
)
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
nodb_connection = self.__class__(
                settings_dict,
alias=self.alias,
allow_thread_sharing=False)
return nodb_connection
@cached_property
def psycopg2_version(self):
return PSYCOPG2_VERSION
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
|
dongwoooo/project_flask
|
refs/heads/master
|
lib/tweepy/cursor.py
|
27
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from tweepy.error import TweepError
class Cursor(object):
"""Pagination helper class"""
def __init__(self, method, *args, **kargs):
if hasattr(method, 'pagination_mode'):
if method.pagination_mode == 'cursor':
self.iterator = CursorIterator(method, args, kargs)
elif method.pagination_mode == 'id':
self.iterator = IdIterator(method, args, kargs)
elif method.pagination_mode == 'page':
self.iterator = PageIterator(method, args, kargs)
else:
raise TweepError('Invalid pagination mode.')
else:
raise TweepError('This method does not perform pagination')
def pages(self, limit=0):
"""Return iterator for pages"""
if limit > 0:
self.iterator.limit = limit
return self.iterator
def items(self, limit=0):
"""Return iterator for items in each page"""
i = ItemIterator(self.iterator)
i.limit = limit
return i
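# Illustrative usage (assumes an authenticated tweepy API instance `api`;
# `process_page` is a hypothetical callback):
#
#   for page in Cursor(api.user_timeline, id='twitter').pages(3):
#       process_page(page)
#   for status in Cursor(api.user_timeline).items(10):
#       print(status.text)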
class BaseIterator(object):
def __init__(self, method, args, kargs):
self.method = method
self.args = args
self.kargs = kargs
self.limit = 0
def next(self):
raise NotImplementedError
def prev(self):
raise NotImplementedError
def __iter__(self):
return self
class CursorIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.next_cursor = -1
self.prev_cursor = 0
self.count = 0
def next(self):
if self.next_cursor == 0 or (self.limit and self.count == self.limit):
raise StopIteration
data, cursors = self.method(
cursor=self.next_cursor, *self.args, **self.kargs
)
self.prev_cursor, self.next_cursor = cursors
if len(data) == 0:
raise StopIteration
self.count += 1
return data
def prev(self):
if self.prev_cursor == 0:
raise TweepError('Can not page back more, at first page')
data, self.next_cursor, self.prev_cursor = self.method(
cursor=self.prev_cursor, *self.args, **self.kargs
)
self.count -= 1
return data
class IdIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.max_id = kargs.get('max_id')
self.since_id = kargs.get('since_id')
self.count = 0
def next(self):
"""Fetch a set of items with IDs less than current set."""
if self.limit and self.limit == self.count:
raise StopIteration
# max_id is inclusive so decrement by one
# to avoid requesting duplicate items.
        max_id = self.max_id - 1 if self.max_id else None
        data = self.method(max_id=max_id, *self.args, **self.kargs)
if len(data) == 0:
raise StopIteration
self.max_id = data.max_id
self.since_id = data.since_id
self.count += 1
return data
def prev(self):
"""Fetch a set of items with IDs greater than current set."""
if self.limit and self.limit == self.count:
raise StopIteration
since_id = self.max_id
        data = self.method(since_id=since_id, *self.args, **self.kargs)
if len(data) == 0:
raise StopIteration
self.max_id = data.max_id
self.since_id = data.since_id
self.count += 1
return data
class PageIterator(BaseIterator):
def __init__(self, method, args, kargs):
BaseIterator.__init__(self, method, args, kargs)
self.current_page = 0
def next(self):
self.current_page += 1
items = self.method(page=self.current_page, *self.args, **self.kargs)
if len(items) == 0 or (self.limit > 0 and self.current_page > self.limit):
raise StopIteration
return items
def prev(self):
if (self.current_page == 1):
raise TweepError('Can not page back more, at first page')
self.current_page -= 1
return self.method(page=self.current_page, *self.args, **self.kargs)
class ItemIterator(BaseIterator):
def __init__(self, page_iterator):
self.page_iterator = page_iterator
self.limit = 0
self.current_page = None
self.page_index = -1
self.count = 0
def next(self):
if self.limit > 0 and self.count == self.limit:
raise StopIteration
if self.current_page is None or self.page_index == len(self.current_page) - 1:
# Reached end of current page, get the next page...
self.current_page = self.page_iterator.next()
self.page_index = -1
self.page_index += 1
self.count += 1
return self.current_page[self.page_index]
def prev(self):
if self.current_page is None:
raise TweepError('Can not go back more, at first page')
if self.page_index == 0:
            # At the beginning of the current page, move to the previous page...
self.current_page = self.page_iterator.prev()
self.page_index = len(self.current_page)
if self.page_index == 0:
raise TweepError('No more items')
self.page_index -= 1
self.count -= 1
return self.current_page[self.page_index]
|
kingmotley/SickRage
|
refs/heads/master
|
lib/unidecode/x01d.py
|
240
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'b', # 0x6c
'd', # 0x6d
'f', # 0x6e
'm', # 0x6f
'n', # 0x70
'p', # 0x71
'r', # 0x72
'r', # 0x73
's', # 0x74
't', # 0x75
'z', # 0x76
'g', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'p', # 0x7d
'', # 0x7e
'', # 0x7f
'b', # 0x80
'd', # 0x81
'f', # 0x82
'g', # 0x83
'k', # 0x84
'l', # 0x85
'm', # 0x86
'n', # 0x87
'p', # 0x88
'r', # 0x89
's', # 0x8a
'', # 0x8b
'v', # 0x8c
'x', # 0x8d
'z', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/conch/insults/colors.py
|
146
|
"""
You don't really want to use this module. Try helper.py instead.
"""
CLEAR = 0
BOLD = 1
DIM = 2
ITALIC = 3
UNDERSCORE = 4
BLINK_SLOW = 5
BLINK_FAST = 6
REVERSE = 7
CONCEALED = 8
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
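# Illustrative use of these SGR codes (hypothetical snippet; helper.py is
# the supported interface):
#
#   import sys
#   sys.stdout.write('\x1b[%dm%s\x1b[%dm\n' % (FG_RED, 'alert', CLEAR))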
|
ray-zhong/github_trend_spider
|
refs/heads/master
|
ENV/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
|
327
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from ..resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
        assert _cfgfile, 'sysconfig.cfg not found'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
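# For example (hypothetical values):
#   _subst_vars('{userbase}/lib', {'userbase': '/home/u/.local'})
#   -> '/home/u/.local/lib'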
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On macOS before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; e.g., for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
            archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
|
hashworks/CouchPotatoServer
|
refs/heads/forkDev
|
libs/tornado/options.py
|
79
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A command line parsing module that lets modules define their own options.
Each module defines its own options which are added to the global
option namespace, e.g.::
from tornado.options import define, options
define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
help="Main user memcache servers")
def connect():
db = database.Connection(options.mysql_host)
...
The ``main()`` method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded. However, all modules that define options
must have been imported before the command line is parsed.
Your ``main()`` method can parse the command line or parse a config file with
either::
tornado.options.parse_command_line()
# or
tornado.options.parse_config_file("/etc/server.conf")
Command line formats are what you would expect (``--myoption=myvalue``).
Config files are just Python files. Global names become options, e.g.::
myoption = "myvalue"
myotheroption = "myothervalue"
We support `datetimes <datetime.datetime>`, `timedeltas
<datetime.timedelta>`, ints, and floats (just pass a ``type`` kwarg to
`define`). We also accept multi-value options. See the documentation for
`define()` below.
`tornado.options.options` is a singleton instance of `OptionParser`, and
the top-level functions in this module (`define`, `parse_command_line`, etc)
simply call methods on it. You may create additional `OptionParser`
instances to define isolated sets of options, such as for subcommands.
.. note::
By default, several options are defined that will configure the
standard `logging` module when `parse_command_line` or `parse_config_file`
are called. If you want Tornado to leave the logging configuration
alone so you can manage it yourself, either pass ``--logging=none``
on the command line or do the following to disable it in code::
from tornado.options import options, parse_command_line
options.logging = None
parse_command_line()
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import numbers
import re
import sys
import os
import textwrap
from tornado.escape import _unicode, native_str
from tornado.log import define_logging_options
from tornado import stack_context
from tornado.util import basestring_type, exec_in
class Error(Exception):
"""Exception raised by errors in the options module."""
pass
class OptionParser(object):
"""A collection of options, a dictionary with object-like access.
Normally accessed via static functions in the `tornado.options` module,
which reference a global instance.
"""
def __init__(self):
# we have to use self.__dict__ because we override setattr.
self.__dict__['_options'] = {}
self.__dict__['_parse_callbacks'] = []
self.define("help", type=bool, help="show this help information",
callback=self._help_callback)
def __getattr__(self, name):
if isinstance(self._options.get(name), _Option):
return self._options[name].value()
raise AttributeError("Unrecognized option %r" % name)
def __setattr__(self, name, value):
if isinstance(self._options.get(name), _Option):
return self._options[name].set(value)
raise AttributeError("Unrecognized option %r" % name)
def __iter__(self):
return iter(self._options)
def __getitem__(self, item):
return self._options[item].value()
def items(self):
"""A sequence of (name, value) pairs.
.. versionadded:: 3.1
"""
return [(name, opt.value()) for name, opt in self._options.items()]
def groups(self):
"""The set of option-groups created by ``define``.
.. versionadded:: 3.1
"""
return set(opt.group_name for opt in self._options.values())
def group_dict(self, group):
"""The names and values of options in a group.
Useful for copying options into Application settings::
from tornado.options import define, parse_command_line, options
define('template_path', group='application')
define('static_path', group='application')
parse_command_line()
application = Application(
handlers, **options.group_dict('application'))
.. versionadded:: 3.1
"""
return dict(
(name, opt.value()) for name, opt in self._options.items()
if not group or group == opt.group_name)
def as_dict(self):
"""The names and values of all options.
.. versionadded:: 3.1
"""
return dict(
(name, opt.value()) for name, opt in self._options.items())
def define(self, name, default=None, type=None, help=None, metavar=None,
multiple=False, group=None, callback=None):
"""Defines a new command line option.
If ``type`` is given (one of str, float, int, datetime, or timedelta)
or can be inferred from the ``default``, we parse the command line
arguments based on the given type. If ``multiple`` is True, we accept
comma-separated values, and the option value is always a list.
        For multi-value integers, we also accept the syntax ``x:y``, which
        expands to the integers from ``x`` through ``y`` inclusive - very
        useful for long integer ranges.
``help`` and ``metavar`` are used to construct the
automatically generated command line help string. The help
message is formatted like::
--name=METAVAR help string
``group`` is used to group the defined options in logical
groups. By default, command line options are grouped by the
file in which they are defined.
Command line option names must be unique globally. They can be parsed
from the command line with `parse_command_line` or parsed from a
config file with `parse_config_file`.
If a ``callback`` is given, it will be run with the new value whenever
the option is changed. This can be used to combine command-line
and file-based options::
define("config", type=str, help="path to config file",
callback=lambda path: parse_config_file(path, final=False))
With this definition, options in the file specified by ``--config`` will
override options set earlier on the command line, but can be overridden
by later flags.
"""
if name in self._options:
raise Error("Option %r already defined in %s" %
(name, self._options[name].file_name))
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
# Can be called directly, or through top level define() fn, in which
# case, step up above that frame to look for real caller.
if (frame.f_back.f_code.co_filename == options_file and
frame.f_back.f_code.co_name == 'define'):
frame = frame.f_back
file_name = frame.f_back.f_code.co_filename
if file_name == options_file:
file_name = ""
if type is None:
if not multiple and default is not None:
type = default.__class__
else:
type = str
if group:
group_name = group
else:
group_name = file_name
self._options[name] = _Option(name, file_name=file_name,
default=default, type=type, help=help,
metavar=metavar, multiple=multiple,
group_name=group_name,
callback=callback)
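    # Illustrative sketch (comments only, so importing this module is
    # unaffected): a hypothetical multi-value integer option and the range
    # syntax it accepts. The option name "ports" is an assumption, not part
    # of Tornado::
    #
    #     define("ports", default=[8000], type=int, multiple=True,
    #            help="ports to listen on")
    #     # --ports=8000:8003,9000 parses to [8000, 8001, 8002, 8003, 9000]
    #     # since ``x:y`` is inclusive at both ends.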
def parse_command_line(self, args=None, final=True):
"""Parses all options given on the command line (defaults to
`sys.argv`).
Note that ``args[0]`` is ignored since it is the program name
in `sys.argv`.
We return a list of all arguments that are not parsed as options.
If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations
from multiple sources.
"""
if args is None:
args = sys.argv
remaining = []
for i in range(1, len(args)):
            # Everything from the first non-option argument on is left
            # unparsed and returned to the caller
if not args[i].startswith("-"):
remaining = args[i:]
break
if args[i] == "--":
remaining = args[i + 1:]
break
arg = args[i].lstrip("-")
name, equals, value = arg.partition("=")
name = name.replace('-', '_')
            if name not in self._options:
self.print_help()
raise Error('Unrecognized command line option: %r' % name)
option = self._options[name]
if not equals:
if option.type == bool:
value = "true"
else:
raise Error('Option %r requires a value' % name)
option.parse(value)
if final:
self.run_parse_callbacks()
return remaining
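    # Illustrative sketch (comments only; the option and file names are
    # hypothetical): option parsing stops at ``--`` or at the first
    # non-option argument::
    #
    #     remaining = options.parse_command_line(
    #         ["prog", "--port=80", "--", "input.txt"])
    #     # sets options.port to 80 and returns ["input.txt"]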
def parse_config_file(self, path, final=True):
"""Parses and loads the Python config file at the given path.
If ``final`` is ``False``, parse callbacks will not be run.
This is useful for applications that wish to combine configurations
from multiple sources.
.. versionchanged:: 4.1
Config files are now always interpreted as utf-8 instead of
the system default encoding.
"""
config = {}
with open(path, 'rb') as f:
exec_in(native_str(f.read()), config, config)
for name in config:
if name in self._options:
self._options[name].set(config[name])
if final:
self.run_parse_callbacks()
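    # Illustrative sketch (comments only): a config file is plain Python,
    # so a hypothetical /etc/server.conf could contain::
    #
    #     port = 80
    #     mysql_host = "db.example.com:3306"
    #
    # Only globals whose names match previously defined options are applied;
    # anything else in the file is silently ignored.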
def print_help(self, file=None):
"""Prints all the command line options to stderr (or another file)."""
if file is None:
file = sys.stderr
print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
print("\nOptions:\n", file=file)
by_group = {}
for option in self._options.values():
by_group.setdefault(option.group_name, []).append(option)
for filename, o in sorted(by_group.items()):
if filename:
print("\n%s options:\n" % os.path.normpath(filename), file=file)
o.sort(key=lambda option: option.name)
for option in o:
prefix = option.name
if option.metavar:
prefix += "=" + option.metavar
description = option.help or ""
if option.default is not None and option.default != '':
description += " (default %s)" % option.default
lines = textwrap.wrap(description, 79 - 35)
if len(prefix) > 30 or len(lines) == 0:
lines.insert(0, '')
print(" --%-30s %s" % (prefix, lines[0]), file=file)
for line in lines[1:]:
print("%-34s %s" % (' ', line), file=file)
print(file=file)
def _help_callback(self, value):
if value:
self.print_help()
sys.exit(0)
def add_parse_callback(self, callback):
"""Adds a parse callback, to be invoked when option parsing is done."""
self._parse_callbacks.append(stack_context.wrap(callback))
def run_parse_callbacks(self):
for callback in self._parse_callbacks:
callback()
def mockable(self):
"""Returns a wrapper around self that is compatible with
`mock.patch <unittest.mock.patch>`.
The `mock.patch <unittest.mock.patch>` function (included in
the standard library `unittest.mock` package since Python 3.3,
or in the third-party ``mock`` package for older versions of
Python) is incompatible with objects like ``options`` that
override ``__getattr__`` and ``__setattr__``. This function
returns an object that can be used with `mock.patch.object
<unittest.mock.patch.object>` to modify option values::
with mock.patch.object(options.mockable(), 'name', value):
assert options.name == value
"""
return _Mockable(self)
class _Mockable(object):
"""`mock.patch` compatible wrapper for `OptionParser`.
As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
the attribute it set instead of setting a new one (assuming that
    the object does not capture ``__setattr__``, so the patch
created a new attribute in ``__dict__``).
_Mockable's getattr and setattr pass through to the underlying
OptionParser, and delattr undoes the effect of a previous setattr.
"""
def __init__(self, options):
# Modify __dict__ directly to bypass __setattr__
self.__dict__['_options'] = options
self.__dict__['_originals'] = {}
def __getattr__(self, name):
return getattr(self._options, name)
def __setattr__(self, name, value):
assert name not in self._originals, "don't reuse mockable objects"
self._originals[name] = getattr(self._options, name)
setattr(self._options, name, value)
def __delattr__(self, name):
setattr(self._options, name, self._originals.pop(name))
class _Option(object):
UNSET = object()
def __init__(self, name, default=None, type=basestring_type, help=None,
metavar=None, multiple=False, file_name=None, group_name=None,
callback=None):
if default is None and multiple:
default = []
self.name = name
self.type = type
self.help = help
self.metavar = metavar
self.multiple = multiple
self.file_name = file_name
self.group_name = group_name
self.callback = callback
self.default = default
self._value = _Option.UNSET
def value(self):
return self.default if self._value is _Option.UNSET else self._value
def parse(self, value):
_parse = {
datetime.datetime: self._parse_datetime,
datetime.timedelta: self._parse_timedelta,
bool: self._parse_bool,
basestring_type: self._parse_string,
}.get(self.type, self.type)
if self.multiple:
self._value = []
for part in value.split(","):
if issubclass(self.type, numbers.Integral):
# allow ranges of the form X:Y (inclusive at both ends)
lo, _, hi = part.partition(":")
lo = _parse(lo)
hi = _parse(hi) if hi else lo
self._value.extend(range(lo, hi + 1))
else:
self._value.append(_parse(part))
else:
self._value = _parse(value)
if self.callback is not None:
self.callback(self._value)
return self.value()
def set(self, value):
if self.multiple:
if not isinstance(value, list):
raise Error("Option %r is required to be a list of %s" %
(self.name, self.type.__name__))
for item in value:
if item is not None and not isinstance(item, self.type):
raise Error("Option %r is required to be a list of %s" %
(self.name, self.type.__name__))
else:
if value is not None and not isinstance(value, self.type):
raise Error("Option %r is required to be a %s (%s given)" %
(self.name, self.type.__name__, type(value)))
self._value = value
if self.callback is not None:
self.callback(self._value)
# Supported date/time formats in our options
_DATETIME_FORMATS = [
"%a %b %d %H:%M:%S %Y",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y-%m-%dT%H:%M",
"%Y%m%d %H:%M:%S",
"%Y%m%d %H:%M",
"%Y-%m-%d",
"%Y%m%d",
"%H:%M:%S",
"%H:%M",
]
def _parse_datetime(self, value):
for format in self._DATETIME_FORMATS:
try:
return datetime.datetime.strptime(value, format)
except ValueError:
pass
raise Error('Unrecognized date/time format: %r' % value)
_TIMEDELTA_ABBREVS = [
('hours', ['h']),
('minutes', ['m', 'min']),
('seconds', ['s', 'sec']),
('milliseconds', ['ms']),
('microseconds', ['us']),
('days', ['d']),
('weeks', ['w']),
]
_TIMEDELTA_ABBREV_DICT = dict(
(abbrev, full) for full, abbrevs in _TIMEDELTA_ABBREVS
for abbrev in abbrevs)
_FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
_TIMEDELTA_PATTERN = re.compile(
r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE)
    def _parse_timedelta(self, value):
        # Accumulate each "<float><unit>" component (e.g. "1h 30m") into a
        # single timedelta; a missing unit defaults to seconds.
        total = datetime.timedelta()
        start = 0
        while start < len(value):
            m = self._TIMEDELTA_PATTERN.match(value, start)
            if not m:
                raise Exception('Unrecognized timedelta: %r' % value)
            num = float(m.group(1))
            units = m.group(2) or 'seconds'
            units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
            total += datetime.timedelta(**{units: num})
            start = m.end()
        return total
def _parse_bool(self, value):
return value.lower() not in ("false", "0", "f")
def _parse_string(self, value):
return _unicode(value)
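    # Illustrative parse results (comments only; the inputs are assumptions
    # chosen to exercise the helpers above):
    #
    #     _parse_timedelta("45s")     -> datetime.timedelta(seconds=45)
    #     _parse_timedelta("1h 30m")  -> datetime.timedelta(hours=1, minutes=30)
    #     _parse_datetime("2014-04-01 10:30")
    #                                 -> datetime.datetime(2014, 4, 1, 10, 30)
    #     _parse_bool("0")            -> False (only "false", "0", "f" are falsy)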
options = OptionParser()
"""Global options object.
All defined options are available as attributes on this object.
"""
def define(name, default=None, type=None, help=None, metavar=None,
multiple=False, group=None, callback=None):
"""Defines an option in the global namespace.
See `OptionParser.define`.
"""
return options.define(name, default=default, type=type, help=help,
metavar=metavar, multiple=multiple, group=group,
callback=callback)
def parse_command_line(args=None, final=True):
"""Parses global options from the command line.
See `OptionParser.parse_command_line`.
"""
return options.parse_command_line(args, final=final)
def parse_config_file(path, final=True):
"""Parses global options from a config file.
See `OptionParser.parse_config_file`.
"""
return options.parse_config_file(path, final=final)
def print_help(file=None):
"""Prints all the command line options to stderr (or another file).
See `OptionParser.print_help`.
"""
return options.print_help(file)
def add_parse_callback(callback):
"""Adds a parse callback, to be invoked when option parsing is done.
See `OptionParser.add_parse_callback`
"""
options.add_parse_callback(callback)
# Default options
define_logging_options(options)
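# A minimal self-test sketch (an illustration added here, not part of
# Tornado; the option names "port" and "hosts" are hypothetical):
if __name__ == "__main__":
    define("port", default=8888, type=int, help="port to listen on")
    define("hosts", default=["127.0.0.1"], multiple=True,
           help="server hosts")
    remaining = parse_command_line()
    print("port:", options.port)
    print("hosts:", options.hosts)
    print("unparsed arguments:", remaining)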
|
TNT-Samuel/Coding-Projects
|
refs/heads/master
|
DNS Server/Source/Lib/site-packages/joblib/externals/loky/backend/semaphore_tracker.py
|
9
|
###############################################################################
# Server process to keep track of unlinked semaphores and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
# * include custom spawnv_passfds to start the process
# * use custom unlink from our own SemLock implementation
# * add some VERBOSE logging
#
#
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
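#
# Rough lifecycle sketch (comments only; the semaphore name is
# hypothetical):
#
#     from joblib.externals.loky.backend import semaphore_tracker
#     semaphore_tracker.ensure_running()       # spawn the tracker once
#     semaphore_tracker.register("/mp-demo")   # tracked until unregistered
#     semaphore_tracker.unregister("/mp-demo")
#     # any name still registered when every writer has exited gets
#     # sem_unlink()ed by the tracker process
#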
import os
import signal
import sys
import threading
import warnings
from . import spawn
from multiprocessing import util
try:
from _multiprocessing import sem_unlink
except ImportError:
from .semlock import sem_unlink
__all__ = ['ensure_running', 'register', 'unregister']
VERBOSE = False
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
self._pid = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
        '''Make sure that the semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._fd is not None:
# semaphore tracker was launched before, is it still running?
if self._check_alive():
# => still alive
return
# => dead, launch it again
os.close(self._fd)
self._fd = None
self._pid = None
warnings.warn('semaphore_tracker: process died unexpectedly, '
'relaunching. Some semaphores might leak.')
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from {} import main; main(%d)'.format(main.__module__)
r, w = os.pipe()
try:
fds_to_pass.append(r)
                # the process will outlive us, so no need to wait on the pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
                # In Python 3.3, there is a bug which puts `-RRRRR..` instead
                # of `-R` in args. Replace it to get the correct flags.
                # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488
if sys.version_info[:2] <= (3, 3):
import re
for i in range(1, len(args)):
args[i] = re.sub("-R+", "-R", args[i])
args += ['-c', cmd % r]
util.debug("launching Semaphore tracker: {}".format(args))
pid = spawnv_passfds(exe, args, fds_to_pass)
except BaseException:
os.close(w)
raise
else:
self._fd = w
self._pid = pid
finally:
os.close(r)
def _check_alive(self):
'''Check for the existence of the semaphore tracker process.'''
try:
self._send('PROBE', '')
except BrokenPipeError:
return False
else:
return True
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self.ensure_running()
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self.ensure_running()
self._send('UNREGISTER', name)
    def _send(self, cmd, name):
        msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
        if len(msg) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512; check the whole
            # encoded message, not just the name
            raise ValueError('msg too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)
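    # Illustrative wire format (comments only; the name shown is
    # hypothetical): each request is one newline-terminated ASCII line,
    # written atomically to the tracker's pipe::
    #
    #     b'REGISTER:/mp-1a2b3c\n'
    #     b'UNREGISTER:/mp-1a2b3c\n'
    #     b'PROBE:\n'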
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
if VERBOSE: # pragma: no cover
sys.stderr.write("Main semaphore tracker is running\n")
sys.stderr.flush()
cache = set()
try:
# keep track of registered/unregistered semaphores
with os.fdopen(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
name = name.decode('ascii')
cache.add(name)
if VERBOSE: # pragma: no cover
sys.stderr.write("[SemaphoreTracker] register {}\n"
.format(name))
sys.stderr.flush()
elif cmd == b'UNREGISTER':
name = name.decode('ascii')
cache.remove(name)
if VERBOSE: # pragma: no cover
sys.stderr.write("[SemaphoreTracker] unregister {}"
": cache({})\n"
.format(name, len(cache)))
sys.stderr.flush()
elif cmd == b'PROBE':
pass
else:
raise RuntimeError('unrecognized command %r' % cmd)
except BaseException:
try:
sys.excepthook(*sys.exc_info())
except BaseException:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
try:
sem_unlink(name)
if VERBOSE: # pragma: no cover
sys.stderr.write("[SemaphoreTracker] unlink {}\n"
.format(name))
sys.stderr.flush()
except Exception as e:
warnings.warn('semaphore_tracker: %r: %r' % (name, e))
finally:
pass
if VERBOSE: # pragma: no cover
sys.stderr.write("semaphore tracker shut down\n")
sys.stderr.flush()
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
passfds = sorted(passfds)
errpipe_read, errpipe_write = os.pipe()
try:
from .reduction import _mk_inheritable
_pass = []
for fd in passfds:
_pass += [_mk_inheritable(fd)]
from .fork_exec import fork_exec
return fork_exec(args, _pass)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
|
testmana2/test
|
refs/heads/master
|
Plugins/VcsPlugins/vcsMercurial/ProjectHelper.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the VCS project helper for Mercurial.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtWidgets import QMenu, QToolBar
from E5Gui import E5MessageBox
from E5Gui.E5Application import e5App
from VCS.ProjectHelper import VcsProjectHelper
from E5Gui.E5Action import E5Action
import UI.PixmapCache
import Preferences
class HgProjectHelper(VcsProjectHelper):
"""
Class implementing the VCS project helper for Mercurial.
"""
def __init__(self, vcsObject, projectObject, parent=None, name=None):
"""
Constructor
@param vcsObject reference to the vcs object
@param projectObject reference to the project object
@param parent parent widget (QWidget)
@param name name of this object (string)
"""
VcsProjectHelper.__init__(self, vcsObject, projectObject, parent, name)
# instantiate the extensions
from .QueuesExtension.ProjectHelper import QueuesProjectHelper
from .FetchExtension.ProjectHelper import FetchProjectHelper
from .PurgeExtension.ProjectHelper import PurgeProjectHelper
from .GpgExtension.ProjectHelper import GpgProjectHelper
from .TransplantExtension.ProjectHelper import TransplantProjectHelper
from .RebaseExtension.ProjectHelper import RebaseProjectHelper
from .ShelveExtension.ProjectHelper import ShelveProjectHelper
from .LargefilesExtension.ProjectHelper import LargefilesProjectHelper
self.__extensions = {
"mq": QueuesProjectHelper(),
"fetch": FetchProjectHelper(),
"purge": PurgeProjectHelper(),
"gpg": GpgProjectHelper(),
"transplant": TransplantProjectHelper(),
"rebase": RebaseProjectHelper(),
"shelve": ShelveProjectHelper(),
"largefiles": LargefilesProjectHelper(),
}
self.__extensionMenuTitles = {}
for extension in self.__extensions:
self.__extensionMenuTitles[
self.__extensions[extension].menuTitle()] = extension
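    # Illustrative sketch (comments only; the title string is hypothetical):
    # __extensionMenuTitles maps a menu title back to its extension key::
    #
    #     extensionName = self.__extensionMenuTitles.get(
    #         self.tr("Queues"), "")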
def setObjects(self, vcsObject, projectObject):
"""
Public method to set references to the vcs and project objects.
@param vcsObject reference to the vcs object
@param projectObject reference to the project object
"""
self.vcs = vcsObject
self.project = projectObject
for extension in self.__extensions.values():
extension.setObjects(vcsObject, projectObject)
self.vcs.iniFileChanged.connect(self.__checkActions)
def getProject(self):
"""
Public method to get a reference to the project object.
@return reference to the project object (Project)
"""
return self.project
def getActions(self):
"""
Public method to get a list of all actions.
@return list of all actions (list of E5Action)
"""
actions = self.actions[:]
for extension in self.__extensions.values():
actions.extend(extension.getActions())
return actions
def initActions(self):
"""
Public method to generate the action objects.
"""
self.vcsNewAct = E5Action(
self.tr('New from repository'),
UI.PixmapCache.getIcon("vcsCheckout.png"),
self.tr('&New from repository...'), 0, 0,
self, 'mercurial_new')
self.vcsNewAct.setStatusTip(self.tr(
'Create (clone) a new project from a Mercurial repository'
))
self.vcsNewAct.setWhatsThis(self.tr(
"""<b>New from repository</b>"""
"""<p>This creates (clones) a new local project from """
"""a Mercurial repository.</p>"""
))
self.vcsNewAct.triggered.connect(self._vcsCheckout)
self.actions.append(self.vcsNewAct)
self.hgIncomingAct = E5Action(
self.tr('Show incoming log'),
UI.PixmapCache.getIcon("vcsUpdate.png"),
self.tr('Show incoming log'),
0, 0, self, 'mercurial_incoming')
self.hgIncomingAct.setStatusTip(self.tr(
'Show the log of incoming changes'
))
self.hgIncomingAct.setWhatsThis(self.tr(
"""<b>Show incoming log</b>"""
"""<p>This shows the log of changes coming into the"""
""" repository.</p>"""
))
self.hgIncomingAct.triggered.connect(self.__hgIncoming)
self.actions.append(self.hgIncomingAct)
self.hgPullAct = E5Action(
self.tr('Pull changes'),
UI.PixmapCache.getIcon("vcsUpdate.png"),
self.tr('Pull changes'),
0, 0, self, 'mercurial_pull')
self.hgPullAct.setStatusTip(self.tr(
'Pull changes from a remote repository'
))
self.hgPullAct.setWhatsThis(self.tr(
"""<b>Pull changes</b>"""
"""<p>This pulls changes from a remote repository into the """
"""local repository.</p>"""
))
self.hgPullAct.triggered.connect(self.__hgPull)
self.actions.append(self.hgPullAct)
self.vcsUpdateAct = E5Action(
self.tr('Update from repository'),
UI.PixmapCache.getIcon("vcsUpdate.png"),
self.tr('&Update from repository'), 0, 0, self,
'mercurial_update')
self.vcsUpdateAct.setStatusTip(self.tr(
'Update the local project from the Mercurial repository'
))
self.vcsUpdateAct.setWhatsThis(self.tr(
"""<b>Update from repository</b>"""
"""<p>This updates the local project from the Mercurial"""
""" repository.</p>"""
))
self.vcsUpdateAct.triggered.connect(self._vcsUpdate)
self.actions.append(self.vcsUpdateAct)
self.vcsCommitAct = E5Action(
self.tr('Commit changes to repository'),
UI.PixmapCache.getIcon("vcsCommit.png"),
self.tr('&Commit changes to repository...'), 0, 0, self,
'mercurial_commit')
self.vcsCommitAct.setStatusTip(self.tr(
'Commit changes to the local project to the Mercurial repository'
))
self.vcsCommitAct.setWhatsThis(self.tr(
"""<b>Commit changes to repository</b>"""
"""<p>This commits changes to the local project to the """
"""Mercurial repository.</p>"""
))
self.vcsCommitAct.triggered.connect(self._vcsCommit)
self.actions.append(self.vcsCommitAct)
self.hgOutgoingAct = E5Action(
self.tr('Show outgoing log'),
UI.PixmapCache.getIcon("vcsCommit.png"),
self.tr('Show outgoing log'),
0, 0, self, 'mercurial_outgoing')
self.hgOutgoingAct.setStatusTip(self.tr(
'Show the log of outgoing changes'
))
self.hgOutgoingAct.setWhatsThis(self.tr(
"""<b>Show outgoing log</b>"""
"""<p>This shows the log of changes outgoing out of the"""
""" repository.</p>"""
))
self.hgOutgoingAct.triggered.connect(self.__hgOutgoing)
self.actions.append(self.hgOutgoingAct)
self.hgPushAct = E5Action(
self.tr('Push changes'),
UI.PixmapCache.getIcon("vcsCommit.png"),
self.tr('Push changes'),
0, 0, self, 'mercurial_push')
self.hgPushAct.setStatusTip(self.tr(
'Push changes to a remote repository'
))
self.hgPushAct.setWhatsThis(self.tr(
"""<b>Push changes</b>"""
"""<p>This pushes changes from the local repository to a """
"""remote repository.</p>"""
))
self.hgPushAct.triggered.connect(self.__hgPush)
self.actions.append(self.hgPushAct)
self.hgPushForcedAct = E5Action(
self.tr('Push changes (force)'),
UI.PixmapCache.getIcon("vcsCommit.png"),
self.tr('Push changes (force)'),
0, 0, self, 'mercurial_push_forced')
self.hgPushForcedAct.setStatusTip(self.tr(
'Push changes to a remote repository with force option'
))
self.hgPushForcedAct.setWhatsThis(self.tr(
"""<b>Push changes (force)</b>"""
"""<p>This pushes changes from the local repository to a """
"""remote repository using the 'force' option.</p>"""
))
self.hgPushForcedAct.triggered.connect(self.__hgPushForced)
self.actions.append(self.hgPushForcedAct)
self.vcsExportAct = E5Action(
self.tr('Export from repository'),
UI.PixmapCache.getIcon("vcsExport.png"),
self.tr('&Export from repository...'),
0, 0, self, 'mercurial_export_repo')
self.vcsExportAct.setStatusTip(self.tr(
'Export a project from the repository'
))
self.vcsExportAct.setWhatsThis(self.tr(
"""<b>Export from repository</b>"""
"""<p>This exports a project from the repository.</p>"""
))
self.vcsExportAct.triggered.connect(self._vcsExport)
self.actions.append(self.vcsExportAct)
self.vcsLogAct = E5Action(
self.tr('Show log'),
UI.PixmapCache.getIcon("vcsLog.png"),
self.tr('Show &log'),
0, 0, self, 'mercurial_log')
self.vcsLogAct.setStatusTip(self.tr(
'Show the log of the local project'
))
self.vcsLogAct.setWhatsThis(self.tr(
"""<b>Show log</b>"""
"""<p>This shows the log of the local project.</p>"""
))
self.vcsLogAct.triggered.connect(self._vcsLog)
self.actions.append(self.vcsLogAct)
self.hgLogBrowserAct = E5Action(
self.tr('Show log browser'),
UI.PixmapCache.getIcon("vcsLog.png"),
self.tr('Show log browser'),
0, 0, self, 'mercurial_log_browser')
self.hgLogBrowserAct.setStatusTip(self.tr(
'Show a dialog to browse the log of the local project'
))
self.hgLogBrowserAct.setWhatsThis(self.tr(
"""<b>Show log browser</b>"""
"""<p>This shows a dialog to browse the log of the local"""
""" project. A limited number of entries is shown first."""
""" More can be retrieved later on.</p>"""
))
self.hgLogBrowserAct.triggered.connect(self._vcsLogBrowser)
self.actions.append(self.hgLogBrowserAct)
self.vcsDiffAct = E5Action(
self.tr('Show differences'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show &difference'),
0, 0, self, 'mercurial_diff')
self.vcsDiffAct.setStatusTip(self.tr(
'Show the difference of the local project to the repository'
))
self.vcsDiffAct.setWhatsThis(self.tr(
"""<b>Show differences</b>"""
"""<p>This shows differences of the local project to the"""
""" repository.</p>"""
))
self.vcsDiffAct.triggered.connect(self._vcsDiff)
self.actions.append(self.vcsDiffAct)
self.hgExtDiffAct = E5Action(
self.tr('Show differences (extended)'),
UI.PixmapCache.getIcon("vcsDiff.png"),
self.tr('Show differences (extended)'),
0, 0, self, 'mercurial_extendeddiff')
self.hgExtDiffAct.setStatusTip(self.tr(
'Show the difference of revisions of the project to the repository'
))
self.hgExtDiffAct.setWhatsThis(self.tr(
"""<b>Show differences (extended)</b>"""
"""<p>This shows differences of selectable revisions of the"""
""" project.</p>"""
))
self.hgExtDiffAct.triggered.connect(self.__hgExtendedDiff)
self.actions.append(self.hgExtDiffAct)
self.vcsStatusAct = E5Action(
self.tr('Show status'),
UI.PixmapCache.getIcon("vcsStatus.png"),
self.tr('Show &status...'),
0, 0, self, 'mercurial_status')
self.vcsStatusAct.setStatusTip(self.tr(
'Show the status of the local project'
))
self.vcsStatusAct.setWhatsThis(self.tr(
"""<b>Show status</b>"""
"""<p>This shows the status of the local project.</p>"""
))
self.vcsStatusAct.triggered.connect(self._vcsStatus)
self.actions.append(self.vcsStatusAct)
self.hgSummaryAct = E5Action(
self.tr('Show Summary'),
UI.PixmapCache.getIcon("vcsSummary.png"),
self.tr('Show summary...'),
0, 0, self, 'mercurial_summary')
self.hgSummaryAct.setStatusTip(self.tr(
'Show summary information of the working directory status'
))
self.hgSummaryAct.setWhatsThis(self.tr(
"""<b>Show summary</b>"""
"""<p>This shows some summary information of the working"""
""" directory status.</p>"""
))
self.hgSummaryAct.triggered.connect(self.__hgSummary)
self.actions.append(self.hgSummaryAct)
self.hgHeadsAct = E5Action(
self.tr('Show heads'),
self.tr('Show heads'),
0, 0, self, 'mercurial_heads')
self.hgHeadsAct.setStatusTip(self.tr(
'Show the heads of the repository'
))
self.hgHeadsAct.setWhatsThis(self.tr(
"""<b>Show heads</b>"""
"""<p>This shows the heads of the repository.</p>"""
))
self.hgHeadsAct.triggered.connect(self.__hgHeads)
self.actions.append(self.hgHeadsAct)
self.hgParentsAct = E5Action(
self.tr('Show parents'),
self.tr('Show parents'),
0, 0, self, 'mercurial_parents')
self.hgParentsAct.setStatusTip(self.tr(
'Show the parents of the repository'
))
self.hgParentsAct.setWhatsThis(self.tr(
"""<b>Show parents</b>"""
"""<p>This shows the parents of the repository.</p>"""
))
self.hgParentsAct.triggered.connect(self.__hgParents)
self.actions.append(self.hgParentsAct)
self.hgTipAct = E5Action(
self.tr('Show tip'),
self.tr('Show tip'),
0, 0, self, 'mercurial_tip')
self.hgTipAct.setStatusTip(self.tr(
'Show the tip of the repository'
))
self.hgTipAct.setWhatsThis(self.tr(
"""<b>Show tip</b>"""
"""<p>This shows the tip of the repository.</p>"""
))
self.hgTipAct.triggered.connect(self.__hgTip)
self.actions.append(self.hgTipAct)
self.vcsRevertAct = E5Action(
self.tr('Revert changes'),
UI.PixmapCache.getIcon("vcsRevert.png"),
self.tr('Re&vert changes'),
0, 0, self, 'mercurial_revert')
self.vcsRevertAct.setStatusTip(self.tr(
'Revert all changes made to the local project'
))
self.vcsRevertAct.setWhatsThis(self.tr(
"""<b>Revert changes</b>"""
"""<p>This reverts all changes made to the local project.</p>"""
))
self.vcsRevertAct.triggered.connect(self.__hgRevert)
self.actions.append(self.vcsRevertAct)
self.vcsMergeAct = E5Action(
self.tr('Merge'),
UI.PixmapCache.getIcon("vcsMerge.png"),
self.tr('Mer&ge changes...'),
0, 0, self, 'mercurial_merge')
self.vcsMergeAct.setStatusTip(self.tr(
'Merge changes of a revision into the local project'
))
self.vcsMergeAct.setWhatsThis(self.tr(
"""<b>Merge</b>"""
"""<p>This merges changes of a revision into the local"""
""" project.</p>"""
))
self.vcsMergeAct.triggered.connect(self._vcsMerge)
self.actions.append(self.vcsMergeAct)
self.hgCancelMergeAct = E5Action(
self.tr('Cancel uncommitted merge'),
self.tr('Cancel uncommitted merge'),
0, 0, self, 'mercurial_cancel_merge')
self.hgCancelMergeAct.setStatusTip(self.tr(
'Cancel an uncommitted merge and lose all changes'
))
self.hgCancelMergeAct.setWhatsThis(self.tr(
"""<b>Cancel uncommitted merge</b>"""
"""<p>This cancels an uncommitted merge causing all changes"""
""" to be lost.</p>"""
))
self.hgCancelMergeAct.triggered.connect(self.__hgCancelMerge)
self.actions.append(self.hgCancelMergeAct)
self.hgReMergeAct = E5Action(
self.tr('Re-Merge'),
UI.PixmapCache.getIcon("vcsMerge.png"),
self.tr('Re-Merge'),
0, 0, self, 'mercurial_remerge')
self.hgReMergeAct.setStatusTip(self.tr(
'Re-Merge all conflicting, unresolved files of the project'
))
self.hgReMergeAct.setWhatsThis(self.tr(
"""<b>Re-Merge</b>"""
"""<p>This re-merges all conflicting, unresolved files of the"""
""" project discarding any previous merge attempt.</p>"""
))
self.hgReMergeAct.triggered.connect(self.__hgReMerge)
self.actions.append(self.hgReMergeAct)
self.hgShowConflictsAct = E5Action(
self.tr('Show conflicts'),
self.tr('Show conflicts...'),
0, 0, self, 'mercurial_show_conflicts')
self.hgShowConflictsAct.setStatusTip(self.tr(
'Show a dialog listing all files with conflicts'
))
self.hgShowConflictsAct.setWhatsThis(self.tr(
"""<b>Show conflicts</b>"""
"""<p>This shows a dialog listing all files which had or still"""
""" have conflicts.</p>"""
))
self.hgShowConflictsAct.triggered.connect(self.__hgShowConflicts)
self.actions.append(self.hgShowConflictsAct)
self.vcsResolveAct = E5Action(
self.tr('Conflicts resolved'),
self.tr('Con&flicts resolved'),
0, 0, self, 'mercurial_resolve')
self.vcsResolveAct.setStatusTip(self.tr(
'Mark all conflicts of the local project as resolved'
))
self.vcsResolveAct.setWhatsThis(self.tr(
"""<b>Conflicts resolved</b>"""
"""<p>This marks all conflicts of the local project as"""
""" resolved.</p>"""
))
self.vcsResolveAct.triggered.connect(self.__hgResolved)
self.actions.append(self.vcsResolveAct)
self.hgUnresolveAct = E5Action(
self.tr('Conflicts unresolved'),
self.tr('Conflicts unresolved'),
0, 0, self, 'mercurial_unresolve')
self.hgUnresolveAct.setStatusTip(self.tr(
'Mark all conflicts of the local project as unresolved'
))
self.hgUnresolveAct.setWhatsThis(self.tr(
"""<b>Conflicts unresolved</b>"""
"""<p>This marks all conflicts of the local project as"""
""" unresolved.</p>"""
))
self.hgUnresolveAct.triggered.connect(self.__hgUnresolved)
self.actions.append(self.hgUnresolveAct)
self.vcsTagAct = E5Action(
self.tr('Tag in repository'),
UI.PixmapCache.getIcon("vcsTag.png"),
self.tr('&Tag in repository...'),
0, 0, self, 'mercurial_tag')
self.vcsTagAct.setStatusTip(self.tr(
'Tag the local project in the repository'
))
self.vcsTagAct.setWhatsThis(self.tr(
"""<b>Tag in repository</b>"""
"""<p>This tags the local project in the repository.</p>"""
))
self.vcsTagAct.triggered.connect(self._vcsTag)
self.actions.append(self.vcsTagAct)
self.hgTagListAct = E5Action(
self.tr('List tags'),
self.tr('List tags...'),
0, 0, self, 'mercurial_list_tags')
self.hgTagListAct.setStatusTip(self.tr(
'List tags of the project'
))
self.hgTagListAct.setWhatsThis(self.tr(
"""<b>List tags</b>"""
"""<p>This lists the tags of the project.</p>"""
))
self.hgTagListAct.triggered.connect(self.__hgTagList)
self.actions.append(self.hgTagListAct)
self.hgBranchListAct = E5Action(
self.tr('List branches'),
self.tr('List branches...'),
0, 0, self, 'mercurial_list_branches')
self.hgBranchListAct.setStatusTip(self.tr(
'List branches of the project'
))
self.hgBranchListAct.setWhatsThis(self.tr(
"""<b>List branches</b>"""
"""<p>This lists the branches of the project.</p>"""
))
self.hgBranchListAct.triggered.connect(self.__hgBranchList)
self.actions.append(self.hgBranchListAct)
self.hgBranchAct = E5Action(
self.tr('Create branch'),
UI.PixmapCache.getIcon("vcsBranch.png"),
self.tr('Create &branch...'),
0, 0, self, 'mercurial_branch')
self.hgBranchAct.setStatusTip(self.tr(
'Create a new branch for the local project in the repository'
))
self.hgBranchAct.setWhatsThis(self.tr(
"""<b>Create branch</b>"""
"""<p>This creates a new branch for the local project """
"""in the repository.</p>"""
))
self.hgBranchAct.triggered.connect(self.__hgBranch)
self.actions.append(self.hgBranchAct)
self.hgPushBranchAct = E5Action(
self.tr('Push new branch'),
self.tr('Push new branch'),
0, 0, self, 'mercurial_push_branch')
self.hgPushBranchAct.setStatusTip(self.tr(
'Push the current branch of the local project as a new named'
' branch'
))
self.hgPushBranchAct.setWhatsThis(self.tr(
"""<b>Push new branch</b>"""
"""<p>This pushes the current branch of the local project"""
""" as a new named branch.</p>"""
))
self.hgPushBranchAct.triggered.connect(self.__hgPushNewBranch)
self.actions.append(self.hgPushBranchAct)
self.hgCloseBranchAct = E5Action(
self.tr('Close branch'),
self.tr('Close branch'),
0, 0, self, 'mercurial_close_branch')
self.hgCloseBranchAct.setStatusTip(self.tr(
'Close the current branch of the local project'
))
self.hgCloseBranchAct.setWhatsThis(self.tr(
"""<b>Close branch</b>"""
"""<p>This closes the current branch of the local project.</p>"""
))
self.hgCloseBranchAct.triggered.connect(self.__hgCloseBranch)
self.actions.append(self.hgCloseBranchAct)
self.hgShowBranchAct = E5Action(
self.tr('Show current branch'),
self.tr('Show current branch'),
0, 0, self, 'mercurial_show_branch')
self.hgShowBranchAct.setStatusTip(self.tr(
'Show the current branch of the project'
))
self.hgShowBranchAct.setWhatsThis(self.tr(
"""<b>Show current branch</b>"""
"""<p>This shows the current branch of the project.</p>"""
))
self.hgShowBranchAct.triggered.connect(self.__hgShowBranch)
self.actions.append(self.hgShowBranchAct)
self.vcsSwitchAct = E5Action(
self.tr('Switch'),
UI.PixmapCache.getIcon("vcsSwitch.png"),
self.tr('S&witch...'),
0, 0, self, 'mercurial_switch')
self.vcsSwitchAct.setStatusTip(self.tr(
'Switch the working directory to another revision'
))
self.vcsSwitchAct.setWhatsThis(self.tr(
"""<b>Switch</b>"""
"""<p>This switches the working directory to another"""
""" revision.</p>"""
))
self.vcsSwitchAct.triggered.connect(self._vcsSwitch)
self.actions.append(self.vcsSwitchAct)
self.vcsCleanupAct = E5Action(
self.tr('Cleanup'),
self.tr('Cleanu&p'),
0, 0, self, 'mercurial_cleanup')
self.vcsCleanupAct.setStatusTip(self.tr(
'Cleanup the local project'
))
self.vcsCleanupAct.setWhatsThis(self.tr(
"""<b>Cleanup</b>"""
"""<p>This performs a cleanup of the local project.</p>"""
))
self.vcsCleanupAct.triggered.connect(self._vcsCleanup)
self.actions.append(self.vcsCleanupAct)
self.vcsCommandAct = E5Action(
self.tr('Execute command'),
self.tr('E&xecute command...'),
0, 0, self, 'mercurial_command')
self.vcsCommandAct.setStatusTip(self.tr(
'Execute an arbitrary Mercurial command'
))
self.vcsCommandAct.setWhatsThis(self.tr(
"""<b>Execute command</b>"""
"""<p>This opens a dialog to enter an arbitrary Mercurial"""
""" command.</p>"""
))
self.vcsCommandAct.triggered.connect(self._vcsCommand)
self.actions.append(self.vcsCommandAct)
self.hgConfigAct = E5Action(
self.tr('Configure'),
self.tr('Configure...'),
0, 0, self, 'mercurial_configure')
self.hgConfigAct.setStatusTip(self.tr(
'Show the configuration dialog with the Mercurial page selected'
))
self.hgConfigAct.setWhatsThis(self.tr(
"""<b>Configure</b>"""
"""<p>Show the configuration dialog with the Mercurial page"""
""" selected.</p>"""
))
self.hgConfigAct.triggered.connect(self.__hgConfigure)
self.actions.append(self.hgConfigAct)
self.hgEditUserConfigAct = E5Action(
self.tr('Edit user configuration'),
self.tr('Edit user configuration...'),
0, 0, self, 'mercurial_user_configure')
self.hgEditUserConfigAct.setStatusTip(self.tr(
'Show an editor to edit the user configuration file'
))
self.hgEditUserConfigAct.setWhatsThis(self.tr(
"""<b>Edit user configuration</b>"""
"""<p>Show an editor to edit the user configuration file.</p>"""
))
self.hgEditUserConfigAct.triggered.connect(self.__hgEditUserConfig)
self.actions.append(self.hgEditUserConfigAct)
self.hgRepoConfigAct = E5Action(
self.tr('Edit repository configuration'),
self.tr('Edit repository configuration...'),
0, 0, self, 'mercurial_repo_configure')
self.hgRepoConfigAct.setStatusTip(self.tr(
'Show an editor to edit the repository configuration file'
))
self.hgRepoConfigAct.setWhatsThis(self.tr(
"""<b>Edit repository configuration</b>"""
"""<p>Show an editor to edit the repository configuration"""
""" file.</p>"""
))
self.hgRepoConfigAct.triggered.connect(self.__hgEditRepoConfig)
self.actions.append(self.hgRepoConfigAct)
self.hgShowConfigAct = E5Action(
self.tr('Show combined configuration settings'),
self.tr('Show combined configuration settings...'),
0, 0, self, 'mercurial_show_config')
self.hgShowConfigAct.setStatusTip(self.tr(
'Show the combined configuration settings from all configuration'
' files'
))
self.hgShowConfigAct.setWhatsThis(self.tr(
"""<b>Show combined configuration settings</b>"""
"""<p>This shows the combined configuration settings"""
""" from all configuration files.</p>"""
))
self.hgShowConfigAct.triggered.connect(self.__hgShowConfig)
self.actions.append(self.hgShowConfigAct)
self.hgShowPathsAct = E5Action(
self.tr('Show paths'),
self.tr('Show paths...'),
0, 0, self, 'mercurial_show_paths')
self.hgShowPathsAct.setStatusTip(self.tr(
'Show the aliases for remote repositories'
))
self.hgShowPathsAct.setWhatsThis(self.tr(
"""<b>Show paths</b>"""
"""<p>This shows the aliases for remote repositories.</p>"""
))
self.hgShowPathsAct.triggered.connect(self.__hgShowPaths)
self.actions.append(self.hgShowPathsAct)
self.hgVerifyAct = E5Action(
self.tr('Verify repository'),
self.tr('Verify repository...'),
0, 0, self, 'mercurial_verify')
self.hgVerifyAct.setStatusTip(self.tr(
'Verify the integrity of the repository'
))
self.hgVerifyAct.setWhatsThis(self.tr(
"""<b>Verify repository</b>"""
"""<p>This verifies the integrity of the repository.</p>"""
))
self.hgVerifyAct.triggered.connect(self.__hgVerify)
self.actions.append(self.hgVerifyAct)
self.hgRecoverAct = E5Action(
self.tr('Recover'),
self.tr('Recover...'),
0, 0, self, 'mercurial_recover')
self.hgRecoverAct.setStatusTip(self.tr(
'Recover from an interrupted transaction'
))
self.hgRecoverAct.setWhatsThis(self.tr(
"""<b>Recover</b>"""
"""<p>This recovers from an interrupted transaction.</p>"""
))
self.hgRecoverAct.triggered.connect(self.__hgRecover)
self.actions.append(self.hgRecoverAct)
self.hgIdentifyAct = E5Action(
self.tr('Identify'),
self.tr('Identify...'),
0, 0, self, 'mercurial_identify')
self.hgIdentifyAct.setStatusTip(self.tr(
'Identify the project directory'
))
self.hgIdentifyAct.setWhatsThis(self.tr(
"""<b>Identify</b>"""
"""<p>This identifies the project directory.</p>"""
))
self.hgIdentifyAct.triggered.connect(self.__hgIdentify)
self.actions.append(self.hgIdentifyAct)
self.hgCreateIgnoreAct = E5Action(
self.tr('Create .hgignore'),
self.tr('Create .hgignore'),
            0, 0, self, 'mercurial_create_ignore')
self.hgCreateIgnoreAct.setStatusTip(self.tr(
'Create a .hgignore file with default values'
))
self.hgCreateIgnoreAct.setWhatsThis(self.tr(
"""<b>Create .hgignore</b>"""
"""<p>This creates a .hgignore file with default values.</p>"""
))
self.hgCreateIgnoreAct.triggered.connect(self.__hgCreateIgnore)
self.actions.append(self.hgCreateIgnoreAct)
self.hgBundleAct = E5Action(
self.tr('Create changegroup'),
self.tr('Create changegroup...'),
0, 0, self, 'mercurial_bundle')
self.hgBundleAct.setStatusTip(self.tr(
'Create changegroup file collecting changesets'
))
self.hgBundleAct.setWhatsThis(self.tr(
"""<b>Create changegroup</b>"""
"""<p>This creates a changegroup file collecting selected"""
""" changesets (hg bundle).</p>"""
))
self.hgBundleAct.triggered.connect(self.__hgBundle)
self.actions.append(self.hgBundleAct)
self.hgPreviewBundleAct = E5Action(
self.tr('Preview changegroup'),
self.tr('Preview changegroup...'),
0, 0, self, 'mercurial_preview_bundle')
self.hgPreviewBundleAct.setStatusTip(self.tr(
'Preview a changegroup file containing a collection of changesets'
))
self.hgPreviewBundleAct.setWhatsThis(self.tr(
"""<b>Preview changegroup</b>"""
"""<p>This previews a changegroup file containing a collection"""
""" of changesets.</p>"""
))
self.hgPreviewBundleAct.triggered.connect(self.__hgPreviewBundle)
self.actions.append(self.hgPreviewBundleAct)
self.hgUnbundleAct = E5Action(
self.tr('Apply changegroups'),
self.tr('Apply changegroups...'),
0, 0, self, 'mercurial_unbundle')
self.hgUnbundleAct.setStatusTip(self.tr(
'Apply one or several changegroup files'
))
self.hgUnbundleAct.setWhatsThis(self.tr(
"""<b>Apply changegroups</b>"""
"""<p>This applies one or several changegroup files generated by"""
""" the 'Create changegroup' action (hg unbundle).</p>"""
))
self.hgUnbundleAct.triggered.connect(self.__hgUnbundle)
self.actions.append(self.hgUnbundleAct)
self.hgBisectGoodAct = E5Action(
self.tr('Mark as "good"'),
self.tr('Mark as "good"...'),
0, 0, self, 'mercurial_bisect_good')
self.hgBisectGoodAct.setStatusTip(self.tr(
'Mark a selectable changeset as good'
))
self.hgBisectGoodAct.setWhatsThis(self.tr(
"""<b>Mark as good</b>"""
"""<p>This marks a selectable changeset as good.</p>"""
))
self.hgBisectGoodAct.triggered.connect(self.__hgBisectGood)
self.actions.append(self.hgBisectGoodAct)
self.hgBisectBadAct = E5Action(
self.tr('Mark as "bad"'),
self.tr('Mark as "bad"...'),
0, 0, self, 'mercurial_bisect_bad')
self.hgBisectBadAct.setStatusTip(self.tr(
'Mark a selectable changeset as bad'
))
self.hgBisectBadAct.setWhatsThis(self.tr(
"""<b>Mark as bad</b>"""
"""<p>This marks a selectable changeset as bad.</p>"""
))
self.hgBisectBadAct.triggered.connect(self.__hgBisectBad)
self.actions.append(self.hgBisectBadAct)
self.hgBisectSkipAct = E5Action(
self.tr('Skip'),
self.tr('Skip...'),
0, 0, self, 'mercurial_bisect_skip')
self.hgBisectSkipAct.setStatusTip(self.tr(
'Skip a selectable changeset'
))
self.hgBisectSkipAct.setWhatsThis(self.tr(
"""<b>Skip</b>"""
"""<p>This skips a selectable changeset.</p>"""
))
self.hgBisectSkipAct.triggered.connect(self.__hgBisectSkip)
self.actions.append(self.hgBisectSkipAct)
self.hgBisectResetAct = E5Action(
self.tr('Reset'),
self.tr('Reset'),
0, 0, self, 'mercurial_bisect_reset')
self.hgBisectResetAct.setStatusTip(self.tr(
'Reset the bisect search data'
))
self.hgBisectResetAct.setWhatsThis(self.tr(
"""<b>Reset</b>"""
"""<p>This resets the bisect search data.</p>"""
))
self.hgBisectResetAct.triggered.connect(self.__hgBisectReset)
self.actions.append(self.hgBisectResetAct)
self.hgBackoutAct = E5Action(
self.tr('Back out changeset'),
self.tr('Back out changeset'),
0, 0, self, 'mercurial_backout')
self.hgBackoutAct.setStatusTip(self.tr(
'Back out changes of an earlier changeset'
))
self.hgBackoutAct.setWhatsThis(self.tr(
"""<b>Back out changeset</b>"""
"""<p>This backs out changes of an earlier changeset.</p>"""
))
self.hgBackoutAct.triggered.connect(self.__hgBackout)
self.actions.append(self.hgBackoutAct)
self.hgRollbackAct = E5Action(
self.tr('Rollback last transaction'),
self.tr('Rollback last transaction'),
0, 0, self, 'mercurial_rollback')
self.hgRollbackAct.setStatusTip(self.tr(
'Rollback the last transaction'
))
self.hgRollbackAct.setWhatsThis(self.tr(
"""<b>Rollback last transaction</b>"""
"""<p>This performs a rollback of the last transaction."""
""" Transactions are used to encapsulate the effects of all"""
""" commands that create new changesets or propagate existing"""
""" changesets into a repository. For example, the following"""
""" commands are transactional, and their effects can be"""
""" rolled back:<ul>"""
"""<li>commit</li>"""
"""<li>import</li>"""
"""<li>pull</li>"""
"""<li>push (with this repository as the destination)</li>"""
"""<li>unbundle</li>"""
"""</ul>"""
"""</p><p><strong>This command is dangerous. Please use with"""
""" care. </strong></p>"""
))
self.hgRollbackAct.triggered.connect(self.__hgRollback)
self.actions.append(self.hgRollbackAct)
self.hgServeAct = E5Action(
self.tr('Serve project repository'),
self.tr('Serve project repository...'),
0, 0, self, 'mercurial_serve')
self.hgServeAct.setStatusTip(self.tr(
'Serve the project repository'
))
self.hgServeAct.setWhatsThis(self.tr(
"""<b>Serve project repository</b>"""
"""<p>This serves the project repository.</p>"""
))
self.hgServeAct.triggered.connect(self.__hgServe)
self.actions.append(self.hgServeAct)
self.hgImportAct = E5Action(
self.tr('Import Patch'),
self.tr('Import Patch...'),
0, 0, self, 'mercurial_import')
self.hgImportAct.setStatusTip(self.tr(
'Import a patch from a patch file'
))
self.hgImportAct.setWhatsThis(self.tr(
"""<b>Import Patch</b>"""
"""<p>This imports a patch from a patch file into the"""
""" project.</p>"""
))
self.hgImportAct.triggered.connect(self.__hgImport)
self.actions.append(self.hgImportAct)
self.hgExportAct = E5Action(
self.tr('Export Patches'),
self.tr('Export Patches...'),
0, 0, self, 'mercurial_export')
self.hgExportAct.setStatusTip(self.tr(
'Export revisions to patch files'
))
self.hgExportAct.setWhatsThis(self.tr(
"""<b>Export Patches</b>"""
"""<p>This exports revisions of the project to patch files.</p>"""
))
self.hgExportAct.triggered.connect(self.__hgExport)
self.actions.append(self.hgExportAct)
self.hgPhaseAct = E5Action(
self.tr('Change Phase'),
self.tr('Change Phase...'),
0, 0, self, 'mercurial_change_phase')
self.hgPhaseAct.setStatusTip(self.tr(
'Change the phase of revisions'
))
self.hgPhaseAct.setWhatsThis(self.tr(
"""<b>Change Phase</b>"""
"""<p>This changes the phase of revisions.</p>"""
))
self.hgPhaseAct.triggered.connect(self.__hgPhase)
self.actions.append(self.hgPhaseAct)
self.hgGraftAct = E5Action(
self.tr('Copy Changesets'),
UI.PixmapCache.getIcon("vcsGraft.png"),
self.tr('Copy Changesets'),
0, 0, self, 'mercurial_graft')
self.hgGraftAct.setStatusTip(self.tr(
'Copies changesets from another branch'
))
self.hgGraftAct.setWhatsThis(self.tr(
"""<b>Copy Changesets</b>"""
"""<p>This copies changesets from another branch on top of the"""
""" current working directory with the user, date and"""
""" description of the original changeset.</p>"""
))
self.hgGraftAct.triggered.connect(self.__hgGraft)
self.actions.append(self.hgGraftAct)
self.hgGraftContinueAct = E5Action(
self.tr('Continue Copying Session'),
self.tr('Continue Copying Session'),
0, 0, self, 'mercurial_graft_continue')
self.hgGraftContinueAct.setStatusTip(self.tr(
'Continue the last copying session after conflicts were resolved'
))
self.hgGraftContinueAct.setWhatsThis(self.tr(
"""<b>Continue Copying Session</b>"""
"""<p>This continues the last copying session after conflicts"""
""" were resolved.</p>"""
))
self.hgGraftContinueAct.triggered.connect(self.__hgGraftContinue)
self.actions.append(self.hgGraftContinueAct)
self.hgAddSubrepoAct = E5Action(
self.tr('Add'),
UI.PixmapCache.getIcon("vcsAdd.png"),
self.tr('Add...'),
0, 0, self, 'mercurial_add_subrepo')
self.hgAddSubrepoAct.setStatusTip(self.tr(
'Add a sub-repository'
))
self.hgAddSubrepoAct.setWhatsThis(self.tr(
"""<b>Add...</b>"""
"""<p>Add a sub-repository to the project.</p>"""
))
self.hgAddSubrepoAct.triggered.connect(self.__hgAddSubrepository)
self.actions.append(self.hgAddSubrepoAct)
self.hgRemoveSubreposAct = E5Action(
self.tr('Remove'),
UI.PixmapCache.getIcon("vcsRemove.png"),
self.tr('Remove...'),
0, 0, self, 'mercurial_remove_subrepos')
self.hgRemoveSubreposAct.setStatusTip(self.tr(
'Remove sub-repositories'
))
self.hgRemoveSubreposAct.setWhatsThis(self.tr(
"""<b>Remove...</b>"""
"""<p>Remove sub-repositories from the project.</p>"""
))
self.hgRemoveSubreposAct.triggered.connect(
self.__hgRemoveSubrepositories)
self.actions.append(self.hgRemoveSubreposAct)
self.hgArchiveAct = E5Action(
self.tr('Create unversioned archive'),
UI.PixmapCache.getIcon("vcsExport.png"),
self.tr('Create unversioned archive...'),
0, 0, self, 'mercurial_archive')
self.hgArchiveAct.setStatusTip(self.tr(
'Create an unversioned archive from the repository'
))
self.hgArchiveAct.setWhatsThis(self.tr(
"""<b>Create unversioned archive...</b>"""
"""<p>This creates an unversioned archive from the"""
""" repository.</p>"""
))
self.hgArchiveAct.triggered.connect(self.__hgArchive)
self.actions.append(self.hgArchiveAct)
self.hgBookmarksListAct = E5Action(
self.tr('List bookmarks'),
UI.PixmapCache.getIcon("listBookmarks.png"),
self.tr('List bookmarks...'),
0, 0, self, 'mercurial_list_bookmarks')
self.hgBookmarksListAct.setStatusTip(self.tr(
'List bookmarks of the project'
))
self.hgBookmarksListAct.setWhatsThis(self.tr(
"""<b>List bookmarks</b>"""
"""<p>This lists the bookmarks of the project.</p>"""
))
self.hgBookmarksListAct.triggered.connect(self.__hgBookmarksList)
self.actions.append(self.hgBookmarksListAct)
self.hgBookmarkDefineAct = E5Action(
self.tr('Define bookmark'),
UI.PixmapCache.getIcon("addBookmark.png"),
self.tr('Define bookmark...'),
0, 0, self, 'mercurial_define_bookmark')
self.hgBookmarkDefineAct.setStatusTip(self.tr(
'Define a bookmark for the project'
))
self.hgBookmarkDefineAct.setWhatsThis(self.tr(
"""<b>Define bookmark</b>"""
"""<p>This defines a bookmark for the project.</p>"""
))
self.hgBookmarkDefineAct.triggered.connect(self.__hgBookmarkDefine)
self.actions.append(self.hgBookmarkDefineAct)
self.hgBookmarkDeleteAct = E5Action(
self.tr('Delete bookmark'),
UI.PixmapCache.getIcon("deleteBookmark.png"),
self.tr('Delete bookmark...'),
0, 0, self, 'mercurial_delete_bookmark')
self.hgBookmarkDeleteAct.setStatusTip(self.tr(
'Delete a bookmark of the project'
))
self.hgBookmarkDeleteAct.setWhatsThis(self.tr(
"""<b>Delete bookmark</b>"""
"""<p>This deletes a bookmark of the project.</p>"""
))
self.hgBookmarkDeleteAct.triggered.connect(self.__hgBookmarkDelete)
self.actions.append(self.hgBookmarkDeleteAct)
self.hgBookmarkRenameAct = E5Action(
self.tr('Rename bookmark'),
UI.PixmapCache.getIcon("renameBookmark.png"),
self.tr('Rename bookmark...'),
0, 0, self, 'mercurial_rename_bookmark')
self.hgBookmarkRenameAct.setStatusTip(self.tr(
'Rename a bookmark of the project'
))
self.hgBookmarkRenameAct.setWhatsThis(self.tr(
"""<b>Rename bookmark</b>"""
"""<p>This renames a bookmark of the project.</p>"""
))
self.hgBookmarkRenameAct.triggered.connect(self.__hgBookmarkRename)
self.actions.append(self.hgBookmarkRenameAct)
self.hgBookmarkMoveAct = E5Action(
self.tr('Move bookmark'),
UI.PixmapCache.getIcon("moveBookmark.png"),
self.tr('Move bookmark...'),
0, 0, self, 'mercurial_move_bookmark')
self.hgBookmarkMoveAct.setStatusTip(self.tr(
'Move a bookmark of the project'
))
self.hgBookmarkMoveAct.setWhatsThis(self.tr(
"""<b>Move bookmark</b>"""
"""<p>This moves a bookmark of the project to another"""
""" changeset.</p>"""
))
self.hgBookmarkMoveAct.triggered.connect(self.__hgBookmarkMove)
self.actions.append(self.hgBookmarkMoveAct)
self.hgBookmarkIncomingAct = E5Action(
self.tr('Show incoming bookmarks'),
UI.PixmapCache.getIcon("incomingBookmark.png"),
self.tr('Show incoming bookmarks'),
0, 0, self, 'mercurial_incoming_bookmarks')
self.hgBookmarkIncomingAct.setStatusTip(self.tr(
'Show a list of incoming bookmarks'
))
self.hgBookmarkIncomingAct.setWhatsThis(self.tr(
"""<b>Show incoming bookmarks</b>"""
"""<p>This shows a list of new bookmarks available at the remote"""
""" repository.</p>"""
))
self.hgBookmarkIncomingAct.triggered.connect(
self.__hgBookmarkIncoming)
self.actions.append(self.hgBookmarkIncomingAct)
self.hgBookmarkPullAct = E5Action(
self.tr('Pull bookmark'),
UI.PixmapCache.getIcon("pullBookmark.png"),
self.tr('Pull bookmark'),
0, 0, self, 'mercurial_pull_bookmark')
self.hgBookmarkPullAct.setStatusTip(self.tr(
'Pull a bookmark from a remote repository'
))
self.hgBookmarkPullAct.setWhatsThis(self.tr(
"""<b>Pull bookmark</b>"""
"""<p>This pulls a bookmark from a remote repository into the """
"""local repository.</p>"""
))
self.hgBookmarkPullAct.triggered.connect(self.__hgBookmarkPull)
self.actions.append(self.hgBookmarkPullAct)
self.hgBookmarkOutgoingAct = E5Action(
self.tr('Show outgoing bookmarks'),
UI.PixmapCache.getIcon("outgoingBookmark.png"),
self.tr('Show outgoing bookmarks'),
0, 0, self, 'mercurial_outgoing_bookmarks')
self.hgBookmarkOutgoingAct.setStatusTip(self.tr(
'Show a list of outgoing bookmarks'
))
self.hgBookmarkOutgoingAct.setWhatsThis(self.tr(
"""<b>Show outgoing bookmarks</b>"""
"""<p>This shows a list of new bookmarks available at the local"""
""" repository.</p>"""
))
self.hgBookmarkOutgoingAct.triggered.connect(
self.__hgBookmarkOutgoing)
self.actions.append(self.hgBookmarkOutgoingAct)
self.hgBookmarkPushAct = E5Action(
self.tr('Push bookmark'),
UI.PixmapCache.getIcon("pushBookmark.png"),
self.tr('Push bookmark'),
0, 0, self, 'mercurial_push_bookmark')
self.hgBookmarkPushAct.setStatusTip(self.tr(
'Push a bookmark to a remote repository'
))
self.hgBookmarkPushAct.setWhatsThis(self.tr(
"""<b>Push bookmark</b>"""
"""<p>This pushes a bookmark from the local repository to a """
"""remote repository.</p>"""
))
self.hgBookmarkPushAct.triggered.connect(self.__hgBookmarkPush)
self.actions.append(self.hgBookmarkPushAct)
def __checkActions(self):
"""
Private slot to set the enabled status of actions.
"""
self.hgPullAct.setEnabled(self.vcs.canPull())
self.hgIncomingAct.setEnabled(self.vcs.canPull())
self.hgBookmarkPullAct.setEnabled(self.vcs.canPull())
self.hgBookmarkIncomingAct.setEnabled(self.vcs.canPull())
self.hgPushAct.setEnabled(self.vcs.canPush())
self.hgPushBranchAct.setEnabled(self.vcs.canPush())
self.hgPushForcedAct.setEnabled(self.vcs.canPush())
self.hgOutgoingAct.setEnabled(self.vcs.canPush())
self.hgBookmarkPushAct.setEnabled(self.vcs.canPush())
self.hgBookmarkOutgoingAct.setEnabled(self.vcs.canPush())
def initMenu(self, menu):
"""
Public method to generate the VCS menu.
@param menu reference to the menu to be populated (QMenu)
"""
menu.clear()
self.subMenus = []
adminMenu = QMenu(self.tr("Administration"), menu)
adminMenu.setTearOffEnabled(True)
adminMenu.addAction(self.hgHeadsAct)
adminMenu.addAction(self.hgParentsAct)
adminMenu.addAction(self.hgTipAct)
adminMenu.addAction(self.hgShowBranchAct)
adminMenu.addAction(self.hgIdentifyAct)
adminMenu.addSeparator()
adminMenu.addAction(self.hgShowPathsAct)
adminMenu.addSeparator()
adminMenu.addAction(self.hgShowConfigAct)
adminMenu.addAction(self.hgRepoConfigAct)
adminMenu.addSeparator()
adminMenu.addAction(self.hgCreateIgnoreAct)
adminMenu.addSeparator()
adminMenu.addAction(self.hgRecoverAct)
adminMenu.addSeparator()
adminMenu.addAction(self.hgBackoutAct)
adminMenu.addAction(self.hgRollbackAct)
adminMenu.addSeparator()
adminMenu.addAction(self.hgVerifyAct)
self.subMenus.append(adminMenu)
specialsMenu = QMenu(self.tr("Specials"), menu)
specialsMenu.setTearOffEnabled(True)
specialsMenu.addAction(self.hgArchiveAct)
specialsMenu.addSeparator()
specialsMenu.addAction(self.hgPushForcedAct)
specialsMenu.addSeparator()
specialsMenu.addAction(self.hgServeAct)
self.subMenus.append(specialsMenu)
bundleMenu = QMenu(self.tr("Changegroup Management"), menu)
bundleMenu.setTearOffEnabled(True)
bundleMenu.addAction(self.hgBundleAct)
bundleMenu.addAction(self.hgPreviewBundleAct)
bundleMenu.addAction(self.hgUnbundleAct)
self.subMenus.append(bundleMenu)
patchMenu = QMenu(self.tr("Patch Management"), menu)
patchMenu.setTearOffEnabled(True)
patchMenu.addAction(self.hgImportAct)
patchMenu.addAction(self.hgExportAct)
self.subMenus.append(patchMenu)
bisectMenu = QMenu(self.tr("Bisect"), menu)
bisectMenu.setTearOffEnabled(True)
bisectMenu.addAction(self.hgBisectGoodAct)
bisectMenu.addAction(self.hgBisectBadAct)
bisectMenu.addAction(self.hgBisectSkipAct)
bisectMenu.addAction(self.hgBisectResetAct)
self.subMenus.append(bisectMenu)
tagsMenu = QMenu(self.tr("Tags"), menu)
tagsMenu.setIcon(UI.PixmapCache.getIcon("vcsTag.png"))
tagsMenu.setTearOffEnabled(True)
tagsMenu.addAction(self.vcsTagAct)
tagsMenu.addAction(self.hgTagListAct)
self.subMenus.append(tagsMenu)
branchesMenu = QMenu(self.tr("Branches"), menu)
branchesMenu.setIcon(UI.PixmapCache.getIcon("vcsBranch.png"))
branchesMenu.setTearOffEnabled(True)
branchesMenu.addAction(self.hgBranchAct)
branchesMenu.addAction(self.hgPushBranchAct)
branchesMenu.addAction(self.hgCloseBranchAct)
branchesMenu.addAction(self.hgBranchListAct)
self.subMenus.append(branchesMenu)
bookmarksMenu = QMenu(self.tr("Bookmarks"), menu)
bookmarksMenu.setIcon(UI.PixmapCache.getIcon("bookmark22.png"))
bookmarksMenu.setTearOffEnabled(True)
bookmarksMenu.addAction(self.hgBookmarkDefineAct)
bookmarksMenu.addAction(self.hgBookmarkDeleteAct)
bookmarksMenu.addAction(self.hgBookmarkRenameAct)
bookmarksMenu.addAction(self.hgBookmarkMoveAct)
bookmarksMenu.addSeparator()
bookmarksMenu.addAction(self.hgBookmarksListAct)
bookmarksMenu.addSeparator()
bookmarksMenu.addAction(self.hgBookmarkIncomingAct)
bookmarksMenu.addAction(self.hgBookmarkPullAct)
bookmarksMenu.addSeparator()
bookmarksMenu.addAction(self.hgBookmarkOutgoingAct)
bookmarksMenu.addAction(self.hgBookmarkPushAct)
self.subMenus.append(bookmarksMenu)
self.__extensionsMenu = QMenu(self.tr("Extensions"), menu)
self.__extensionsMenu.setTearOffEnabled(True)
self.__extensionsMenu.aboutToShow.connect(self.__showExtensionMenu)
self.extensionMenus = {}
for extensionMenuTitle in sorted(self.__extensionMenuTitles):
extensionName = self.__extensionMenuTitles[extensionMenuTitle]
self.extensionMenus[extensionName] = self.__extensionsMenu.addMenu(
self.__extensions[extensionName].initMenu(
self.__extensionsMenu))
self.vcs.activeExtensionsChanged.connect(self.__showExtensionMenu)
if self.vcs.version >= (2, 0):
graftMenu = QMenu(self.tr("Graft"), menu)
graftMenu.setIcon(UI.PixmapCache.getIcon("vcsGraft.png"))
graftMenu.setTearOffEnabled(True)
graftMenu.addAction(self.hgGraftAct)
graftMenu.addAction(self.hgGraftContinueAct)
else:
graftMenu = None
subrepoMenu = QMenu(self.tr("Sub-Repository"), menu)
subrepoMenu.setTearOffEnabled(True)
subrepoMenu.addAction(self.hgAddSubrepoAct)
subrepoMenu.addAction(self.hgRemoveSubreposAct)
changesMenu = QMenu(self.tr("Manage Changes"), menu)
changesMenu.setTearOffEnabled(True)
changesMenu.addAction(self.vcsRevertAct)
changesMenu.addSeparator()
changesMenu.addAction(self.vcsMergeAct)
changesMenu.addAction(self.hgShowConflictsAct)
changesMenu.addAction(self.vcsResolveAct)
changesMenu.addAction(self.hgUnresolveAct)
changesMenu.addAction(self.hgReMergeAct)
changesMenu.addAction(self.hgCancelMergeAct)
if self.vcs.version >= (2, 1):
changesMenu.addSeparator()
changesMenu.addAction(self.hgPhaseAct)
act = menu.addAction(
UI.PixmapCache.getIcon(
os.path.join("VcsPlugins", "vcsMercurial", "icons",
"mercurial.png")),
self.vcs.vcsName(), self._vcsInfoDisplay)
font = act.font()
font.setBold(True)
act.setFont(font)
menu.addSeparator()
menu.addAction(self.hgIncomingAct)
menu.addAction(self.hgPullAct)
menu.addAction(self.vcsUpdateAct)
menu.addSeparator()
menu.addAction(self.vcsCommitAct)
menu.addAction(self.hgOutgoingAct)
menu.addAction(self.hgPushAct)
menu.addSeparator()
menu.addMenu(changesMenu)
menu.addSeparator()
if graftMenu is not None:
menu.addMenu(graftMenu)
menu.addSeparator()
menu.addMenu(bundleMenu)
menu.addMenu(patchMenu)
menu.addSeparator()
menu.addMenu(tagsMenu)
menu.addMenu(branchesMenu)
menu.addMenu(bookmarksMenu)
menu.addSeparator()
menu.addAction(self.vcsLogAct)
menu.addAction(self.hgLogBrowserAct)
menu.addSeparator()
menu.addAction(self.vcsStatusAct)
menu.addAction(self.hgSummaryAct)
menu.addSeparator()
menu.addAction(self.vcsDiffAct)
menu.addAction(self.hgExtDiffAct)
menu.addSeparator()
menu.addMenu(self.__extensionsMenu)
menu.addSeparator()
menu.addAction(self.vcsSwitchAct)
menu.addSeparator()
menu.addMenu(subrepoMenu)
menu.addSeparator()
menu.addMenu(bisectMenu)
menu.addSeparator()
menu.addAction(self.vcsCleanupAct)
menu.addSeparator()
menu.addAction(self.vcsCommandAct)
menu.addSeparator()
menu.addMenu(adminMenu)
menu.addMenu(specialsMenu)
menu.addSeparator()
menu.addAction(self.hgEditUserConfigAct)
menu.addAction(self.hgConfigAct)
menu.addSeparator()
menu.addAction(self.vcsNewAct)
menu.addAction(self.vcsExportAct)
def initToolbar(self, ui, toolbarManager):
"""
Public slot to initialize the VCS toolbar.
@param ui reference to the main window (UserInterface)
@param toolbarManager reference to a toolbar manager object
(E5ToolBarManager)
"""
self.__toolbar = QToolBar(self.tr("Mercurial"), ui)
self.__toolbar.setIconSize(UI.Config.ToolBarIconSize)
self.__toolbar.setObjectName("MercurialToolbar")
self.__toolbar.setToolTip(self.tr('Mercurial'))
self.__toolbar.addAction(self.hgLogBrowserAct)
self.__toolbar.addAction(self.vcsStatusAct)
self.__toolbar.addSeparator()
self.__toolbar.addAction(self.vcsDiffAct)
self.__toolbar.addSeparator()
self.__toolbar.addAction(self.vcsNewAct)
self.__toolbar.addAction(self.vcsExportAct)
self.__toolbar.addSeparator()
title = self.__toolbar.windowTitle()
toolbarManager.addToolBar(self.__toolbar, title)
toolbarManager.addAction(self.hgPullAct, title)
toolbarManager.addAction(self.vcsUpdateAct, title)
toolbarManager.addAction(self.vcsCommitAct, title)
toolbarManager.addAction(self.hgPushAct, title)
toolbarManager.addAction(self.hgPushForcedAct, title)
toolbarManager.addAction(self.vcsLogAct, title)
toolbarManager.addAction(self.hgExtDiffAct, title)
toolbarManager.addAction(self.hgSummaryAct, title)
toolbarManager.addAction(self.vcsRevertAct, title)
toolbarManager.addAction(self.vcsMergeAct, title)
toolbarManager.addAction(self.hgReMergeAct, title)
toolbarManager.addAction(self.vcsTagAct, title)
toolbarManager.addAction(self.hgBranchAct, title)
toolbarManager.addAction(self.vcsSwitchAct, title)
toolbarManager.addAction(self.hgGraftAct, title)
toolbarManager.addAction(self.hgAddSubrepoAct, title)
toolbarManager.addAction(self.hgRemoveSubreposAct, title)
toolbarManager.addAction(self.hgArchiveAct, title)
toolbarManager.addAction(self.hgBookmarksListAct, title)
toolbarManager.addAction(self.hgBookmarkDefineAct, title)
toolbarManager.addAction(self.hgBookmarkDeleteAct, title)
toolbarManager.addAction(self.hgBookmarkRenameAct, title)
toolbarManager.addAction(self.hgBookmarkMoveAct, title)
toolbarManager.addAction(self.hgBookmarkPullAct, title)
toolbarManager.addAction(self.hgBookmarkPushAct, title)
self.__toolbar.setEnabled(False)
self.__toolbar.setVisible(False)
ui.registerToolbar("mercurial", self.__toolbar.windowTitle(),
self.__toolbar)
ui.addToolBar(self.__toolbar)
def removeToolbar(self, ui, toolbarManager):
"""
Public method to remove a toolbar created by initToolbar().
@param ui reference to the main window (UserInterface)
@param toolbarManager reference to a toolbar manager object
(E5ToolBarManager)
"""
ui.removeToolBar(self.__toolbar)
ui.unregisterToolbar("mercurial")
title = self.__toolbar.windowTitle()
toolbarManager.removeCategoryActions(title)
toolbarManager.removeToolBar(self.__toolbar)
self.__toolbar.deleteLater()
self.__toolbar = None
def showMenu(self):
"""
Public slot called before the vcs menu is shown.
"""
super(HgProjectHelper, self).showMenu()
self.__checkActions()
def shutdown(self):
"""
Public method to perform shutdown actions.
"""
self.vcs.activeExtensionsChanged.disconnect(self.__showExtensionMenu)
self.vcs.iniFileChanged.disconnect(self.__checkActions)
# close torn off sub menus
for menu in self.subMenus:
if menu.isTearOffMenuVisible():
menu.hideTearOffMenu()
# close torn off extension menus
for extensionName in self.extensionMenus:
self.__extensions[extensionName].shutdown()
menu = self.extensionMenus[extensionName].menu()
if menu.isTearOffMenuVisible():
menu.hideTearOffMenu()
if self.__extensionsMenu.isTearOffMenuVisible():
self.__extensionsMenu.hideTearOffMenu()
def __showExtensionMenu(self):
"""
Private slot showing the extensions menu.
"""
for extensionName in self.extensionMenus:
self.extensionMenus[extensionName].setEnabled(
self.vcs.isExtensionActive(extensionName))
if not self.extensionMenus[extensionName].isEnabled() and \
self.extensionMenus[extensionName].menu()\
.isTearOffMenuVisible():
self.extensionMenus[extensionName].menu().hideTearOffMenu()
def __hgExtendedDiff(self):
"""
Private slot used to perform an hg diff with the selection of revisions.
"""
self.vcs.hgExtendedDiff(self.project.ppath)
def __hgIncoming(self):
"""
Private slot used to show the log of changes coming into the
repository.
"""
self.vcs.hgIncoming(self.project.ppath)
def __hgOutgoing(self):
"""
Private slot used to show the log of changes going out of the
repository.
"""
self.vcs.hgOutgoing(self.project.ppath)
def __hgPull(self):
"""
Private slot used to pull changes from a remote repository.
"""
shouldReopen = self.vcs.hgPull(self.project.ppath)
if shouldReopen:
res = E5MessageBox.yesNo(
self.parent(),
self.tr("Pull"),
self.tr("""The project should be reread. Do this now?"""),
yesDefault=True)
if res:
self.project.reopenProject()
def __hgPush(self):
"""
Private slot used to push changes to a remote repository.
"""
self.vcs.hgPush(self.project.ppath)
def __hgPushForced(self):
"""
Private slot used to push changes to a remote repository using
the force option.
"""
self.vcs.hgPush(self.project.ppath, force=True)
def __hgHeads(self):
"""
Private slot used to show the heads of the repository.
"""
self.vcs.hgInfo(self.project.ppath, mode="heads")
def __hgParents(self):
"""
Private slot used to show the parents of the repository.
"""
self.vcs.hgInfo(self.project.ppath, mode="parents")
def __hgTip(self):
"""
Private slot used to show the tip of the repository.
"""
self.vcs.hgInfo(self.project.ppath, mode="tip")
def __hgResolved(self):
"""
Private slot used to mark conflicts of the local project as being
resolved.
"""
self.vcs.hgResolved(self.project.ppath)
def __hgUnresolved(self):
"""
Private slot used to mark conflicts of the local project as being
unresolved.
"""
self.vcs.hgResolved(self.project.ppath, unresolve=True)
def __hgCancelMerge(self):
"""
Private slot used to cancel an uncommitted merge.
"""
self.vcs.hgCancelMerge(self.project.ppath)
def __hgShowConflicts(self):
"""
Private slot used to list all files with conflicts.
"""
self.vcs.hgConflicts(self.project.ppath)
def __hgReMerge(self):
"""
Private slot used to re-merge all conflicting, unresolved files.
"""
self.vcs.hgReMerge(self.project.ppath)
def __hgTagList(self):
"""
Private slot used to list the tags of the project.
"""
self.vcs.hgListTagBranch(self.project.ppath, True)
def __hgBranchList(self):
"""
Private slot used to list the branches of the project.
"""
self.vcs.hgListTagBranch(self.project.ppath, False)
def __hgBranch(self):
"""
Private slot used to create a new branch for the project.
"""
self.vcs.hgBranch(self.project.ppath)
def __hgShowBranch(self):
"""
Private slot used to show the current branch for the project.
"""
self.vcs.hgShowBranch(self.project.ppath)
def __hgConfigure(self):
"""
Private method to open the configuration dialog.
"""
e5App().getObject("UserInterface").showPreferences("zzz_mercurialPage")
def __hgCloseBranch(self):
"""
Private slot used to close the current branch of the local project.
"""
if Preferences.getVCS("AutoSaveProject"):
self.project.saveProject()
if Preferences.getVCS("AutoSaveFiles"):
self.project.saveAllScripts()
self.vcs.vcsCommit(self.project.ppath, '', closeBranch=True)
def __hgPushNewBranch(self):
"""
Private slot to push a new named branch.
"""
self.vcs.hgPush(self.project.ppath, newBranch=True)
def __hgEditUserConfig(self):
"""
Private slot used to edit the user configuration file.
"""
self.vcs.hgEditUserConfig()
def __hgEditRepoConfig(self):
"""
Private slot used to edit the repository configuration file.
"""
self.vcs.hgEditConfig(self.project.ppath)
def __hgShowConfig(self):
"""
Private slot used to show the combined configuration.
"""
self.vcs.hgShowConfig(self.project.ppath)
def __hgVerify(self):
"""
Private slot used to verify the integrity of the repository.
"""
self.vcs.hgVerify(self.project.ppath)
def __hgShowPaths(self):
"""
Private slot used to show the aliases for remote repositories.
"""
self.vcs.hgShowPaths(self.project.ppath)
def __hgRecover(self):
"""
Private slot used to recover from an interrupted transaction.
"""
self.vcs.hgRecover(self.project.ppath)
def __hgIdentify(self):
"""
Private slot used to identify the project directory.
"""
self.vcs.hgIdentify(self.project.ppath)
def __hgCreateIgnore(self):
"""
Private slot used to create a .hgignore file for the project.
"""
self.vcs.hgCreateIgnoreFile(self.project.ppath, autoAdd=True)
def __hgBundle(self):
"""
Private slot used to create a changegroup file.
"""
self.vcs.hgBundle(self.project.ppath)
def __hgPreviewBundle(self):
"""
Private slot used to preview a changegroup file.
"""
self.vcs.hgPreviewBundle(self.project.ppath)
def __hgUnbundle(self):
"""
Private slot used to apply changegroup files.
"""
shouldReopen = self.vcs.hgUnbundle(self.project.ppath)
if shouldReopen:
res = E5MessageBox.yesNo(
self.parent(),
self.tr("Apply changegroups"),
self.tr("""The project should be reread. Do this now?"""),
yesDefault=True)
if res:
self.project.reopenProject()
def __hgBisectGood(self):
"""
Private slot used to execute the bisect --good command.
"""
self.vcs.hgBisect(self.project.ppath, "good")
def __hgBisectBad(self):
"""
Private slot used to execute the bisect --bad command.
"""
self.vcs.hgBisect(self.project.ppath, "bad")
def __hgBisectSkip(self):
"""
Private slot used to execute the bisect --skip command.
"""
self.vcs.hgBisect(self.project.ppath, "skip")
def __hgBisectReset(self):
"""
Private slot used to execute the bisect --reset command.
"""
self.vcs.hgBisect(self.project.ppath, "reset")
def __hgBackout(self):
"""
Private slot used to back out changes of a changeset.
"""
self.vcs.hgBackout(self.project.ppath)
def __hgRollback(self):
"""
Private slot used to rollback the last transaction.
"""
self.vcs.hgRollback(self.project.ppath)
def __hgServe(self):
"""
Private slot used to serve the project.
"""
self.vcs.hgServe(self.project.ppath)
def __hgImport(self):
"""
Private slot used to import a patch file.
"""
shouldReopen = self.vcs.hgImport(self.project.ppath)
if shouldReopen:
res = E5MessageBox.yesNo(
self.parent(),
self.tr("Import Patch"),
self.tr("""The project should be reread. Do this now?"""),
yesDefault=True)
if res:
self.project.reopenProject()
def __hgExport(self):
"""
Private slot used to export revisions to patch files.
"""
self.vcs.hgExport(self.project.ppath)
def __hgRevert(self):
"""
Private slot used to revert changes made to the local project.
"""
shouldReopen = self.vcs.hgRevert(self.project.ppath)
if shouldReopen:
res = E5MessageBox.yesNo(
self.parent(),
self.tr("Revert Changes"),
self.tr("""The project should be reread. Do this now?"""),
yesDefault=True)
if res:
self.project.reopenProject()
def __hgPhase(self):
"""
Private slot used to change the phase of revisions.
"""
self.vcs.hgPhase(self.project.ppath)
def __hgGraft(self):
"""
Private slot used to copy changesets from another branch.
"""
shouldReopen = self.vcs.hgGraft(self.project.getProjectPath())
if shouldReopen:
res = E5MessageBox.yesNo(
None,
self.tr("Copy Changesets"),
self.tr("""The project should be reread. Do this now?"""),
yesDefault=True)
if res:
self.project.reopenProject()
def __hgGraftContinue(self):
"""
Private slot used to continue the last copying session after conflicts
were resolved.
"""
shouldReopen = self.vcs.hgGraftContinue(self.project.getProjectPath())
if shouldReopen:
res = E5MessageBox.yesNo(
None,
self.tr("Copy Changesets (Continue)"),
self.tr("""The project should be reread. Do this now?"""),
yesDefault=True)
if res:
self.project.reopenProject()
def __hgAddSubrepository(self):
"""
Private slot used to add a sub-repository.
"""
self.vcs.hgAddSubrepository()
def __hgRemoveSubrepositories(self):
"""
Private slot used to remove sub-repositories.
"""
self.vcs.hgRemoveSubrepositories()
def __hgSummary(self):
"""
Private slot to show a working directory summary.
"""
self.vcs.hgSummary()
def __hgArchive(self):
"""
Private slot to create an unversioned archive from the repository.
"""
self.vcs.hgArchive()
def __hgBookmarksList(self):
"""
Private slot used to list the bookmarks.
"""
self.vcs.hgListBookmarks(self.project.getProjectPath())
def __hgBookmarkDefine(self):
"""
Private slot used to define a bookmark.
"""
self.vcs.hgBookmarkDefine(self.project.getProjectPath())
def __hgBookmarkDelete(self):
"""
Private slot used to delete a bookmark.
"""
self.vcs.hgBookmarkDelete(self.project.getProjectPath())
def __hgBookmarkRename(self):
"""
Private slot used to rename a bookmark.
"""
self.vcs.hgBookmarkRename(self.project.getProjectPath())
def __hgBookmarkMove(self):
"""
Private slot used to move a bookmark.
"""
self.vcs.hgBookmarkMove(self.project.getProjectPath())
def __hgBookmarkIncoming(self):
"""
Private slot used to show a list of incoming bookmarks.
"""
self.vcs.hgBookmarkIncoming(self.project.getProjectPath())
def __hgBookmarkOutgoing(self):
"""
Private slot used to show a list of outgoing bookmarks.
"""
self.vcs.hgBookmarkOutgoing(self.project.getProjectPath())
def __hgBookmarkPull(self):
"""
Private slot used to pull a bookmark from a remote repository.
"""
self.vcs.hgBookmarkPull(self.project.getProjectPath())
def __hgBookmarkPush(self):
"""
Private slot used to push a bookmark to a remote repository.
"""
self.vcs.hgBookmarkPush(self.project.getProjectPath())
|
amatotech/p2pool
|
refs/heads/master
|
p2pool/test/util/test_expiring_dict.py
|
287
|
from twisted.internet import defer
from twisted.trial import unittest
from p2pool.util import deferral, expiring_dict
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_expiring_dict1(self):
e = expiring_dict.ExpiringDict(3, get_touches=True)
e[1] = 2
yield deferral.sleep(1.5)
assert 1 in e
yield deferral.sleep(3)
assert 1 not in e
@defer.inlineCallbacks
def test_expiring_dict2(self):
e = expiring_dict.ExpiringDict(3, get_touches=True)
e[1] = 2
yield deferral.sleep(2.25)
e[1]
yield deferral.sleep(2.25)
assert 1 in e
@defer.inlineCallbacks
def test_expiring_dict3(self):
e = expiring_dict.ExpiringDict(3, get_touches=False)
e[1] = 2
yield deferral.sleep(2.25)
e[1]
yield deferral.sleep(2.25)
assert 1 not in e
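# Taken together, the three cases above pin down the touch semantics (this is
# a reading of the asserts, not of ExpiringDict's internals): with
# get_touches=True a plain read such as e[1] resets the 3-second expiry timer,
# so the key outlives its original deadline, while with get_touches=False
# reads are ignored and the key still expires 3 seconds after the last
# assignment.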
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_netstream_global.py
|
13
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_netstream_global
version_added: "2.4"
short_description: Manages global parameters of NetStream on HUAWEI CloudEngine switches.
description:
- Manages global parameters of NetStream on HUAWEI CloudEngine switches.
author: YangYang (@QijunPan)
notes:
- Recommended connection is C(network_cli).
- This module also works with C(local) connections for legacy playbooks.
options:
type:
description:
- Specifies the type of netstream global parameter.
choices: ['ip', 'vxlan']
default: 'ip'
state:
description:
- Specify desired state of the resource.
choices: ['present', 'absent']
default: present
interface:
description:
- Netstream global interface.
required: true
sampler_interval:
description:
- Specifies the netstream sampler interval; the valid range is 1 to 65535.
sampler_direction:
description:
- Specifies the netstream sampler direction.
choices: ['inbound', 'outbound']
statistics_direction:
description:
- Specifies the netstream statistic direction.
choices: ['inbound', 'outbound']
statistics_record:
description:
- Specifies the flexible netstream statistic record; the record name length is 1 to 32 characters.
index_switch:
description:
- Specifies the netstream index-switch.
choices: ['16', '32']
default: '16'
"""
EXAMPLES = '''
- name: netstream global module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure a netstream sampler at interface 10ge1/0/2, direction is outbound, interval is 30.
ce_netstream_global:
interface: 10ge1/0/2
type: ip
sampler_interval: 30
sampler_direction: outbound
state: present
provider: "{{ cli }}"
- name: Configure a netstream flexible statistic at interface 10ge1/0/2, record is test1, type is ip.
ce_netstream_global:
type: ip
interface: 10ge1/0/2
statistics_record: test1
provider: "{{ cli }}"
- name: Set the vxlan index-switch to 32.
ce_netstream_global:
type: vxlan
interface: all
index_switch: 32
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"index_switch": "16",
"interface": "10ge1/0/2",
"state": "present",
"statistics_record": "test",
"type": "vxlan"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"flexible_statistic": [
{
"interface": "10ge1/0/2",
"statistics_record": [],
"type": "ip"
},
{
"interface": "10ge1/0/2",
"statistics_record": [],
"type": "vxlan"
}
],
"index-switch": [
{
"index-switch": "16",
"type": "ip"
},
{
"index-switch": "16",
"type": "vxlan"
}
],
"ip_record": [
"test",
"test1"
],
"sampler": [
{
"interface": "all",
"sampler_direction": "null",
"sampler_interval": "null"
}
],
"statistic": [
{
"interface": "10ge1/0/2",
"statistics_direction": [],
"type": "null"
}
],
"vxlan_record": [
"test"
]}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"flexible_statistic": [
{
"interface": "10ge1/0/2",
"statistics_record": [],
"type": "ip"
},
{
"interface": "10ge1/0/2",
"statistics_record": [
"test"
],
"type": "vxlan"
}
],
"index-switch": [
{
"index-switch": "16",
"type": "ip"
},
{
"index-switch": "16",
"type": "vxlan"
}
],
"sampler": [
{
"interface": "all",
"sampler_direction": "null",
"sampler_interval": "null"
}
],
"statistic": [
{
"interface": "10ge1/0/2",
"statistics_direction": [],
"type": "null"
}
]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface 10ge1/0/2",
"netstream record test vxlan inner-ip"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import load_config
from ansible.module_utils.network.cloudengine.ce import get_connection, rm_config_prefix
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('ALL'):
iftype = 'all'
else:
return None
return iftype.lower()
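# A few illustrative calls, matching the branches above (the interface names
# are hypothetical): get_interface_type("10GE1/0/2") returns "10ge",
# get_interface_type("Eth-Trunk10") returns "eth-trunk",
# get_interface_type("all") returns "all", and an unsupported name such as
# get_interface_type("Vlanif100") returns None.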
def get_config(module, flags):
"""Retrieves the current config from the device or cache
"""
time_stamp_regex = re.compile(r'\s*\d{4}-\d{1,2}-\d{1,2}\s+\d{2}\:\d{2}\:\d{2}\.\d+\s*')
flags = [] if flags is None else flags
if isinstance(flags, str):
flags = [flags]
elif not isinstance(flags, list):
flags = []
cmd = 'display current-configuration '
cmd += ' '.join(flags)
cmd = cmd.strip()
conn = get_connection(module)
rc, out, err = conn.exec_command(cmd)
if rc != 0:
module.fail_json(msg=err)
cfg = str(out).strip()
# remove default configuration prefix '~'
for flag in flags:
if "include-default" in flag:
cfg = rm_config_prefix(cfg)
break
lines = cfg.split('\n')
lines = [l for l in lines if time_stamp_regex.match(l) is None]
if cfg.startswith('display'):
if len(lines) > 1:
lines.pop(0)
else:
return ''
return '\n'.join(lines)
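# A minimal usage sketch (the filter string is just an example of the
# expressions built by the callers below): get_config(module,
# [" | ignore-case include netstream record"]) runs "display
# current-configuration | ignore-case include netstream record" on the device
# and returns the matching lines as one newline-joined string, with the echoed
# "display ..." line and any timestamp header already stripped.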
class NetStreamGlobal(object):
"""
Manages netstream global parameters.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.type = self.module.params['type']
self.interface = self.module.params['interface']
self.sampler_interval = self.module.params['sampler_interval']
self.sampler_direction = self.module.params['sampler_direction']
self.statistics_direction = self.module.params['statistics_direction']
self.statistics_record = self.module.params['statistics_record']
self.index_switch = self.module.params['index_switch']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
# local parameters
self.existing["sampler"] = list()
self.existing["statistic"] = list()
self.existing["flexible_statistic"] = list()
self.existing["index-switch"] = list()
self.existing["ip_record"] = list()
self.existing["vxlan_record"] = list()
self.end_state["sampler"] = list()
self.end_state["statistic"] = list()
self.end_state["flexible_statistic"] = list()
self.end_state["index-switch"] = list()
self.sampler_changed = False
self.statistic_changed = False
self.flexible_changed = False
self.index_switch_changed = False
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd)
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd)
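# For example (a sketch of the bookkeeping above):
# cli_add_command("netstream inbound ip") queues the command as-is,
# cli_add_command("netstream inbound ip", undo=True) queues
# "undo netstream inbound ip", and in both cases the command is also recorded
# in self.updates_cmd for reporting, whereas "quit" and "return" are sent to
# the device but kept out of the reported updates.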
def get_exist_sampler_interval(self):
"""get exist netstream sampler interval"""
sampler_tmp = dict()
sampler_tmp1 = dict()
flags = list()
exp = " | ignore-case include ^netstream sampler random-packets"
flags.append(exp)
config = get_config(self.module, flags)
if not config:
sampler_tmp["sampler_interval"] = "null"
sampler_tmp["sampler_direction"] = "null"
sampler_tmp["interface"] = "null"
else:
config_list = config.split(' ')
config_num = len(config_list)
sampler_tmp["sampler_direction"] = config_list[config_num - 1]
sampler_tmp["sampler_interval"] = config_list[config_num - 2]
sampler_tmp["interface"] = "all"
self.existing["sampler"].append(sampler_tmp)
if self.interface != "all":
flags = list()
exp = r" | ignore-case section include ^#\s+interface %s" \
r" | include netstream sampler random-packets" % self.interface
flags.append(exp)
config = get_config(self.module, flags)
if not config:
sampler_tmp1["sampler_interval"] = "null"
sampler_tmp1["sampler_direction"] = "null"
else:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
sampler_tmp1 = dict()
config_mem_list = config_mem.split(' ')
config_num = len(config_mem_list)
if config_num > 1:
sampler_tmp1["sampler_direction"] = config_mem_list[
config_num - 1]
sampler_tmp1["sampler_interval"] = config_mem_list[
config_num - 2]
sampler_tmp1["interface"] = self.interface
self.existing["sampler"].append(sampler_tmp1)
def get_exist_statistic_record(self):
"""get exist netstream statistic record parameter"""
if self.statistics_record and self.statistics_direction:
self.module.fail_json(
msg='Error: The statistic direction and record cannot be set at the same time.')
statistic_tmp = dict()
statistic_tmp1 = dict()
statistic_tmp["statistics_record"] = list()
statistic_tmp["interface"] = self.interface
statistic_tmp1["statistics_record"] = list()
statistic_tmp1["interface"] = self.interface
flags = list()
exp = r" | ignore-case section include ^#\s+interface %s" \
r" | include netstream record"\
% (self.interface)
flags.append(exp)
config = get_config(self.module, flags)
if not config:
statistic_tmp["type"] = "ip"
self.existing["flexible_statistic"].append(statistic_tmp)
statistic_tmp1["type"] = "vxlan"
self.existing["flexible_statistic"].append(statistic_tmp1)
else:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem = config_mem.lstrip()
statistic_tmp["statistics_record"] = list()
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 3 and str(config_mem_list[3]) == "ip":
statistic_tmp["statistics_record"].append(
str(config_mem_list[2]))
statistic_tmp["type"] = "ip"
self.existing["flexible_statistic"].append(statistic_tmp)
for config_mem in config_list:
statistic_tmp1["statistics_record"] = list()
config_mem = config_mem.lstrip()
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 3 and str(config_mem_list[3]) == "vxlan":
statistic_tmp1["statistics_record"].append(
str(config_mem_list[2]))
statistic_tmp1["type"] = "vxlan"
self.existing["flexible_statistic"].append(statistic_tmp1)
def get_exist_interface_statistic(self):
"""get exist netstream interface statistic parameter"""
statistic_tmp1 = dict()
statistic_tmp1["statistics_direction"] = list()
flags = list()
exp = r" | ignore-case section include ^#\s+interface %s" \
r" | include netstream inbound|outbound"\
% self.interface
flags.append(exp)
config = get_config(self.module, flags)
if not config:
statistic_tmp1["type"] = "null"
else:
statistic_tmp1["type"] = "ip"
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem = config_mem.lstrip()
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 1:
statistic_tmp1["statistics_direction"].append(
str(config_mem_list[1]))
statistic_tmp1["interface"] = self.interface
self.existing["statistic"].append(statistic_tmp1)
def get_exist_index_switch(self):
"""get exist netstream index-switch"""
index_switch_tmp = dict()
index_switch_tmp1 = dict()
index_switch_tmp["index-switch"] = "16"
index_switch_tmp["type"] = "ip"
index_switch_tmp1["index-switch"] = "16"
index_switch_tmp1["type"] = "vxlan"
flags = list()
exp = " | ignore-case include index-switch"
flags.append(exp)
config = get_config(self.module, flags)
if not config:
self.existing["index-switch"].append(index_switch_tmp)
self.existing["index-switch"].append(index_switch_tmp1)
else:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 2 and str(config_mem_list[2]) == "ip":
index_switch_tmp["index-switch"] = "32"
index_switch_tmp["type"] = "ip"
if len(config_mem_list) > 2 and str(config_mem_list[2]) == "vxlan":
index_switch_tmp1["index-switch"] = "32"
index_switch_tmp1["type"] = "vxlan"
self.existing["index-switch"].append(index_switch_tmp)
self.existing["index-switch"].append(index_switch_tmp1)
def get_exist_record(self):
"""get exist netstream record"""
flags = list()
exp = " | ignore-case include netstream record"
flags.append(exp)
config = get_config(self.module, flags)
if config:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 3 and config_mem_list[3] == "ip":
self.existing["ip_record"].append(config_mem_list[2])
if len(config_mem_list) > 3 and config_mem_list[3] == "vxlan":
self.existing["vxlan_record"].append(config_mem_list[2])
def get_end_sampler_interval(self):
"""get end netstream sampler interval"""
sampler_tmp = dict()
sampler_tmp1 = dict()
flags = list()
exp = " | ignore-case include ^netstream sampler random-packets"
flags.append(exp)
config = get_config(self.module, flags)
if not config:
sampler_tmp["sampler_interval"] = "null"
sampler_tmp["sampler_direction"] = "null"
else:
config_list = config.split(' ')
config_num = len(config_list)
if config_num > 1:
sampler_tmp["sampler_direction"] = config_list[config_num - 1]
sampler_tmp["sampler_interval"] = config_list[config_num - 2]
sampler_tmp["interface"] = "all"
self.end_state["sampler"].append(sampler_tmp)
if self.interface != "all":
flags = list()
exp = r" | ignore-case section include ^#\s+interface %s" \
r" | include netstream sampler random-packets" % self.interface
flags.append(exp)
config = get_config(self.module, flags)
if not config:
sampler_tmp1["sampler_interval"] = "null"
sampler_tmp1["sampler_direction"] = "null"
else:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
sampler_tmp1 = dict()
config_mem_list = config_mem.split(' ')
config_num = len(config_mem_list)
if config_num > 1:
sampler_tmp1["sampler_direction"] = config_mem_list[
config_num - 1]
sampler_tmp1["sampler_interval"] = config_mem_list[
config_num - 2]
sampler_tmp1["interface"] = self.interface
self.end_state["sampler"].append(sampler_tmp1)
def get_end_statistic_record(self):
"""get end netstream statistic record parameter"""
if self.statistics_record and self.statistics_direction:
self.module.fail_json(
msg='Error: The statistic direction and record cannot be set at the same time.')
statistic_tmp = dict()
statistic_tmp1 = dict()
statistic_tmp["statistics_record"] = list()
statistic_tmp["interface"] = self.interface
statistic_tmp1["statistics_record"] = list()
statistic_tmp1["interface"] = self.interface
flags = list()
exp = r" | ignore-case section include ^#\s+interface %s" \
r" | include netstream record"\
% (self.interface)
flags.append(exp)
config = get_config(self.module, flags)
if not config:
statistic_tmp["type"] = "ip"
self.end_state["flexible_statistic"].append(statistic_tmp)
statistic_tmp1["type"] = "vxlan"
self.end_state["flexible_statistic"].append(statistic_tmp1)
else:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem = config_mem.lstrip()
statistic_tmp["statistics_record"] = list()
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 3 and str(config_mem_list[3]) == "ip":
statistic_tmp["statistics_record"].append(
str(config_mem_list[2]))
statistic_tmp["type"] = "ip"
self.end_state["flexible_statistic"].append(statistic_tmp)
for config_mem in config_list:
statistic_tmp1["statistics_record"] = list()
config_mem = config_mem.lstrip()
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 3 and str(config_mem_list[3]) == "vxlan":
statistic_tmp1["statistics_record"].append(
str(config_mem_list[2]))
statistic_tmp1["type"] = "vxlan"
self.end_state["flexible_statistic"].append(statistic_tmp1)
def get_end_interface_statistic(self):
"""get end netstream interface statistic parameters"""
statistic_tmp1 = dict()
statistic_tmp1["statistics_direction"] = list()
flags = list()
exp = r" | ignore-case section include ^#\s+interface %s" \
r" | include netstream inbound|outbound"\
% self.interface
flags.append(exp)
config = get_config(self.module, flags)
if not config:
statistic_tmp1["type"] = "null"
else:
statistic_tmp1["type"] = "ip"
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem = config_mem.lstrip()
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 1:
statistic_tmp1["statistics_direction"].append(
str(config_mem_list[1]))
statistic_tmp1["interface"] = self.interface
self.end_state["statistic"].append(statistic_tmp1)
def get_end_index_switch(self):
"""get end netstream index switch"""
index_switch_tmp = dict()
index_switch_tmp1 = dict()
index_switch_tmp["index-switch"] = "16"
index_switch_tmp["type"] = "ip"
index_switch_tmp1["index-switch"] = "16"
index_switch_tmp1["type"] = "vxlan"
flags = list()
exp = " | ignore-case include index-switch"
flags.append(exp)
config = get_config(self.module, flags)
if not config:
self.end_state["index-switch"].append(index_switch_tmp)
self.end_state["index-switch"].append(index_switch_tmp1)
else:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem_list = config_mem.split(' ')
if len(config_mem_list) > 2 and str(config_mem_list[2]) == "ip":
index_switch_tmp["index-switch"] = "32"
index_switch_tmp["type"] = "ip"
if len(config_mem_list) > 2 and str(config_mem_list[2]) == "vxlan":
index_switch_tmp1["index-switch"] = "32"
index_switch_tmp1["type"] = "vxlan"
self.end_state["index-switch"].append(index_switch_tmp)
self.end_state["index-switch"].append(index_switch_tmp1)
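# The matched lines here look like "netstream export ip index-switch 32" (the
# format emitted by operate_ns_global below): token index 2 carries the type,
# and the presence of such a line flips that type's default index-switch of
# "16" to "32".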
def check_params(self):
"""check all input params"""
# netstream parameters check
if not get_interface_type(self.interface):
self.module.fail_json(
msg='Error: Interface name %s is invalid.' % self.interface)
if self.sampler_interval:
if not str(self.sampler_interval).isdigit():
self.module.fail_json(
msg='Error: Sampler interval should be numerical.')
if int(self.sampler_interval) < 1 or int(self.sampler_interval) > 65535:
self.module.fail_json(
msg="Error: Sampler interval should be between 1 and 65535.")
if self.statistics_record:
if len(self.statistics_record) < 1 or len(self.statistics_record) > 32:
self.module.fail_json(
msg="Error: Statistic record length should be between 1 and 32.")
if self.interface == "all":
if self.statistics_record or self.statistics_direction:
self.module.fail_json(
msg="Error: Statistic function should be used on an interface.")
if self.statistics_direction:
if self.type == "vxlan":
self.module.fail_json(
msg="Error: Vxlan does not support inbound or outbound statistics.")
if (self.sampler_interval and not self.sampler_direction) \
or (self.sampler_direction and not self.sampler_interval):
self.module.fail_json(
msg="Error: Sampler interval and direction must be set at the same time.")
if self.statistics_record and not self.type:
self.module.fail_json(
msg="Error: Statistic type and record must be set at the same time.")
self.get_exist_record()
if self.statistics_record:
if self.type == "ip":
if self.statistics_record not in self.existing["ip_record"]:
self.module.fail_json(
msg="Error: The statistic record does not exist.")
if self.type == "vxlan":
if self.statistics_record not in self.existing["vxlan_record"]:
self.module.fail_json(
msg="Error: The statistic record does not exist.")
def get_proposed(self):
"""get proposed info"""
if self.type:
self.proposed["type"] = self.type
if self.interface:
self.proposed["interface"] = self.interface
if self.sampler_interval:
self.proposed["sampler_interval"] = self.sampler_interval
if self.sampler_direction:
self.proposed["sampler_direction"] = self.sampler_direction
if self.statistics_direction:
self.proposed["statistics_direction"] = self.statistics_direction
if self.statistics_record:
self.proposed["statistics_record"] = self.statistics_record
if self.index_switch:
self.proposed["index_switch"] = self.index_switch
if self.state:
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
sampler_tmp = dict()
statistic_tmp = dict()
statistic_tmp1 = dict()
index_tmp = dict()
temp = False
self.get_exist_sampler_interval()
self.get_exist_interface_statistic()
self.get_exist_statistic_record()
self.get_exist_index_switch()
if self.state == "present":
for sampler_tmp in self.existing["sampler"]:
if self.interface == str(sampler_tmp["interface"]):
temp = True
if (self.sampler_interval and str(sampler_tmp["sampler_interval"]) != self.sampler_interval) \
or (self.sampler_direction and
str(sampler_tmp["sampler_direction"]) != self.sampler_direction):
self.sampler_changed = True
if not temp:
if self.sampler_direction or self.sampler_interval:
self.sampler_changed = True
for statistic_tmp in self.existing["statistic"]:
if str(statistic_tmp["interface"]) == self.interface and self.interface != "all":
if self.type == "vxlan":
if statistic_tmp["statistics_direction"] \
and 'outbound' in statistic_tmp["statistics_direction"]:
self.module.fail_json(
msg='Error: The NetStream record vxlan '
'cannot be configured because the port has already been configured with NetStream outbound ip.')
if statistic_tmp["statistics_direction"] and self.statistics_direction:
if self.statistics_direction not in statistic_tmp["statistics_direction"]:
self.statistic_changed = True
else:
if self.statistics_direction:
self.statistic_changed = True
for statistic_tmp1 in self.existing["flexible_statistic"]:
if self.interface != "all" \
and self.type == str(statistic_tmp1["type"]) \
and self.interface == str(statistic_tmp1["interface"]):
if statistic_tmp1["statistics_record"] and self.statistics_record:
if self.statistics_record not in statistic_tmp1["statistics_record"]:
self.flexible_changed = True
else:
if self.statistics_record:
self.flexible_changed = True
for index_tmp in self.existing["index-switch"]:
if self.type == str(index_tmp["type"]):
if self.index_switch != str(index_tmp["index-switch"]):
self.index_switch_changed = True
else:
for sampler_tmp in self.existing["sampler"]:
if self.interface == str(sampler_tmp["interface"]):
if (self.sampler_interval and str(sampler_tmp["sampler_interval"]) == self.sampler_interval) \
and (self.sampler_direction and str(sampler_tmp["sampler_direction"]) == self.sampler_direction):
self.sampler_changed = True
for statistic_tmp in self.existing["statistic"]:
if str(statistic_tmp["interface"]) == self.interface and self.interface != "all":
if len(statistic_tmp["statistics_direction"]) and self.statistics_direction:
if self.statistics_direction in statistic_tmp["statistics_direction"]:
self.statistic_changed = True
for statistic_tmp1 in self.existing["flexible_statistic"]:
if self.interface != "all" \
and self.type == str(statistic_tmp1["type"]) \
and self.interface == str(statistic_tmp1["interface"]):
if len(statistic_tmp1["statistics_record"]) and self.statistics_record:
if self.statistics_record in statistic_tmp1["statistics_record"]:
self.flexible_changed = True
for index_tmp in self.existing["index-switch"]:
if self.type == str(index_tmp["type"]):
if self.index_switch == str(index_tmp["index-switch"]):
if self.index_switch != "16":
self.index_switch_changed = True
def operate_ns_global(self):
"""configure netstream global parameters"""
cmd = ""
if not self.sampler_changed and not self.statistic_changed \
and not self.flexible_changed and not self.index_switch_changed:
self.changed = False
return
if self.sampler_changed is True:
if self.type == "vxlan":
self.module.fail_json(
msg="Error: Netstream does not support vxlan sampler.")
if self.interface != "all":
cmd = "interface %s" % self.interface
self.cli_add_command(cmd)
cmd = "netstream sampler random-packets %s %s" % (
self.sampler_interval, self.sampler_direction)
if self.state == "present":
self.cli_add_command(cmd)
else:
self.cli_add_command(cmd, undo=True)
if self.interface != "all":
cmd = "quit"
self.cli_add_command(cmd)
if self.statistic_changed is True:
if self.interface != "all":
cmd = "interface %s" % self.interface
self.cli_add_command(cmd)
cmd = "netstream %s ip" % self.statistics_direction
if self.state == "present":
self.cli_add_command(cmd)
else:
self.cli_add_command(cmd, undo=True)
if self.interface != "all":
cmd = "quit"
self.cli_add_command(cmd)
if self.flexible_changed is True:
if self.interface != "all":
cmd = "interface %s" % self.interface
self.cli_add_command(cmd)
if self.state == "present":
for statistic_tmp in self.existing["flexible_statistic"]:
tmp_list = statistic_tmp["statistics_record"]
if self.type == statistic_tmp["type"]:
if self.type == "ip":
if len(tmp_list) > 0:
cmd = "netstream record %s ip" % tmp_list[0]
self.cli_add_command(cmd, undo=True)
cmd = "netstream record %s ip" % self.statistics_record
self.cli_add_command(cmd)
if self.type == "vxlan":
if len(tmp_list) > 0:
cmd = "netstream record %s vxlan inner-ip" % tmp_list[
0]
self.cli_add_command(cmd, undo=True)
cmd = "netstream record %s vxlan inner-ip" % self.statistics_record
self.cli_add_command(cmd)
else:
if self.type == "ip":
cmd = "netstream record %s ip" % self.statistics_record
self.cli_add_command(cmd, undo=True)
if self.type == "vxlan":
cmd = "netstream record %s vxlan inner-ip" % self.statistics_record
self.cli_add_command(cmd, undo=True)
if self.interface != "all":
cmd = "quit"
self.cli_add_command(cmd)
if self.index_switch_changed is True:
if self.interface != "all":
self.module.fail_json(
msg="Error: Index-switch function should be used globally.")
if self.type == "ip":
cmd = "netstream export ip index-switch %s" % self.index_switch
else:
cmd = "netstream export vxlan inner-ip index-switch %s" % self.index_switch
if self.state == "present":
self.cli_add_command(cmd)
else:
self.cli_add_command(cmd, undo=True)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def get_end_state(self):
"""get end state info"""
self.get_end_sampler_interval()
self.get_end_interface_statistic()
self.get_end_statistic_record()
self.get_end_index_switch()
def work(self):
"""worker"""
self.check_params()
self.get_existing()
self.get_proposed()
self.operate_ns_global()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
type=dict(required=False, choices=['ip', 'vxlan'], default='ip'),
interface=dict(required=True, type='str'),
sampler_interval=dict(required=False, type='str'),
sampler_direction=dict(required=False, choices=['inbound', 'outbound']),
statistics_direction=dict(required=False, choices=['inbound', 'outbound']),
statistics_record=dict(required=False, type='str'),
index_switch=dict(required=False, choices=['16', '32'], default='16'),
state=dict(required=False, choices=['present', 'absent'], default='present'),
)
argument_spec.update(ce_argument_spec)
module = NetStreamGlobal(argument_spec)
module.work()
if __name__ == '__main__':
main()
|
cdepman/falcon_api
|
refs/heads/master
|
site-packages/setuptools/command/register.py
|
986
|
import distutils.command.register as orig
class register(orig.register):
__doc__ = orig.register.__doc__
def run(self):
# Make sure that we are using valid current name/version info
self.run_command('egg_info')
orig.register.run(self)
|
Neamar/django
|
refs/heads/master
|
tests/admin_scripts/complex_app/management/commands/duplicate.py
|
554
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
self.stdout.write('complex_app')
|
PatrickChrist/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_dbscan.py
|
346
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
yjmade/odoo
|
refs/heads/8.0
|
addons/subscription/__openerp__.py
|
151
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Recurring Documents',
'version': '1.0',
'category': 'Tools',
'description': """
Create recurring documents.
===========================
This module allows you to create new documents and add subscriptions to those documents.
e.g. To have an invoice generated automatically periodically:
-------------------------------------------------------------
* Define a document type based on Invoice object
* Define a subscription whose source document is the document defined as
above. Specify the interval information and the partner to be invoiced.
""",
'author': 'OpenERP SA',
'depends': ['base'],
'data': ['security/subcription_security.xml', 'security/ir.model.access.csv', 'subscription_view.xml'],
'demo': ['subscription_demo.xml',],
'installable': True,
'auto_install': False,
'images': ['images/subscription_document_fields.jpeg','images/subscriptions.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dymkowsk/mantid
|
refs/heads/master
|
qt/paraview_ext/VatesSimpleGui/ViewWidgets/test/suite_MDHistoWorkspace/tst_MDHW_from_4D/test.py
|
3
|
def main():
source(findFile("scripts", "test_helpers.py"))
source(findFile("scripts", "common_checks.py"))
startApplication("MantidPlot")
run_script("mdhistos_from_4D.py")
get_workspace("SEQ_4D_rebin")
check_vsi_state(True, "VSI enabled for 4D rebinned MDHW")
get_workspace("SEQ_3D_rebin")
check_vsi_state(True, "VSI enabled for 3D rebinned MDHW")
get_workspace("SEQ_2D_rebin")
check_vsi_state(False, "VSI not enabled for 2D rebinned MDHW")
get_workspace("SEQ_1D_rebin")
check_vsi_state(False, "VSI not enabled for 1D rebinned MDHW")
get_workspace("SEQ_3D_int")
check_vsi_state(True, "VSI enabled for 3D integrated MDHW")
get_workspace("SEQ_2D_int")
check_vsi_state(False, "VSI not enabled for 2D integrated MDHW")
get_workspace("SEQ_1D_int")
check_vsi_state(False, "VSI not enabled for 1D integrated MDHW")
quit_program()
|
ioram7/keystone-federado-pgid2013
|
refs/heads/master
|
build/eventlet/examples/twisted/twisted_server.py
|
8
|
"""Simple chat demo application.
Listen on port 8007 and re-send all the data received to other participants.
Demonstrates how to
* plug in eventlet into a twisted application (join_reactor)
* how to use SpawnFactory to start a new greenlet for each new request.
"""
from eventlet.twistedutil import join_reactor
from eventlet.twistedutil.protocol import SpawnFactory
from eventlet.twistedutil.protocols.basic import LineOnlyReceiverTransport
class Chat:
def __init__(self):
self.participants = []
def handler(self, conn):
peer = conn.getPeer()
print 'new connection from %s' % (peer, )
conn.write("Welcome! There're %s participants already\n" % (len(self.participants)))
self.participants.append(conn)
try:
for line in conn:
if line:
print 'received from %s: %s' % (peer, line)
for buddy in self.participants:
if buddy is not conn:
buddy.sendline('from %s: %s' % (peer, line))
except Exception, ex:
print peer, ex
else:
print peer, 'connection done'
finally:
conn.loseConnection()
self.participants.remove(conn)
print __doc__
chat = Chat()
from twisted.internet import reactor
reactor.listenTCP(8007, SpawnFactory(chat.handler, LineOnlyReceiverTransport))
reactor.run()
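# To try the demo (assuming eventlet and Twisted are installed), run this file
# and connect from two terminals, e.g. "telnet localhost 8007"; every line
# typed in one session is relayed to all other connected participants.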
|
lepistone/odoo
|
refs/heads/master
|
addons/account_analytic_analysis/__openerp__.py
|
62
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Contracts Management',
'version': '1.1',
'category': 'Sales Management',
'description': """
This module modifies the account analytic view to show important data to project managers of services companies.
=================================================================================================================
It adds a menu that shows the relevant information to each manager. You can also view the account analytic summary report user-wise as well as month-wise.
""",
'author': 'Camptocamp',
'website': 'http://www.camptocamp.com/',
'images': ['images/bill_tasks_works.jpeg','images/overpassed_accounts.jpeg'],
    'depends': ['hr_timesheet_invoice', 'sale'],  # although sale is technically not required to install this module, all menu items are located under the 'Sales' application
'data': [
'security/ir.model.access.csv',
'security/account_analytic_analysis_security.xml',
'account_analytic_analysis_view.xml',
'account_analytic_analysis_cron.xml',
'res_config_view.xml',
'views/account_analytic_analysis.xml',
],
'demo': ['analytic_account_demo.xml'],
'test': ['test/account_analytic_analysis.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
JRock007/boxxy
|
refs/heads/master
|
dist/Boxxy.app/Contents/Resources/lib/python2.7/numpy/ma/__init__.py
|
76
|
"""
=============
Masked Arrays
=============
Arrays sometimes contain invalid or missing data. When doing operations
on such arrays, we wish to suppress invalid values, which is the purpose masked
arrays fulfill (an example of typical use is given below).
For example, examine the following array:
>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
When we try to calculate the mean of the data, the result is undetermined:
>>> np.mean(x)
nan
The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
masked arrays:
>>> m = np.ma.masked_array(x, np.isnan(x))
>>> m
masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
mask = [False False False True False False False True],
fill_value=1e+20)
Here, we construct a masked array that suppresses all ``NaN`` values. We
may now proceed to calculate the mean of the other values:
>>> np.mean(m)
2.6666666666666665
.. [1] Not-a-Number, a floating point value that is the result of an
invalid operation.
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)"
__version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
from . import core
from .core import *
from . import extras
from .extras import *
__all__ = ['core', 'extras']
__all__ += core.__all__
__all__ += extras.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
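# A minimal usage sketch (not part of the module source; ``masked_invalid``
# and ``filled`` are standard numpy.ma helpers), restating the docstring
# example in doctest form:
#
#   >>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
#   >>> m = np.ma.masked_invalid(x)      # mask NaN (and inf) entries in one call
#   >>> m.mean()
#   2.6666666666666665
#   >>> m.filled(0.0)                    # replace masked entries with a fill value
#   array([ 2.,  1.,  3.,  0.,  5.,  2.,  3.,  0.])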
|
nomaro/SickBeard_Backup
|
refs/heads/master
|
lib/requests/packages/chardet/big5freq.py
|
323
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851 / (1 - 0.74851) = 2.98
# Random Distribution Ratio = 512 / (5401 - 512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, but still much higher than the RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
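# A quick sanity check (not part of the original chardet source): the two
# ratios quoted above can be reproduced from the numbers in the comment block.
_coverage_512 = 0.74851                              # cumulative frequency covered by the top 512 chars
_total_chars = 5401                                  # distinct Big5 characters considered
_ideal_ratio = _coverage_512 / (1 - _coverage_512)   # -> 2.976..., i.e. ~2.98
_random_ratio = 512.0 / (_total_chars - 512)         # -> 0.1047..., i.e. ~0.105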
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = ( \
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
|
robovm/robovm-studio
|
refs/heads/master
|
python/testData/addImport/newThirdPartyImportInBetween/main.after.py
|
75
|
import sys
import third_party
import a
print(sys, third_party, a)
|
rocopartners/django-oscar
|
refs/heads/master
|
src/oscar/templatetags/image_tags.py
|
57
|
from django import template
from django.conf import settings
from django.db.models.fields.files import ImageFieldFile
register = template.Library()
def do_dynamic_image_url(parser, token):
tokens = token.split_contents()
if len(tokens) < 2:
raise template.TemplateSyntaxError(
"%r tag requires at least an image URL or field" % tokens[0])
image = tokens[1]
if len(tokens) > 2:
params = tokens[2:]
else:
params = []
return DynamicImageNode(image, params)
class DynamicImageNode(template.Node):
def __init__(self, image, params):
self.image = image
self.params = {}
for p in params:
try:
bits = p.split('=')
self.params[bits[0]] = template.Variable(bits[1])
except IndexError:
raise template.TemplateSyntaxError(
"image tag parameters must be of form key=value, "
"you used '%s'" % p)
def render(self, context):
if isinstance(self.image, ImageFieldFile):
path = self.image.name
else:
path = self.image
host = getattr(settings, 'DYNAMIC_MEDIA_URL', None)
if host:
params = []
ext = path[path.rfind('.') + 1:]
ext_changed = False
for key, v in self.params.items():
value = v.resolve(context)
if key == u'format':
ext = value
ext_changed = True
else:
params.append('%s-%s' % (key, value))
if len(params) > 0:
suffix = '_'.join(params)
path = '.'.join((path, suffix, ext))
            else:
                # params is empty in this branch, so a bare format change
                # is encoded as '<path>.to.<ext>'.
                if ext_changed:
                    path = '.'.join((path, 'to', ext))
            return host + path
        # Fall back to the unmodified path when DYNAMIC_MEDIA_URL is not
        # configured; a template node must render a string, not None.
        return path
register.tag('image', do_dynamic_image_url)
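# A minimal usage sketch. The library name 'image_tags' is an assumption
# based on this file's name, and DYNAMIC_MEDIA_URL must be configured for
# a rewritten URL to be produced:
#
#     {% load image_tags %}
#     {% image product.image width=200 format=png %}
#
# For a path like 'img/foo.jpg' and DYNAMIC_MEDIA_URL set to
# 'http://media.example.com/' (hypothetical), this renders roughly as
# 'http://media.example.com/img/foo.jpg.width-200.png'; parameter order
# follows dict iteration order, and 'format' swaps the extension rather
# than adding a key-value segment.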
|
krintoxi/NoobSec-Toolkit
|
refs/heads/master
|
NoobSecToolkit - MAC OSX/scripts/sshbackdoors/rpyc/lib/__init__.py
|
14
|
"""
A library of various helper functions and classes
"""
import sys
import logging
class MissingModule(object):
__slots__ = ["__name"]
def __init__(self, name):
self.__name = name
def __getattr__(self, name):
if name.startswith("__"): # issue 71
raise AttributeError("module %r not found" % (self.__name,))
raise ImportError("module %r not found" % (self.__name,))
def __bool__(self):
return False
__nonzero__ = __bool__
def safe_import(name):
try:
mod = __import__(name, None, None, "*")
except ImportError:
mod = MissingModule(name)
except Exception:
# issue 72: IronPython on Mono
if sys.platform == "cli" and name == "signal": #os.name == "posix":
mod = MissingModule(name)
else:
raise
return mod
def setup_logger(quiet=False, logfile=None):
opts = {}
if quiet:
opts['level'] = logging.ERROR
else:
opts['level'] = logging.DEBUG
if logfile:
        opts['filename'] = logfile
logging.basicConfig(**opts)
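# A minimal usage sketch: safe_import never raises for a missing module,
# it returns a falsy MissingModule placeholder instead, so availability
# can be tested with a plain truth check and the ImportError is deferred
# to the point where an attribute is actually accessed.
#
#     ssl = safe_import("ssl")
#     if not ssl:
#         # degrade gracefully; touching ssl.<anything> here would raise
#         # ImportError("module 'ssl' not found")
#         pass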
|
Lancea12/sudoku_solver
|
refs/heads/master
|
sudoku/models/choice.py
|
2
|
from django.db import models
from solver.models.cell import Cell
class Choice(models.Model):
cell = models.ForeignKey(Cell)
val = models.IntegerField()
class Meta:
app_label = "solver"
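# A minimal usage sketch (cell and the value 5 are made up for
# illustration): each Choice row ties one candidate value to a cell, so
# recording and pruning candidates are plain ORM calls.
#
#     Choice.objects.create(cell=cell, val=5)
#     Choice.objects.filter(cell=cell, val=5).delete()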
|
sbesson/zeroc-ice
|
refs/heads/master
|
cpp/test/IceUtil/uuid/run.py
|
5
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
client = os.path.join(os.getcwd(), "client")
TestUtil.simpleTest(client)
|
SUNET/eduid-webapp
|
refs/heads/master
|
src/eduid_webapp/letter_proofing/views.py
|
1
|
# -*- coding: utf-8 -*-
from flask import Blueprint, abort
from eduid_common.api.decorators import MarshalWith, UnmarshalWith, can_verify_identity, require_user
from eduid_common.api.exceptions import AmTaskFailed, MsgTaskFailed
from eduid_common.api.helpers import add_nin_to_user, check_magic_cookie, verify_nin_for_user
from eduid_common.api.messages import CommonMsg, FluxData, error_response, success_response
from eduid_common.misc.timeutil import utc_now
from eduid_userdb import User
from eduid_userdb.logs import LetterProofing
from eduid_userdb.proofing import ProofingUser
from eduid_webapp.letter_proofing import pdf, schemas
from eduid_webapp.letter_proofing.app import current_letterp_app as current_app
from eduid_webapp.letter_proofing.ekopost import EkopostException
from eduid_webapp.letter_proofing.helpers import LetterMsg, check_state, create_proofing_state, get_address, send_letter
__author__ = 'lundberg'
letter_proofing_views = Blueprint('letter_proofing', __name__, url_prefix='', template_folder='templates')
@letter_proofing_views.route('/proofing', methods=['GET'])
@MarshalWith(schemas.LetterProofingResponseSchema)
@require_user
def get_state(user) -> FluxData:
current_app.logger.info('Getting proofing state for user {}'.format(user))
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
if proofing_state:
current_app.logger.info('Found proofing state for user {}'.format(user))
result = check_state(proofing_state)
if result.is_expired and current_app.conf.backwards_compat_remove_expired_state:
current_app.logger.info(f'Backwards-compat removing expired state for user {user}')
current_app.proofing_statedb.remove_state(proofing_state)
current_app.stats.count('letter_expired')
return success_response(message=LetterMsg.no_state)
return result.to_response()
return success_response(message=LetterMsg.no_state)
@letter_proofing_views.route('/proofing', methods=['POST'])
@UnmarshalWith(schemas.LetterProofingRequestSchema)
@MarshalWith(schemas.LetterProofingResponseSchema)
@can_verify_identity
@require_user
def proofing(user: User, nin: str) -> FluxData:
current_app.logger.info('Send letter for user {} initiated'.format(user))
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
# No existing proofing state was found, create a new one
if not proofing_state:
# Create a LetterNinProofingUser in proofingdb
proofing_state = create_proofing_state(user.eppn, nin)
current_app.logger.info('Created proofing state for user {}'.format(user))
# Add the nin used to initiate the proofing state to the user
    # NOOP if the user already has the nin
add_nin_to_user(user, proofing_state)
if proofing_state.proofing_letter.is_sent:
current_app.logger.info('A letter has already been sent to the user.')
current_app.logger.debug('Proofing state: {}'.format(proofing_state.to_dict()))
result = check_state(proofing_state)
if result.error:
# error message
return result.to_response()
if not result.is_expired:
return result.to_response()
current_app.logger.info('The letter has expired. Sending a new one...')
current_app.proofing_statedb.remove_state(proofing_state)
current_app.logger.info(f'Removed {proofing_state}')
current_app.stats.count('letter_expired')
proofing_state = create_proofing_state(user.eppn, nin)
current_app.logger.info(f'Created new {proofing_state}')
try:
address = get_address(user, proofing_state)
if not address:
current_app.logger.error('No address found for user {}'.format(user))
return error_response(message=LetterMsg.address_not_found)
except MsgTaskFailed:
current_app.logger.exception(f'Navet lookup failed for user {user}')
current_app.stats.count('navet_error')
return error_response(message=CommonMsg.navet_error)
# Set and save official address
proofing_state.proofing_letter.address = address
current_app.proofing_statedb.save(proofing_state)
try:
campaign_id = send_letter(user, proofing_state)
current_app.stats.count('letter_sent')
except pdf.AddressFormatException:
current_app.logger.exception('Failed formatting address')
current_app.stats.count('address_format_error')
return error_response(message=LetterMsg.bad_address)
except EkopostException:
current_app.logger.exception('Ekopost returned an error')
current_app.stats.count('ekopost_error')
return error_response(message=CommonMsg.temp_problem)
    # Save the user's proofing state
proofing_state.proofing_letter.transaction_id = campaign_id
proofing_state.proofing_letter.is_sent = True
proofing_state.proofing_letter.sent_ts = utc_now()
current_app.proofing_statedb.save(proofing_state)
result = check_state(proofing_state)
result.message = LetterMsg.letter_sent
return result.to_response()
@letter_proofing_views.route('/verify-code', methods=['POST'])
@UnmarshalWith(schemas.VerifyCodeRequestSchema)
@MarshalWith(schemas.VerifyCodeResponseSchema)
@require_user
def verify_code(user: User, code: str) -> FluxData:
current_app.logger.info('Verifying code for user {}'.format(user))
proofing_state = current_app.proofing_statedb.get_state_by_eppn(user.eppn, raise_on_missing=False)
if not proofing_state:
return error_response(message=LetterMsg.no_state)
# Check if provided code matches the one in the letter
    if code != proofing_state.nin.verification_code:
current_app.logger.error('Verification code for user {} does not match'.format(user))
        # TODO: Throttle attempts to discourage an adversary from brute forcing the code
return error_response(message=LetterMsg.wrong_code)
state_info = check_state(proofing_state)
if state_info.error:
return state_info.to_response()
if state_info.is_expired:
# This is not an error in the get_state view, but here it is an error so 'upgrade' it.
state_info.error = True
current_app.logger.warning(f'Tried to validate expired state: {proofing_state}')
return state_info.to_response()
try:
# Fetch registered address again, to save the address of record at time of verification.
official_address = get_address(user, proofing_state)
except MsgTaskFailed:
current_app.logger.exception(f'Navet lookup failed for user {user}')
current_app.stats.count('navet_error')
return error_response(message=CommonMsg.navet_error)
proofing_log_entry = LetterProofing(
eppn=user.eppn,
created_by='eduid_letter_proofing',
nin=proofing_state.nin.number,
letter_sent_to=proofing_state.proofing_letter.address,
transaction_id=proofing_state.proofing_letter.transaction_id,
user_postal_address=official_address,
proofing_version='2016v1',
)
try:
# Verify nin for user
proofing_user = ProofingUser.from_user(user, current_app.private_userdb)
if not verify_nin_for_user(proofing_user, proofing_state, proofing_log_entry):
current_app.logger.error(f'Failed verifying NIN for user {user}')
return error_response(message=CommonMsg.temp_problem)
current_app.logger.info(f'Verified code for user {user}')
# Remove proofing state
current_app.proofing_statedb.remove_state(proofing_state)
current_app.stats.count(name='nin_verified')
return success_response(
payload=dict(nins=proofing_user.nins.to_list_of_dicts()), message=LetterMsg.verify_success
)
except AmTaskFailed:
current_app.logger.exception(f'Verifying nin for user {user} failed')
return error_response(message=CommonMsg.temp_problem)
@letter_proofing_views.route('/get-code', methods=['GET'])
@require_user
def get_code(user):
"""
Backdoor to get the verification code in the staging or dev environments
"""
try:
if check_magic_cookie(current_app.conf):
state = current_app.proofing_statedb.get_state_by_eppn(user.eppn)
return state.nin.verification_code
except Exception:
current_app.logger.exception(f"{user} tried to use the backdoor to get the letter verification code for a NIN")
abort(400)
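# A minimal sketch of the request flow these views implement, exercised
# through Flask's test client; payload field names follow the view
# signatures above, while the NIN and code values (and any CSRF/session
# handling the schemas enforce) are illustrative assumptions:
#
#     client = current_app.test_client()
#     client.post('/proofing', json={'nin': '190001019876'})  # order letter
#     client.get('/proofing')                                 # poll state
#     client.post('/verify-code', json={'code': 'code-from-letter'})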
|
rcbops/python-django-buildpackage
|
refs/heads/master
|
django/contrib/flatpages/middleware.py
|
641
|
from django.contrib.flatpages.views import flatpage
from django.http import Http404
from django.conf import settings
class FlatpageFallbackMiddleware(object):
def process_response(self, request, response):
if response.status_code != 404:
return response # No need to check for a flatpage for non-404 responses.
try:
return flatpage(request, request.path_info)
# Return the original response if any errors happened. Because this
# is a middleware, we can't assume the errors will be caught elsewhere.
except Http404:
return response
except:
if settings.DEBUG:
raise
return response
|
alexander-bauer/network-markup
|
refs/heads/development
|
python/attributes.py
|
1
|
import igraph
attb = {'disabled': {'color': 'grey'}}
# Applies known attributes, such as "disabled", to vertices by translating
# them into igraph-recognized values. The "label" attribute is
# automatically set to the supplied name. Returns the modified vertex.
def apply(vertex, name, node):
vertex["label"] = name
try:
attributes = node["attributes"]
except Exception:
return vertex
for aName, aVal in attributes.items():
# If the attb dictionary contains the relevant attribute, then
# apply all changes implied.
try:
changes = attb[aName]
for cName, cVal in changes.items():
vertex[cName] = cVal
except Exception:
print "Adding attribute: using literal " + str(aName)
# If the change isn't known, though, try to apply it
# anyway. This is for cases in which the attributes are
# literal, like {'color':'blue'}
vertex[str(aName)] = aVal
return vertex
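# A minimal usage sketch (the graph, vertex name and node mapping are
# made up for illustration):
#
#     g = igraph.Graph(1)
#     node = {"attributes": {"disabled": True, "shape": "square"}}
#     v = apply(g.vs[0], "router-1", node)
#     # v["label"] == "router-1"
#     # v["color"] == "grey"     (translated from the known 'disabled' key)
#     # v["shape"] == "square"   (unknown key, applied literally)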
|
yunity/foodsaving-backend
|
refs/heads/master
|
karrot/groups/migrations/0024_groupmembership_lastseen_at__required_20180304_1501.py
|
2
|
# Generated by Django 2.0.2 on 2018-03-04 15:01
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('groups', '0023_set_sensible_lastseen_at_20180304_1330'),
]
operations = [
migrations.AlterField(
model_name='groupmembership',
name='lastseen_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
VirtueSecurity/aws-extender
|
refs/heads/master
|
BappModules/docutils/parsers/rst/directives/tables.py
|
7
|
# $Id: tables.py 8039 2017-02-28 12:19:20Z milde $
# Authors: David Goodger <goodger@python.org>; David Priest
# Copyright: This module has been placed in the public domain.
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
def align(argument):
return directives.choice(argument, ('left', 'center', 'right'))
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged,
'align': align,
'widths': directives.value_or(('auto', 'grid'),
directives.positive_int_list)}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
(title.source,
title.line) = self.state_machine.get_source_and_line(self.lineno)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
@property
def widths(self):
return self.options.get('widths', '')
def get_column_widths(self, max_cols):
if type(self.widths) == list:
if len(self.widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
col_widths = self.widths
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
if 'align' in self.options:
table_node['align'] = self.options.get('align')
tgroup = table_node[0]
if type(self.widths) == list:
colspecs = [child for child in tgroup.children
if child.tagname == 'colspec']
for colspec, col_width in zip(colspecs, self.widths):
colspec['colwidth'] = col_width
# @@@ the colwidths argument for <tgroup> is not part of the
# XML Exchange Table spec (https://www.oasis-open.org/specs/tm9901.htm)
# and hence violates the docutils.dtd.
if self.widths == 'auto':
table_node['classes'] += ['colwidths-auto']
elif self.widths: # "grid" or list of integers
table_node['classes'] += ['colwidths-given']
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.value_or(('auto', ),
directives.positive_int_list),
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
'align': align,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = CSVTable.encode_for_csv(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = CSVTable.encode_for_csv(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = CSVTable.encode_for_csv(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
message = str(detail)
if sys.version_info < (3,) and '1-character string' in message:
message += '\nwith Python 2.x this must be an ASCII character.'
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, message), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns, widths=self.widths)
table_node['classes'] += self.options.get('class', [])
if 'align' in self.options:
table_node['align'] = self.options.get('align')
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(source_path=source,
encoding=encoding,
error_handler=error_handler)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.value_or(('auto', ),
directives.positive_int_list),
'class': directives.class_option,
'name': directives.unchanged,
'align': align}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
if 'align' in self.options:
table_node['align'] = self.options.get('align')
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
if self.widths == 'auto':
table['classes'] += ['colwidths-auto']
elif self.widths: # "grid" or list of integers
table['classes'] += ['colwidths-given']
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec()
if col_width is not None:
colspec.attributes['colwidth'] = col_width
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
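# Hedged sketch (illustrative only; uses only the docutils node classes seen
# above): assembling the same table/tgroup/colspec/thead/tbody structure that
# build_table_from_list produces, but from plain strings.
def _example_table_node():
    from docutils import nodes
    header, body = ['h1', 'h2'], ['a', 'b']
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2)
    table += tgroup
    for _ in range(2):
        tgroup += nodes.colspec(colwidth=1)
    def make_row(cells):
        row = nodes.row()
        for text in cells:
            entry = nodes.entry()
            entry += nodes.paragraph(text=text)
            row += entry
        return row
    thead = nodes.thead()
    thead += make_row(header)
    tgroup += thead
    tbody = nodes.tbody()
    tbody += make_row(body)
    tgroup += tbody
    return table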
|
sjsrey/pysal_core
|
refs/heads/master
|
pysal_core/cg/shapes.py
|
2
|
"""
Computational geometry code for PySAL: Python Spatial Analysis Library.
"""
__author__ = "Sergio J. Rey, Xinyue Ye, Charles Schmidt, Andrew Winslow"
__credits__ = "Copyright (c) 2005-2009 Sergio J. Rey"
import doctest
import math
from warnings import warn
from .sphere import arcdist
import numpy as np
__all__ = ['Point', 'LineSegment', 'Line', 'Ray', 'Chain', 'Polygon',
'Rectangle', 'asShape']
def asShape(obj):
"""
Returns a pysal shape object from obj.
obj must support the __geo_interface__.
"""
if isinstance(obj, (Point, LineSegment, Line, Ray, Chain, Polygon)):
return obj
if hasattr(obj, '__geo_interface__'):
geo = obj.__geo_interface__
else:
geo = obj
if hasattr(geo, 'type'):
raise TypeError('%r does not appear to be a shape object' % (obj))
geo_type = geo['type'].lower()
#if geo_type.startswith('multi'):
# raise NotImplementedError, "%s are not supported at this time."%geo_type
if geo_type in _geoJSON_type_to_Pysal_type:
return _geoJSON_type_to_Pysal_type[geo_type].__from_geo_interface__(geo)
else:
raise NotImplementedError(
"%s is not supported at this time." % geo_type)
class Geometry(object):
"""
A base class to help implement is_geometry and make geometric types
extendable.
"""
def __init__(self):
pass
class Point(Geometry):
"""
Geometric class for point objects.
Attributes
----------
None
"""
def __init__(self, loc):
"""
Returns an instance of a Point object.
__init__((number, number)) -> Point
Test tag: <tc>#is#Point.__init__</tc>
Test tag: <tc>#tests#Point.__init__</tc>
Parameters
----------
loc : tuple location (number x-tuple, x > 1)
Attributes
----------
Examples
--------
>>> p = Point((1, 3))
"""
self.__loc = tuple(map(float, loc))
@classmethod
def __from_geo_interface__(cls, geo):
return cls(geo['coordinates'])
@property
def __geo_interface__(self):
return {'type': 'Point', 'coordinates': self.__loc}
def __lt__(self, other):
"""
Tests if the Point is < another object.
        __lt__(x) -> bool
        Parameters
        ----------
        other : an object to compare against
Attributes
----------
Examples
--------
>>> Point((0,1)) < Point((0,1))
False
>>> Point((0,1)) < Point((1,1))
True
"""
return (self.__loc) < (other.__loc)
def __le__(self, other):
"""
Tests if the Point is <= another object.
        __le__(x) -> bool
        Parameters
        ----------
        other : an object to compare against
Attributes
----------
Examples
--------
>>> Point((0,1)) <= Point((0,1))
True
>>> Point((0,1)) <= Point((1,1))
True
"""
return (self.__loc) <= (other.__loc)
def __eq__(self, other):
"""
Tests if the Point is equal to another object.
__eq__(x) -> bool
Parameters
----------
other : an object to test equality against
Attributes
----------
Examples
--------
>>> Point((0,1)) == Point((0,1))
True
>>> Point((0,1)) == Point((1,1))
False
"""
try:
return (self.__loc) == (other.__loc)
except AttributeError:
return False
def __ne__(self, other):
"""
Tests if the Point is not equal to another object.
__ne__(x) -> bool
Parameters
----------
other : an object to test equality against
Attributes
----------
Examples
--------
>>> Point((0,1)) != Point((0,1))
False
>>> Point((0,1)) != Point((1,1))
True
"""
try:
return (self.__loc) != (other.__loc)
except AttributeError:
return True
def __gt__(self, other):
"""
Tests if the Point is > another object.
        __gt__(x) -> bool
        Parameters
        ----------
        other : an object to compare against
Attributes
----------
Examples
--------
>>> Point((0,1)) > Point((0,1))
False
>>> Point((0,1)) > Point((1,1))
False
"""
return (self.__loc) > (other.__loc)
def __ge__(self, other):
"""
Tests if the Point is >= another object.
        __ge__(x) -> bool
        Parameters
        ----------
        other : an object to compare against
Attributes
----------
Examples
--------
>>> Point((0,1)) >= Point((0,1))
True
>>> Point((0,1)) >= Point((1,1))
False
"""
return (self.__loc) >= (other.__loc)
def __hash__(self):
"""
Returns the hash of the Point's location.
x.__hash__() -> hash(x)
Parameters
----------
None
Attributes
----------
Examples
--------
>>> hash(Point((0,1))) == hash(Point((0,1)))
True
>>> hash(Point((0,1))) == hash(Point((1,1)))
False
"""
return hash(self.__loc)
def __getitem__(self, *args):
"""
Return the coordinate for the given dimension.
x.__getitem__(i) -> x[i]
Parameters
----------
i : index of the desired dimension.
Attributes
----------
Examples
--------
>>> p = Point((5.5,4.3))
>>> p[0] == 5.5
True
>>> p[1] == 4.3
True
"""
return self.__loc.__getitem__(*args)
def __getslice__(self, *args):
"""
Return the coordinate for the given dimensions.
x.__getitem__(i,j) -> x[i:j]
Parameters
----------
i : index to start slice
j : index to end slice (excluded).
Attributes
----------
Examples
--------
>>> p = Point((3,6,2))
>>> p[:2] == (3,6)
True
>>> p[1:2] == (6,)
True
"""
return self.__loc.__getslice__(*args)
def __len__(self):
"""
        Returns the number of dimensions in the point.
__len__() -> int
Parameters
----------
None
Attributes
----------
Examples
--------
>>> len(Point((1,2)))
2
"""
return len(self.__loc)
def __repr__(self):
"""
Returns the string representation of the Point
__repr__() -> string
Parameters
----------
None
Attributes
----------
Examples
--------
>>> Point((0,1))
(0.0, 1.0)
"""
return str(self)
def __str__(self):
"""
Returns a string representation of a Point object.
__str__() -> string
Test tag: <tc>#is#Point.__str__</tc>
Test tag: <tc>#tests#Point.__str__</tc>
Attributes
----------
Examples
--------
>>> p = Point((1, 3))
>>> str(p)
'(1.0, 3.0)'
"""
return str(self.__loc)
return "POINT ({} {})".format(*self.__loc)
class LineSegment(Geometry):
"""
Geometric representation of line segment objects.
Parameters
----------
start_pt : Point
Point where segment begins
end_pt : Point
Point where segment ends
Attributes
----------
p1 : Point
Starting point
p2 : Point
Ending point
bounding_box : tuple
The bounding box of the segment (number 4-tuple)
len : float
The length of the segment
line : Line
The line on which the segment lies
"""
def __init__(self, start_pt, end_pt):
"""
Creates a LineSegment object.
__init__(Point, Point) -> LineSegment
Test tag: <tc>#is#LineSegment.__init__</tc>
Test tag: <tc>#tests#LineSegment.__init__</tc>
Attributes
----------
None
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
"""
self._p1 = start_pt
self._p2 = end_pt
self._reset_props()
def __str__(self):
return "LineSegment(" + str(self._p1) + ", " + str(self._p2) + ")"
return "LINESTRING ({} {}, {} {})".format(self._p1[0], self._p1[1],
self._p2[0], self._p2[1])
def __eq__(self, other):
"""
Returns true if self and other are the same line segment
Examples
--------
>>> l1 = LineSegment(Point((1, 2)), Point((5, 6)))
>>> l2 = LineSegment(Point((5, 6)), Point((1, 2)))
>>> l1 == l2
True
>>> l2 == l1
True
"""
if not isinstance(other, self.__class__):
return False
if (other.p1 == self._p1 and other.p2 == self._p2):
return True
elif (other.p2 == self._p1 and other.p1 == self._p2):
return True
return False
def intersect(self, other):
"""
Test whether segment intersects with other segment
Handles endpoints of segments being on other segment
Examples
--------
>>> ls = LineSegment(Point((5,0)), Point((10,0)))
>>> ls1 = LineSegment(Point((5,0)), Point((10,1)))
>>> ls.intersect(ls1)
True
>>> ls2 = LineSegment(Point((5,1)), Point((10,1)))
>>> ls.intersect(ls2)
False
>>> ls2 = LineSegment(Point((7,-1)), Point((7,2)))
>>> ls.intersect(ls2)
True
>>>
"""
ccw1 = self.sw_ccw(other.p2)
ccw2 = self.sw_ccw(other.p1)
ccw3 = other.sw_ccw(self.p1)
ccw4 = other.sw_ccw(self.p2)
        return ccw1 * ccw2 <= 0 and ccw3 * ccw4 <= 0
def _reset_props(self):
"""
HELPER METHOD. DO NOT CALL.
Resets attributes which are functions of other attributes. The getters for these attributes (implemented as
properties) then recompute their values if they have been reset since the last call to the getter.
_reset_props() -> None
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> ls._reset_props()
"""
self._bounding_box = None
self._len = None
self._line = False
def _get_p1(self):
"""
HELPER METHOD. DO NOT CALL.
Returns the p1 attribute of the line segment.
_get_p1() -> Point
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._get_p1()
>>> r == Point((1, 2))
True
"""
return self._p1
def _set_p1(self, p1):
"""
HELPER METHOD. DO NOT CALL.
Sets the p1 attribute of the line segment.
_set_p1(Point) -> Point
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._set_p1(Point((3, -1)))
>>> r == Point((3.0, -1.0))
True
"""
self._p1 = p1
self._reset_props()
return self._p1
p1 = property(_get_p1, _set_p1)
def _get_p2(self):
"""
HELPER METHOD. DO NOT CALL.
Returns the p2 attribute of the line segment.
_get_p2() -> Point
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._get_p2()
>>> r == Point((5, 6))
True
"""
return self._p2
def _set_p2(self, p2):
"""
HELPER METHOD. DO NOT CALL.
Sets the p2 attribute of the line segment.
_set_p2(Point) -> Point
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> r = ls._set_p2(Point((3, -1)))
>>> r == Point((3.0, -1.0))
True
"""
self._p2 = p2
self._reset_props()
return self._p2
p2 = property(_get_p2, _set_p2)
def is_ccw(self, pt):
"""
Returns whether a point is counterclockwise of the segment. Exclusive.
is_ccw(Point) -> bool
Test tag: <tc>#is#LineSegment.is_ccw</tc>
Test tag: <tc>#tests#LineSegment.is_ccw</tc>
Parameters
----------
pt : point lying ccw or cw of a segment
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((0, 0)), Point((5, 0)))
>>> ls.is_ccw(Point((2, 2)))
True
>>> ls.is_ccw(Point((2, -2)))
False
"""
v1 = (self._p2[0] - self._p1[0], self._p2[1] - self._p1[1])
v2 = (pt[0] - self._p1[0], pt[1] - self._p1[1])
return v1[0] * v2[1] - v1[1] * v2[0] > 0
def is_cw(self, pt):
"""
Returns whether a point is clockwise of the segment. Exclusive.
is_cw(Point) -> bool
Test tag: <tc>#is#LineSegment.is_cw</tc>
Test tag: <tc>#tests#LineSegment.is_cw</tc>
Parameters
----------
pt : point lying ccw or cw of a segment
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((0, 0)), Point((5, 0)))
>>> ls.is_cw(Point((2, 2)))
False
>>> ls.is_cw(Point((2, -2)))
True
"""
v1 = (self._p2[0] - self._p1[0], self._p2[1] - self._p1[1])
v2 = (pt[0] - self._p1[0], pt[1] - self._p1[1])
return v1[0] * v2[1] - v1[1] * v2[0] < 0
def sw_ccw(self, pt):
"""
Sedgewick test for pt being ccw of segment
Returns
-------
1 if turn from self.p1 to self.p2 to pt is ccw
-1 if turn from self.p1 to self.p2 to pt is cw
-1 if the points are collinear and self.p1 is in the middle
1 if the points are collinear and self.p2 is in the middle
0 if the points are collinear and pt is in the middle
"""
p0 = self.p1
p1 = self.p2
p2 = pt
dx1 = p1[0] - p0[0]
dy1 = p1[1] - p0[1]
dx2 = p2[0] - p0[0]
dy2 = p2[1] - p0[1]
if dy1*dx2 < dy2*dx1:
return 1
if dy1*dx2 > dy2*dx1:
return -1
if (dx1*dx2 < 0 or dy1*dy2 <0):
return -1
if dx1*dx1 + dy1*dy1 >= dx2*dx2 + dy2*dy2:
return 0
else:
return 1
def get_swap(self):
"""
Returns a LineSegment object which has its endpoints swapped.
get_swap() -> LineSegment
Test tag: <tc>#is#LineSegment.get_swap</tc>
Test tag: <tc>#tests#LineSegment.get_swap</tc>
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> swap = ls.get_swap()
>>> swap.p1[0]
5.0
>>> swap.p1[1]
6.0
>>> swap.p2[0]
1.0
>>> swap.p2[1]
2.0
"""
return LineSegment(self._p2, self._p1)
@property
def bounding_box(self):
"""
Returns the minimum bounding box of a LineSegment object.
Test tag: <tc>#is#LineSegment.bounding_box</tc>
Test tag: <tc>#tests#LineSegment.bounding_box</tc>
bounding_box -> Rectangle
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((1, 2)), Point((5, 6)))
>>> ls.bounding_box.left
1.0
>>> ls.bounding_box.lower
2.0
>>> ls.bounding_box.right
5.0
>>> ls.bounding_box.upper
6.0
"""
if self._bounding_box is None: # If LineSegment attributes p1, p2 changed, recompute
self._bounding_box = Rectangle(
min([self._p1[0], self._p2[0]]), min([
self._p1[1], self._p2[1]]),
max([self._p1[0], self._p2[0]]), max([self._p1[1], self._p2[1]]))
return Rectangle(
self._bounding_box.left, self._bounding_box.lower, self._bounding_box.right,
self._bounding_box.upper)
@property
def len(self):
"""
Returns the length of a LineSegment object.
Test tag: <tc>#is#LineSegment.len</tc>
Test tag: <tc>#tests#LineSegment.len</tc>
len() -> number
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((2, 2)), Point((5, 2)))
>>> ls.len
3.0
"""
if self._len is None: # If LineSegment attributes p1, p2 changed, recompute
self._len = math.hypot(self._p1[0] - self._p2[0],
self._p1[1] - self._p2[1])
return self._len
@property
def line(self):
"""
Returns a Line object of the line which the segment lies on.
Test tag: <tc>#is#LineSegment.line</tc>
Test tag: <tc>#tests#LineSegment.line</tc>
line() -> Line
Attributes
----------
Examples
--------
>>> ls = LineSegment(Point((2, 2)), Point((3, 3)))
>>> l = ls.line
>>> l.m
1.0
>>> l.b
0.0
"""
if self._line == False:
dx = self._p1[0] - self._p2[0]
dy = self._p1[1] - self._p2[1]
if dx == 0 and dy == 0:
self._line = None
elif dx == 0:
self._line = VerticalLine(self._p1[0])
else:
m = dy / float(dx)
b = self._p1[1] - m * self._p1[0] # y - mx
self._line = Line(m, b)
return self._line
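# Hedged example (not part of the original API): intersect combines four
# sw_ccw orientation tests, so crossing diagonals must report True.
def _example_segment_intersection():
    """
    >>> _example_segment_intersection()
    True
    """
    s1 = LineSegment(Point((0, 0)), Point((4, 4)))
    s2 = LineSegment(Point((0, 4)), Point((4, 0)))
    return s1.intersect(s2)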
class VerticalLine(Geometry):
"""
    Geometric representation of vertical line objects.
Attributes
----------
x : float
x-intercept
"""
def __init__(self, x):
"""
Returns a VerticalLine object.
__init__(number) -> VerticalLine
Parameters
----------
x : the x-intercept of the line
Attributes
----------
Examples
--------
>>> ls = VerticalLine(0)
>>> ls.m
inf
>>> ls.b
nan
"""
self._x = float(x)
self.m = float('inf')
self.b = float('nan')
def x(self, y):
"""
Returns the x-value of the line at a particular y-value.
x(number) -> number
Parameters
----------
y : the y-value to compute x at
Attributes
----------
Examples
--------
>>> l = VerticalLine(0)
>>> l.x(0.25)
0.0
"""
return self._x
def y(self, x):
"""
Returns the y-value of the line at a particular x-value.
y(number) -> number
Parameters
----------
x : the x-value to compute y at
Attributes
----------
Examples
--------
>>> l = VerticalLine(1)
>>> l.y(1)
nan
"""
return float('nan')
class Line(Geometry):
"""
Geometric representation of line objects.
Attributes
----------
m : float
slope
b : float
y-intercept
"""
def __init__(self, m, b):
"""
Returns a Line object.
__init__(number, number) -> Line
Test tag: <tc>#is#Line.__init__</tc>
Test tag: <tc>#tests#Line.__init__</tc>
Parameters
----------
m : the slope of the line
b : the y-intercept of the line
Attributes
----------
Examples
--------
>>> ls = Line(1, 0)
>>> ls.m
1.0
>>> ls.b
0.0
"""
        if m == float('inf') or m == float('-inf'):
raise ArithmeticError('Slope cannot be infinite.')
self.m = float(m)
self.b = float(b)
def x(self, y):
"""
Returns the x-value of the line at a particular y-value.
x(number) -> number
Parameters
----------
y : the y-value to compute x at
Attributes
----------
Examples
--------
>>> l = Line(0.5, 0)
>>> l.x(0.25)
0.5
"""
if self.m == 0:
raise ArithmeticError('Cannot solve for X when slope is zero.')
return (y - self.b) / self.m
def y(self, x):
"""
Returns the y-value of the line at a particular x-value.
y(number) -> number
Parameters
----------
x : the x-value to compute y at
Attributes
----------
Examples
--------
>>> l = Line(1, 0)
>>> l.y(1)
1.0
"""
if self.m == 0:
return self.b
return self.m * x + self.b
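# Hedged sketch (not part of the original API): intersecting two non-parallel
# Line objects by solving m1*x + b1 == m2*x + b2 for x.
def _example_line_intersection(l1, l2):
    """
    >>> _example_line_intersection(Line(1, 0), Line(-1, 2))
    (1.0, 1.0)
    """
    x = (l2.b - l1.b) / (l1.m - l2.m)
    return Point((x, l1.y(x)))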
class Ray:
"""
Geometric representation of ray objects.
Attributes
----------
o : Point
Origin (point where ray originates)
p : Point
Second point on the ray (not point where ray originates)
"""
def __init__(self, origin, second_p):
"""
Returns a ray with the values specified.
__init__(Point, Point) -> Ray
Parameters
----------
origin : the point where the ray originates
second_p : the second point specifying the ray (not the origin)
Attributes
----------
Examples
--------
>>> l = Ray(Point((0, 0)), Point((1, 0)))
>>> str(l.o)
'(0.0, 0.0)'
>>> str(l.p)
'(1.0, 0.0)'
"""
self.o = origin
self.p = second_p
class Chain(Geometry):
"""
Geometric representation of a chain, also known as a polyline.
Attributes
----------
vertices : list
List of Points of the vertices of the chain in order.
len : float
The geometric length of the chain.
"""
def __init__(self, vertices):
"""
Returns a chain created from the points specified.
__init__(Point list or list of Point lists) -> Chain
Parameters
----------
vertices : list -- Point list or list of Point lists.
Attributes
----------
Examples
--------
>>> c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
"""
if isinstance(vertices[0], list):
self._vertices = [part for part in vertices]
else:
self._vertices = [vertices]
self._reset_props()
@classmethod
def __from_geo_interface__(cls, geo):
if geo['type'].lower() == 'linestring':
verts = [Point(pt) for pt in geo['coordinates']]
elif geo['type'].lower() == 'multilinestring':
verts = [map(Point, part) for part in geo['coordinates']]
else:
raise TypeError('%r is not a Chain'%geo)
return cls(verts)
@property
def __geo_interface__(self):
if len(self.parts) == 1:
return {'type': 'LineString', 'coordinates': self.vertices}
else:
return {'type': 'MultiLineString', 'coordinates': self.parts}
def _reset_props(self):
"""
HELPER METHOD. DO NOT CALL.
Resets attributes which are functions of other attributes. The getters for these attributes (implemented as
properties) then recompute their values if they have been reset since the last call to the getter.
_reset_props() -> None
Attributes
----------
Examples
--------
>>> ls = Chain([Point((1, 2)), Point((5, 6))])
>>> ls._reset_props()
"""
self._len = None
self._arclen = None
self._bounding_box = None
@property
def vertices(self):
"""
        Returns the vertices of the chain in order.
vertices -> Point list
Attributes
----------
Examples
--------
>>> c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
>>> verts = c.vertices
>>> len(verts)
4
"""
return sum([part for part in self._vertices], [])
@property
def parts(self):
"""
Returns the parts of the chain.
parts -> Point list
Attributes
----------
Examples
--------
>>> c = Chain([[Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))],[Point((2,1)),Point((2,2)),Point((1,2)),Point((1,1))]])
>>> len(c.parts)
2
"""
return [[v for v in part] for part in self._vertices]
@property
def bounding_box(self):
"""
Returns the bounding box of the chain.
bounding_box -> Rectangle
Attributes
----------
Examples
--------
>>> c = Chain([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1))])
>>> c.bounding_box.left
0.0
>>> c.bounding_box.lower
0.0
>>> c.bounding_box.right
2.0
>>> c.bounding_box.upper
1.0
"""
if self._bounding_box is None:
vertices = self.vertices
self._bounding_box = Rectangle(
min([v[0] for v in vertices]), min([v[1] for v in vertices]),
max([v[0] for v in vertices]), max([v[1] for v in vertices]))
return self._bounding_box
@property
def len(self):
"""
Returns the geometric length of the chain.
len -> number
Attributes
----------
Examples
--------
>>> c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
>>> c.len
3.0
>>> c = Chain([[Point((0, 0)), Point((1, 0)), Point((1, 1))],[Point((10,10)),Point((11,10)),Point((11,11))]])
>>> c.len
4.0
"""
def dist(v1, v2):
return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
def part_perimeter(part):
return sum([dist(part[i], part[i + 1]) for i in xrange(len(part) - 1)])
if self._len is None:
self._len = sum([part_perimeter(part) for part in self._vertices])
return self._len
@property
def arclen(self):
"""
Returns the geometric length of the chain computed using arcdistance (meters).
        arclen -> number
Attributes
----------
Examples
--------
"""
def part_perimeter(part):
return sum([arcdist(part[i], part[i + 1]) * 1000. for i in xrange(len(part) - 1)])
if self._arclen is None:
self._arclen = sum(
[part_perimeter(part) for part in self._vertices])
return self._arclen
@property
def segments(self):
"""
Returns the segments that compose the Chain
"""
return [[LineSegment(a, b) for (a, b) in zip(part[:-1], part[1:])] for part in self._vertices]
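# Hedged example (not part of the original API): each part of a Chain
# decomposes into consecutive LineSegments via the segments property above.
def _example_chain_segments():
    """
    >>> _example_chain_segments()
    3
    """
    c = Chain([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((2, 1))])
    return len(c.segments[0])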
class Ring(Geometry):
"""
Geometric representation of a Linear Ring
    Linear Rings must be closed: the first and last point must be the same. Open rings will be closed automatically.
This class exists primarily as a geometric primitive to form complex polygons with multiple rings and holes.
The ordering of the vertices is ignored and will not be altered.
Parameters
----------
vertices : list -- a list of vertices
Attributes
    ----------
vertices : list
List of Points with the vertices of the ring
len : int
Number of vertices
perimeter : float
Geometric length of the perimeter of the ring
bounding_box : Rectangle
Bounding box of the ring
area : float
area enclosed by the ring
centroid : tuple
        The centroid of the ring defined by the 'center of gravity' or 'center of mass'
"""
def __init__(self, vertices):
if vertices[0] != vertices[-1]:
vertices = vertices[:] + vertices[0:1]
#raise ValueError, "Supplied vertices do not form a closed ring, the first and last vertices are not the same"
self.vertices = tuple(vertices)
self._perimeter = None
self._bounding_box = None
self._area = None
self._centroid = None
def __len__(self):
return len(self.vertices)
@property
def len(self):
return len(self)
@staticmethod
def dist(v1, v2):
return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
@property
def perimeter(self):
if self._perimeter is None:
dist = self.dist
v = self.vertices
self._perimeter = sum([dist(v[i], v[i + 1])
for i in xrange(-1, len(self) - 1)])
return self._perimeter
@property
def bounding_box(self):
"""
Returns the bounding box of the ring
bounding_box -> Rectangle
Examples
--------
>>> r = Ring([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1)), Point((0,0))])
>>> r.bounding_box.left
0.0
>>> r.bounding_box.lower
0.0
>>> r.bounding_box.right
2.0
>>> r.bounding_box.upper
1.0
"""
if self._bounding_box is None:
vertices = self.vertices
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
self._bounding_box = Rectangle(min(x), min(y), max(x), max(y))
return self._bounding_box
@property
def area(self):
"""
Returns the area of the ring.
area -> number
Examples
--------
>>> r = Ring([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1)), Point((0,0))])
>>> r.area
2.0
"""
return abs(self.signed_area)
@property
def signed_area(self):
if self._area is None:
vertices = self.vertices
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
N = len(self)
A = 0.0
for i in xrange(N - 1):
A += (x[i] + x[i + 1]) * \
(y[i] - y[i + 1])
A = A * 0.5
self._area = -A
return self._area
@property
def centroid(self):
"""
Returns the centroid of the ring.
centroid -> Point
Notes
-----
The centroid returned by this method is the geometric centroid.
Also known as the 'center of gravity' or 'center of mass'.
Examples
--------
>>> r = Ring([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1)), Point((0,0))])
>>> str(r.centroid)
'(1.0, 0.5)'
"""
if self._centroid is None:
vertices = self.vertices
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
A = self.signed_area
N = len(self)
cx = 0
cy = 0
for i in xrange(N - 1):
f = (x[i] * y[i + 1] - x[i + 1] * y[i])
cx += (x[i] + x[i + 1]) * f
cy += (y[i] + y[i + 1]) * f
cx = 1.0 / (6 * A) * cx
cy = 1.0 / (6 * A) * cy
self._centroid = Point((cx, cy))
return self._centroid
def contains_point(self, point):
"""
Point containment using winding number
Implementation based on: http://www.engr.colostate.edu/~dga/dga/papers/point_in_polygon.pdf
"""
x, y = point
# bbox check
if x < self.bounding_box.left:
return False
if x > self.bounding_box.right:
return False
if y < self.bounding_box.lower:
return False
if y > self.bounding_box.upper:
return False
rn = len(self.vertices)
xs = [ self.vertices[i][0] - point[0] for i in xrange(rn) ]
ys = [ self.vertices[i][1] - point[1] for i in xrange(rn) ]
w = 0
for i in xrange(len(self.vertices) - 1):
yi = ys[i]
yj = ys[i+1]
xi = xs[i]
xj = xs[i+1]
if yi*yj < 0:
r = xi + yi * (xj-xi) / (yi - yj)
if r > 0:
if yi < 0:
w += 1
else:
w -= 1
elif yi==0 and xi > 0:
if yj > 0:
w += 0.5
else:
w -= 0.5
elif yj == 0 and xj > 0:
if yi < 0:
w += 0.5
else:
w -= 0.5
if w==0:
return False
else:
return True
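# Hedged example (not part of the original API): contains_point implements the
# winding-number test described above; boundary points may behave
# unpredictably, so only clearly interior and exterior locations are probed.
def _example_ring_containment():
    """
    >>> _example_ring_containment()
    (True, False)
    """
    r = Ring([Point((0, 0)), Point((2, 0)), Point((2, 2)), Point((0, 2)),
              Point((0, 0))])
    return r.contains_point((1, 1)), r.contains_point((3, 1))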
class Polygon(Geometry):
"""
Geometric representation of polygon objects.
Attributes
----------
vertices : list
List of Points with the vertices of the Polygon in
clockwise order
len : int
Number of vertices including holes
perimeter : float
Geometric length of the perimeter of the Polygon
bounding_box : Rectangle
Bounding box of the polygon
bbox : List
[left, lower, right, upper]
area : float
Area enclosed by the polygon
centroid : tuple
The 'center of gravity', i.e. the mean point of the polygon.
"""
def __init__(self, vertices, holes=None):
"""
Returns a polygon created from the objects specified.
__init__(Point list or list of Point lists, holes list ) -> Polygon
Parameters
----------
vertices : list -- a list of vertices or a list of lists of vertices.
holes : list -- a list of sub-polygons to be considered as holes.
Attributes
----------
Examples
--------
>>> p1 = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
"""
self._part_rings = []
self._hole_rings = []
def clockwise(part):
if standalone.is_clockwise(part):
return part[:]
else:
return part[::-1]
if isinstance(vertices[0], list):
self._part_rings = map(Ring, vertices)
self._vertices = [clockwise(part) for part in vertices]
else:
self._part_rings = [Ring(vertices)]
self._vertices = [clockwise(vertices)]
if holes is not None and holes != []:
if isinstance(holes[0], list):
self._hole_rings = map(Ring, holes)
self._holes = [clockwise(hole) for hole in holes]
else:
self._hole_rings = [Ring(holes)]
self._holes = [clockwise(holes)]
else:
self._holes = [[]]
self._reset_props()
@classmethod
def __from_geo_interface__(cls, geo):
"""
        While pysal does not differentiate polygons and multipolygons, GEOS, Shapely, and geoJSON do.
        In GEOS etc., a polygon may only have a single exterior ring; all other parts are holes.
MultiPolygons are simply a list of polygons.
"""
geo_type = geo['type'].lower()
if geo_type == 'multipolygon':
parts = []
holes = []
for polygon in geo['coordinates']:
verts = [[Point(pt) for pt in part] for part in polygon]
parts += verts[0:1]
holes += verts[1:]
if not holes:
holes = None
return cls(parts, holes)
else:
verts = [[Point(pt) for pt in part] for part in geo['coordinates']]
return cls(verts[0:1], verts[1:])
@property
def __geo_interface__(self):
if len(self.parts) > 1:
geo = {'type': 'MultiPolygon', 'coordinates': [[
part] for part in self.parts]}
if self._holes[0]:
geo['coordinates'][0] += self._holes
return geo
if self._holes[0]:
return {'type': 'Polygon', 'coordinates': self._vertices + self._holes}
else:
return {'type': 'Polygon', 'coordinates': self._vertices}
def _reset_props(self):
self._perimeter = None
self._bounding_box = None
self._bbox = None
self._area = None
self._centroid = None
self._len = None
def __len__(self):
return self.len
@property
def len(self):
"""
Returns the number of vertices in the polygon.
len -> int
Attributes
----------
Examples
--------
>>> p1 = Polygon([Point((0, 0)), Point((0, 1)), Point((1, 1)), Point((1, 0))])
>>> p1.len
4
>>> len(p1)
4
"""
if self._len is None:
self._len = len(self.vertices)
return self._len
@property
def vertices(self):
"""
Returns the vertices of the polygon in clockwise order.
vertices -> Point list
Attributes
----------
Examples
--------
>>> p1 = Polygon([Point((0, 0)), Point((0, 1)), Point((1, 1)), Point((1, 0))])
>>> len(p1.vertices)
4
"""
return sum([part for part in self._vertices], []) + sum([part for part in self._holes], [])
@property
def holes(self):
"""
Returns the holes of the polygon in clockwise order.
holes -> Point list
Attributes
----------
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((10, 0)), Point((10, 10)), Point((0, 10))], [Point((1, 2)), Point((2, 2)), Point((2, 1)), Point((1, 1))])
>>> len(p.holes)
1
"""
return [[v for v in part] for part in self._holes]
@property
def parts(self):
"""
Returns the parts of the polygon in clockwise order.
parts -> Point list
Attributes
----------
Examples
--------
>>> p = Polygon([[Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))], [Point((2,1)),Point((2,2)),Point((1,2)),Point((1,1))]])
>>> len(p.parts)
2
"""
return [[v for v in part] for part in self._vertices]
@property
def perimeter(self):
"""
Returns the perimeter of the polygon.
perimeter() -> number
Attributes
----------
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
>>> p.perimeter
4.0
"""
def dist(v1, v2):
return math.hypot(v1[0] - v2[0], v1[1] - v2[1])
def part_perimeter(part):
return sum([dist(part[i], part[i + 1]) for i in xrange(-1, len(part) - 1)])
if self._perimeter is None:
self._perimeter = (sum([part_perimeter(part) for part in self._vertices]) +
sum([part_perimeter(hole) for hole in self._holes]))
return self._perimeter
@property
def bbox(self):
"""
Returns the bounding box of the polygon as a list
See also bounding_box
"""
if self._bbox is None:
self._bbox = [ self.bounding_box.left,
self.bounding_box.lower,
self.bounding_box.right,
self.bounding_box.upper]
return self._bbox
@property
def bounding_box(self):
"""
Returns the bounding box of the polygon.
bounding_box -> Rectangle
Attributes
----------
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((2, 0)), Point((2, 1)), Point((0, 1))])
>>> p.bounding_box.left
0.0
>>> p.bounding_box.lower
0.0
>>> p.bounding_box.right
2.0
>>> p.bounding_box.upper
1.0
"""
if self._bounding_box is None:
vertices = self.vertices
self._bounding_box = Rectangle(
min([v[0] for v in vertices]), min([v[1] for v in vertices]),
max([v[0] for v in vertices]), max([v[1] for v in vertices]))
return self._bounding_box
@property
def area(self):
"""
Returns the area of the polygon.
area -> number
Attributes
----------
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))])
>>> p.area
1.0
>>> p = Polygon([Point((0, 0)), Point((10, 0)), Point((10, 10)), Point((0, 10))],[Point((2,1)),Point((2,2)),Point((1,2)),Point((1,1))])
>>> p.area
99.0
"""
def part_area(part_verts):
area = 0
for i in xrange(-1, len(part_verts) - 1):
area += (part_verts[i][0] + part_verts[i + 1][0]) * \
(part_verts[i][1] - part_verts[i + 1][1])
area = area * 0.5
if area < 0:
area = -area
return area
return (sum([part_area(part) for part in self._vertices]) -
sum([part_area(hole) for hole in self._holes]))
@property
def centroid(self):
"""
Returns the centroid of the polygon
centroid -> Point
Notes
-----
The centroid returned by this method is the geometric centroid and respects multipart polygons with holes.
Also known as the 'center of gravity' or 'center of mass'.
Examples
--------
>>> p = Polygon([Point((0, 0)), Point((10, 0)), Point((10, 10)), Point((0, 10))], [Point((1, 1)), Point((1, 2)), Point((2, 2)), Point((2, 1))])
>>> p.centroid
(5.0353535353535355, 5.0353535353535355)
"""
CP = [ring.centroid for ring in self._part_rings]
AP = [ring.area for ring in self._part_rings]
CH = [ring.centroid for ring in self._hole_rings]
AH = [-ring.area for ring in self._hole_rings]
A = AP + AH
cx = sum([pt[0] * area for pt, area in zip(CP + CH, A)]) / sum(A)
cy = sum([pt[1] * area for pt, area in zip(CP + CH, A)]) / sum(A)
return cx, cy
def contains_point(self, point):
"""
Test if polygon contains point
Examples
--------
>>> p = Polygon([Point((0,0)), Point((4,0)), Point((4,5)), Point((2,3)), Point((0,5))])
>>> p.contains_point((3,3))
1
>>> p.contains_point((0,6))
0
>>> p.contains_point((2,2.9))
1
>>> p.contains_point((4,5))
0
>>> p.contains_point((4,0))
0
>>>
Handles holes
>>> p = Polygon([Point((0, 0)), Point((0, 10)), Point((10, 10)), Point((10, 0))], [Point((2, 2)), Point((4, 2)), Point((4, 4)), Point((2, 4))])
>>> p.contains_point((3.0,3.0))
False
>>> p.contains_point((1.0,1.0))
True
>>>
Notes
-----
Points falling exactly on polygon edges may yield unpredictable
results
"""
for ring in self._hole_rings:
if ring.contains_point(point):
return False
for ring in self._part_rings:
if ring.contains_point(point):
return True
return False
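# Hedged example (not part of the original API): holes subtract from the area
# and are excluded by contains_point, per the ring-based logic above.
def _example_polygon_with_hole():
    """
    >>> _example_polygon_with_hole()
    (99.0, False)
    """
    p = Polygon([Point((0, 0)), Point((10, 0)), Point((10, 10)),
                 Point((0, 10))],
                [Point((2, 1)), Point((2, 2)), Point((1, 2)), Point((1, 1))])
    return p.area, p.contains_point((1.5, 1.5))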
class Rectangle(Geometry):
"""
Geometric representation of rectangle objects.
Attributes
----------
left : float
Minimum x-value of the rectangle
lower : float
Minimum y-value of the rectangle
right : float
Maximum x-value of the rectangle
upper : float
Maximum y-value of the rectangle
"""
def __init__(self, left, lower, right, upper):
"""
Returns a Rectangle object.
__init__(number, number, number, number) -> Rectangle
Parameters
----------
left : the minimum x-value of the rectangle
lower : the minimum y-value of the rectangle
right : the maximum x-value of the rectangle
upper : the maximum y-value of the rectangle
Attributes
----------
Examples
--------
>>> r = Rectangle(-4, 3, 10, 17)
>>> r.left #minx
-4.0
>>> r.lower #miny
3.0
>>> r.right #maxx
10.0
>>> r.upper #maxy
17.0
"""
if right < left or upper < lower:
            raise ArithmeticError('Rectangle must have non-negative area.')
self.left = float(left)
self.lower = float(lower)
self.right = float(right)
self.upper = float(upper)
def __nonzero__(self):
"""
        __nonzero__ is used "to implement truth value testing and the built-in operation bool()" -- http://docs.python.org/reference/datamodel.html
        Rectangles will evaluate to False if they have zero area.
>>> r = Rectangle(0,0,0,0)
>>> bool(r)
False
>>> r = Rectangle(0,0,1,1)
>>> bool(r)
True
"""
return bool(self.area)
def __eq__(self, other):
if other:
return self[:] == other[:]
return False
def __add__(self, other):
        return Rectangle(min(self.left, other.left), min(self.lower, other.lower),
                         max(self.right, other.right), max(self.upper, other.upper))
def __getitem__(self, key):
"""
>>> r = Rectangle(-4, 3, 10, 17)
>>> r[:]
[-4.0, 3.0, 10.0, 17.0]
"""
l = [self.left, self.lower, self.right, self.upper]
return l.__getitem__(key)
def set_centroid(self, new_center):
"""
Moves the rectangle center to a new specified point.
set_centroid(Point) -> Point
Parameters
----------
new_center : the new location of the centroid of the polygon
Attributes
----------
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.set_centroid(Point((4, 4)))
>>> r.left
2.0
>>> r.right
6.0
>>> r.lower
2.0
>>> r.upper
6.0
"""
shift = (new_center[0] - (self.left + self.right) / 2,
new_center[1] - (self.lower + self.upper) / 2)
self.left = self.left + shift[0]
self.right = self.right + shift[0]
self.lower = self.lower + shift[1]
self.upper = self.upper + shift[1]
def set_scale(self, scale):
"""
Rescales the rectangle around its center.
set_scale(number) -> number
Parameters
----------
scale : the ratio of the new scale to the old scale (e.g. 1.0 is current size)
Attributes
----------
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.set_scale(2)
>>> r.left
-2.0
>>> r.right
6.0
>>> r.lower
-2.0
>>> r.upper
6.0
"""
center = ((self.left + self.right) / 2, (self.lower + self.upper) / 2)
self.left = center[0] + scale * (self.left - center[0])
self.right = center[0] + scale * (self.right - center[0])
self.lower = center[1] + scale * (self.lower - center[1])
self.upper = center[1] + scale * (self.upper - center[1])
@property
def area(self):
"""
Returns the area of the Rectangle.
area -> number
Attributes
----------
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.area
16.0
"""
return (self.right - self.left) * (self.upper - self.lower)
@property
def width(self):
"""
Returns the width of the Rectangle.
width -> number
Attributes
----------
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.width
4.0
"""
return self.right - self.left
@property
def height(self):
"""
Returns the height of the Rectangle.
height -> number
Examples
--------
>>> r = Rectangle(0, 0, 4, 4)
>>> r.height
4.0
"""
return self.upper - self.lower
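# Hedged example (not part of the original API): __add__ returns the union
# bounding box of its operands, as the min/max logic above implies.
def _example_rect_union():
    """
    >>> _example_rect_union()[:]
    [0.0, 0.0, 5.0, 5.0]
    """
    return Rectangle(0, 0, 2, 2) + Rectangle(1, 1, 5, 5)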
_geoJSON_type_to_Pysal_type = {'point': Point, 'linestring': Chain, 'multilinestring': Chain,
'polygon': Polygon, 'multipolygon': Polygon}
import standalone # moving this to top breaks unit tests !
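# Hedged example (not part of the original API): asShape accepts any
# GeoJSON-like mapping that follows the __geo_interface__ convention used
# throughout this module.
def _example_asshape():
    """
    >>> _example_asshape().area
    1.0
    """
    geo = {'type': 'Polygon',
           'coordinates': [[(0, 0), (1, 0), (1, 1), (0, 1)]]}
    return asShape(geo)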
|
dataxu/ansible
|
refs/heads/dx-stable-2.5
|
test/integration/targets/aws_lambda/files/mini_lambda.py
|
139
|
from __future__ import print_function
import json
import os
def handler(event, context):
"""
The handler function is the function which gets called each time
the lambda is run.
"""
    # printing goes to the CloudWatch log, letting us debug the lambda simply
    # by finding the log entry.
print("got event:\n" + json.dumps(event))
    # if the name parameter isn't present this will throw an exception,
    # resulting in an Amazon-chosen failure response from the lambda,
    # which can be completely fine.
name = event["name"]
# we can use environment variables as part of the configuration of the lambda
# which can change the behaviour of the lambda without needing a new upload
extra = os.environ.get("EXTRA_MESSAGE")
if extra is not None and len(extra) > 0:
greeting = "hello {0}. {1}".format(name, extra)
else:
greeting = "hello " + name
return {"message": greeting}
def main():
"""
    This main function is never called during normal lambda use.
    It is here only for testing the lambda program locally.
"""
event = {"name": "james"}
context = None
print(handler(event, context))
if __name__ == '__main__':
main()
|
mewtaylor/django
|
refs/heads/master
|
tests/auth_tests/test_tokens.py
|
297
|
import unittest
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase
from django.utils.six import PY3
class TokenGeneratorTest(TestCase):
def test_make_token(self):
"""
Ensure that we can make a token and that it is valid
"""
user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertTrue(p0.check_token(user, tk1))
def test_10265(self):
"""
Ensure that the token generated for a user created in the same request
will work correctly.
"""
# See ticket #10265
user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
reload = User.objects.get(username='comebackkid')
tk2 = p0.make_token(reload)
self.assertEqual(tk1, tk2)
def test_timeout(self):
"""
        Ensure we can use the token for up to PASSWORD_RESET_TIMEOUT_DAYS days,
        but not longer.
"""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'today'
class Mocked(PasswordResetTokenGenerator):
def __init__(self, today):
self._today_val = today
def _today(self):
return self._today_val
user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
self.assertTrue(p1.check_token(user, tk1))
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
@unittest.skipIf(PY3, "Unnecessary test with Python 3")
def test_date_length(self):
"""
Make sure we don't allow overly long dates, causing a potential DoS.
"""
user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd')
p0 = PasswordResetTokenGenerator()
# This will put a 14-digit base36 timestamp into the token, which is too large.
self.assertRaises(ValueError,
p0._make_token_with_timestamp,
user, 175455491841851871349)
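# Hedged sketch (illustrative, not part of the suite): the leading token field
# exercised by test_date_length is a base36-encoded day count. This assumes
# the generator's day-count epoch of 2001-01-01, which may vary by version.
def _example_timestamp_b36():
    from django.utils.http import int_to_base36
    days = (date.today() - date(2001, 1, 1)).days
    return int_to_base36(days)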
|