| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
SportySpice/Collections
|
refs/heads/master
|
src/li/types/YoutubeCategoryVisual.py
|
1
|
class YoutubeCategoryVisual(object):

    def __init__(self, textSettings, customTitle=None, ctHasPageNum=False):
        self.textSettings = textSettings
        self.customTitle = customTitle
        self.ctHasPageNum = ctHasPageNum

    def title(self, category, pageNum):
        if self.customTitle:
            title = self.customTitle
            if self.ctHasPageNum:
                title = title % pageNum
        else:
            title = category.title
        title = self.textSettings.apply(title)
        return title
|
hyqneuron/pylearn2-maxsom
|
refs/heads/master
|
pylearn2/scripts/print_monitor_cv.py
|
12
|
#!/usr/bin/env python
"""
Print (average) channel values for a collection of models, such as that
serialized by TrainCV. Based on print_monitor.py.

usage: print_monitor_cv.py model.pkl [-a]
"""
from __future__ import print_function

__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"

import argparse
import numpy as np

from pylearn2.utils import serial


def main(models, all=False):
    """
    Print (average) final channel values for a collection of models.

    Parameters
    ----------
    models : list
        Filename(s) for models to analyze.
    all : bool, optional (default False)
        Whether to output values for all models. If False, only averages
        and standard deviations across all models are displayed.
    """
    epochs = []
    time = []
    values = {}
    for filename in np.atleast_1d(models):
        this_models = serial.load(filename)
        for model in list(this_models):
            monitor = model.monitor
            channels = monitor.channels
            epochs.append(monitor._epochs_seen)
            time.append(max(channels[key].time_record[-1] for key in channels))
            for key in sorted(channels.keys()):
                if key not in values:
                    values[key] = []
                values[key].append(channels[key].val_record[-1])
    n_models = len(epochs)
    print('number of models: {0}'.format(n_models))
    if n_models > 1:
        if all:
            print('\nepochs seen:\n{0}\n{1} +/- {2}'.format(np.asarray(epochs),
                                                            np.mean(epochs),
                                                            np.std(epochs)))
            print('\ntraining time:\n{0}\n{1} +/- {2}'.format(np.asarray(time),
                                                              np.mean(time),
                                                              np.std(time)))
        else:
            print('epochs seen: {0} +/- {1}'.format(np.mean(epochs),
                                                    np.std(epochs)))
            print('training time: {0} +/- {1}'.format(np.mean(time),
                                                      np.std(time)))
        for key in sorted(values.keys()):
            if all:
                print('\n{0}:\n{1}\n{2} +/- {3}'.format(
                    key, np.asarray(values[key]),
                    np.mean(values[key]), np.std(values[key])))
            else:
                print('{0}: {1} +/- {2}'.format(key, np.mean(values[key]),
                                                np.std(values[key])))
    else:
        print('epochs seen: {0}'.format(epochs[0]))
        print('training time: {0}'.format(time[0]))
        for key in sorted(values.keys()):
            print('{0}: {1}'.format(key, values[key][0]))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('models', nargs='+',
                        help='Model or models to analyze.')
    parser.add_argument('-a', '--all', action='store_true',
                        help='Print values for all models instead of ' +
                             'averages.')
    args = parser.parse_args()
    main(**vars(args))
|
equialgo/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/tests/test_nmf.py
|
28
|
import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm, fast_dot
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = fast_dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has no NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
|
Cyberbio-Lab/bcbio-nextgen
|
refs/heads/master
|
bcbio/pipeline/disambiguate/run.py
|
2
|
#!/usr/bin/env python
"""
This is the main function to call for disambiguating between BAM files
from two species that have alignments from the same source of fastq files.
It is part of the explant RNA/DNA-Seq workflow where an informatics
approach is used to distinguish between e.g. human and mouse or rat RNA/DNA reads.
For reads that have aligned to both organisms, the functionality is based on
comparing quality scores from either Tophat, Hisat2, STAR or BWA. Read
name is used to collect all alignments for both mates (_1 and _2) and
compared between the alignments from the two species.
For Tophat (default, can be changed using option -a) and Hisat2, the sum of the flags XO,
NM and NH is evaluated and the lowest sum wins the paired end reads. For equal
scores, the reads are assigned as ambiguous.
The alternative algorithm (STAR, bwa) disambiguates (for aligned reads) by tags
AS (alignment score, higher better), followed by NM (edit distance, lower
better).
Code by Miika Ahdesmaki July-August 2013, based on original Perl implementation
for Tophat by Zhongwu Lai.
Included in bcbio-nextgen from: https://github.com/mjafin/disambiguate
"""
from __future__ import print_function
import sys, re, pysam
from array import array
from os import path, makedirs
from argparse import ArgumentParser, RawTextHelpFormatter
# "natural comparison" for strings
def nat_cmp(a, b):
convert = lambda text: int(text) if text.isdigit() else text # lambda function to convert text to int if number present
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] # split string to piecewise strings and string numbers
#return cmp(alphanum_key(a), alphanum_key(b)) # use internal cmp to compare piecewise strings and numbers
return (alphanum_key(a) > alphanum_key(b))-(alphanum_key(a) < alphanum_key(b))
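# Added example (not in the original file), showing how nat_cmp orders read
# names by comparing digit runs numerically rather than lexicographically:
#   nat_cmp("read2", "read10") == -1   # "read2" sorts before "read10"
#   nat_cmp("read10", "read2") == 1
#   nat_cmp("read7", "read7") == 0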
# read reads into a list object for as long as the read qname is constant (sorted file). Return the first read with new qname or None
def read_next_reads(fileobject, listobject):
qnamediff = False
while not qnamediff:
try:
myRead=fileobject.next()
except StopIteration:
#print("5")
return None # return None as the name of the new reads (i.e. no more new reads)
if nat_cmp(myRead.qname, listobject[0].qname)==0:
listobject.append(myRead)
else:
qnamediff = True
return myRead # this is the first read with a new qname
# disambiguate between two lists of reads
def disambiguate(humanlist, mouselist, disambalgo):
if disambalgo in ['tophat','hisat2']:
dv = 2**13 # a high quality score to replace missing quality scores (no real quality score should be this high)
sa = array('i',(dv for i in range(0,4))) # score array, with [human_1_QS, human_2_QS, mouse_1_QS, mouse_2_QS]
for read in humanlist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
QScore = read.opt('XO') + read.opt('NM') + read.opt('NH')
# directionality (_1 or _2)
d12 = 0 if 0x40&read.flag else 1
if sa[d12]>QScore:
sa[d12]=QScore # update to lowest (i.e. 'best') quality score
for read in mouselist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
QScore = read.opt('XO') + read.opt('NM') + read.opt('NH')
# directionality (_1 or _2)
d12 = 2 if 0x40&read.flag else 3
if sa[d12]>QScore:
sa[d12]=QScore # update to lowest (i.e. 'best') quality score
if min(sa[0:2])==min(sa[2:4]) and max(sa[0:2])==max(sa[2:4]): # ambiguous
return 0
elif min(sa[0:2]) < min(sa[2:4]) or min(sa[0:2]) == min(sa[2:4]) and max(sa[0:2]) < max(sa[2:4]):
# assign to human
return 1
else:
# assign to mouse
return -1
elif disambalgo.lower() in ('bwa', 'star'):
dv = -2**13 # default value, low (exponentiation intended; plain ^ would be bitwise XOR)
bwatags = ['AS', 'NM']# ,'XS'] # in order of importance (compared sequentially, not as a sum as for tophat)
bwatagsigns = [1, -1]#,1] # for AS and XS higher is better. for NM lower is better, thus multiply by -1
AS = list()
for x in range(0, len(bwatagsigns)):
AS.append(array('i',(dv for i in range(0,4)))) # alignment score array, with [human_1_Score, human_2_Score, mouse_1_Score, mouse_2_Score]
#
for read in humanlist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
# directionality (_1 or _2)
d12 = 0 if 0x40&read.flag else 1
for x in range(0, len(bwatagsigns)):
try:
QScore = bwatagsigns[x]*read.opt(bwatags[x])
except KeyError:
if bwatags[x] == 'NM':
bwatags[x] = 'nM' # oddity of STAR
elif bwatags[x] == 'AS':
continue # this can happen for e.g. hg38 ALT-alignments (missing AS)
QScore = bwatagsigns[x]*read.opt(bwatags[x])
if AS[x][d12]<QScore:
AS[x][d12]=QScore # update to highest (i.e. 'best') quality score
#
for read in mouselist:
if 0x4&read.flag: # flag 0x4 means unaligned
continue
# directionality (_1 or _2)
d12 = 2 if 0x40&read.flag else 3
for x in range(0, len(bwatagsigns)):
try:
QScore = bwatagsigns[x]*read.opt(bwatags[x])
except KeyError:
if bwatags[x] == 'NM':
bwatags[x] = 'nM' # oddity of STAR
elif bwatags[x] == 'AS':
continue # this can happen for e.g. hg38 ALT-alignments (missing AS)
QScore = bwatagsigns[x]*read.opt(bwatags[x])
if AS[x][d12]<QScore:
AS[x][d12]=QScore # update to highest (i.e. 'best') quality score
#
for x in range(0, len(bwatagsigns)):
if max(AS[x][0:2]) > max(AS[x][2:4]) or max(AS[x][0:2]) == max(AS[x][2:4]) and min(AS[x][0:2]) > min(AS[x][2:4]):
# assign to human
return 1
elif max(AS[x][0:2]) < max(AS[x][2:4]) or max(AS[x][0:2]) == max(AS[x][2:4]) and min(AS[x][0:2]) < min(AS[x][2:4]):
# assign to mouse
return -1
return 0 # ambiguous
else:
print("Not implemented yet")
sys.exit(2)
#code
def main(args):
numhum = nummou = numamb = 0
#starttime = time.clock()
# parse inputs
humanfilename = args.A
mousefilename = args.B
samplenameprefix = args.prefix
outputdir = args.output_dir
intermdir = args.intermediate_dir
disablesort = args.no_sort
disambalgo = args.aligner
supportedalgorithms = set(['tophat', 'hisat2', 'bwa', 'star'])
# check existence of input BAM files
if not (file_exists(humanfilename) and file_exists(mousefilename)):
sys.stderr.write("\nERROR in disambiguate.py: Two existing input BAM files "
"must be specified as positional arguments\n")
sys.exit(2)
if len(samplenameprefix) < 1:
humanprefix = path.basename(humanfilename.replace(".bam",""))
mouseprefix = path.basename(mousefilename.replace(".bam",""))
else:
if samplenameprefix.endswith(".bam"):
samplenameprefix = samplenameprefix[0:samplenameprefix.rfind(".bam")] # the above if is not strictly necessary for this to work
humanprefix = samplenameprefix
mouseprefix = samplenameprefix
samplenameprefix = None # clear variable
if disambalgo.lower() not in supportedalgorithms:
print(disambalgo+" is not a supported disambiguation scheme at the moment.")
sys.exit(2)
if disablesort:
humanfilenamesorted = humanfilename # assumed to be sorted externally...
mousefilenamesorted = mousefilename # assumed to be sorted externally...
else:
if not path.isdir(intermdir):
makedirs(intermdir)
humanfilenamesorted = path.join(intermdir,humanprefix+".speciesA.namesorted.bam")
mousefilenamesorted = path.join(intermdir,mouseprefix+".speciesB.namesorted.bam")
if not path.isfile(humanfilenamesorted):
pysam.sort("-n","-m","2000000000",humanfilename,humanfilenamesorted.replace(".bam",""))
if not path.isfile(mousefilenamesorted):
pysam.sort("-n","-m","2000000000",mousefilename,mousefilenamesorted.replace(".bam",""))
# read in human reads and form a dictionary
myHumanFile = pysam.Samfile(humanfilenamesorted, "rb" )
myMouseFile = pysam.Samfile(mousefilenamesorted, "rb" )
if not path.isdir(outputdir):
makedirs(outputdir)
myHumanUniqueFile = pysam.Samfile(path.join(outputdir, humanprefix+".disambiguatedSpeciesA.bam"), "wb", template=myHumanFile)
myHumanAmbiguousFile = pysam.Samfile(path.join(outputdir, humanprefix+".ambiguousSpeciesA.bam"), "wb", template=myHumanFile)
myMouseUniqueFile = pysam.Samfile(path.join(outputdir, mouseprefix+".disambiguatedSpeciesB.bam"), "wb", template=myMouseFile)
myMouseAmbiguousFile = pysam.Samfile(path.join(outputdir, mouseprefix+".ambiguousSpeciesB.bam"), "wb", template=myMouseFile)
summaryFile = open(path.join(outputdir,humanprefix+'_summary.txt'),'w')
#initialise
try:
nexthumread=myHumanFile.next()
nextmouread=myMouseFile.next()
except StopIteration:
print("No reads in one or either of the input files")
sys.exit(2)
EOFmouse = EOFhuman = False
prevHumID = '-+=RANDOMSTRING=+-'
prevMouID = '-+=RANDOMSTRING=+-'
while not EOFmouse&EOFhuman:
while not (nat_cmp(nexthumread.qname,nextmouread.qname) == 0):
# check order between current human and mouse qname (find a point where they're identical, i.e. in sync)
while nat_cmp(nexthumread.qname,nextmouread.qname) > 0 and not EOFmouse: # mouse is "behind" human, output to mouse disambiguous
myMouseUniqueFile.write(nextmouread)
if not nextmouread.qname == prevMouID:
nummou+=1 # increment mouse counter for unique only
prevMouID = nextmouread.qname
try:
nextmouread=myMouseFile.next()
except StopIteration:
EOFmouse=True
while nat_cmp(nexthumread.qname,nextmouread.qname) < 0 and not EOFhuman: # human is "behind" mouse, output to human disambiguous
myHumanUniqueFile.write(nexthumread)
if not nexthumread.qname == prevHumID:
numhum+=1 # increment human counter for unique only
prevHumID = nexthumread.qname
try:
nexthumread=myHumanFile.next()
except StopIteration:
EOFhuman=True
if EOFhuman or EOFmouse:
break
# at this point the read qnames are identical and/or we've reached EOF
humlist = list()
moulist = list()
if nat_cmp(nexthumread.qname,nextmouread.qname) == 0:
humlist.append(nexthumread)
nexthumread = read_next_reads(myHumanFile, humlist) # read more reads with same qname (the function modifies humlist directly)
if nexthumread == None:
EOFhuman = True
moulist.append(nextmouread)
nextmouread = read_next_reads(myMouseFile, moulist) # read more reads with same qname (the function modifies moulist directly)
if nextmouread == None:
EOFmouse = True
# perform comparison to check mouse, human or ambiguous
if len(moulist) > 0 and len(humlist) > 0:
myAmbiguousness = disambiguate(humlist, moulist, disambalgo)
if myAmbiguousness < 0: # mouse
nummou+=1 # increment mouse counter
for myRead in moulist:
myMouseUniqueFile.write(myRead)
elif myAmbiguousness > 0: # human
numhum+=1 # increment human counter
for myRead in humlist:
myHumanUniqueFile.write(myRead)
else: # ambiguous
numamb+=1 # increment ambiguous counter
for myRead in moulist:
myMouseAmbiguousFile.write(myRead)
for myRead in humlist:
myHumanAmbiguousFile.write(myRead)
if EOFhuman:
#flush the rest of the mouse reads
while not EOFmouse:
myMouseUniqueFile.write(nextmouread)
if not nextmouread.qname == prevMouID:
nummou+=1 # increment mouse counter for unique only
prevMouID = nextmouread.qname
try:
nextmouread=myMouseFile.next()
except StopIteration:
#print("3")
EOFmouse=True
if EOFmouse:
#flush the rest of the human reads
while not EOFhuman:
myHumanUniqueFile.write(nexthumread)
if not nexthumread.qname == prevHumID:
numhum+=1 # increment human counter for unique only
prevHumID = nexthumread.qname
try:
nexthumread=myHumanFile.next()
except StopIteration:
EOFhuman=True
summaryFile.write("sample\tunique species A pairs\tunique species B pairs\tambiguous pairs\n")
summaryFile.write(humanprefix+"\t"+str(numhum)+"\t"+str(nummou)+"\t"+str(numamb)+"\n")
summaryFile.close()
myHumanFile.close()
myMouseFile.close()
myHumanUniqueFile.close()
myHumanAmbiguousFile.close()
myMouseUniqueFile.close()
myMouseAmbiguousFile.close()
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
return path.exists(fname) and path.getsize(fname) > 0
if __name__ == "__main__":
description = """
disambiguate.py disambiguates between two organisms that have alignments
from the same source of fastq files. An example where this might be
useful is as part of an explant RNA/DNA-Seq workflow where an informatics
approach is used to distinguish between human and mouse RNA/DNA reads.
For reads that have aligned to both organisms, the functionality is based on
comparing quality scores from either Tophat or BWA. Read
name is used to collect all alignments for both mates (_1 and _2) and
compared between human and mouse alignments.
For Tophat (default, can be changed using option -a), the sum of the tags XO,
NM and NH is evaluated and the lowest sum wins the paired end reads. For equal
scores (both mates, both species), the reads are assigned as ambiguous.
The alternative algorithm (STAR, bwa) disambiguates (for aligned reads) by tags
AS (alignment score, higher better), followed by NM (edit distance, lower
better).
The output directory will contain four files:\n
...disambiguatedSpeciesA.bam: Reads that could be assigned to species A
...disambiguatedSpeciesB.bam: Reads that could be assigned to species B
...ambiguousSpeciesA.bam: Reads aligned to species A that also aligned \n\tto B but could not be uniquely assigned to either
...ambiguousSpeciesB.bam: Reads aligned to species B that also aligned \n\tto A but could not be uniquely assigned to either
..._summary.txt: A summary of unique read names assigned to species A, B \n\tand ambiguous.
Examples:
disambiguate.py test/human.bam test/mouse.bam
disambiguate.py -s mysample1 test/human.bam test/mouse.bam
"""
parser = ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
parser.add_argument('A', help='Input BAM file for species A.')
parser.add_argument('B', help='Input BAM file for species B.')
parser.add_argument('-o', '--output-dir', default="disambres",
help='Output directory.')
parser.add_argument('-i', '--intermediate-dir', default="intermfiles",
help='Location to store intermediate files')
parser.add_argument('-d', '--no-sort', action='store_true', default=False,
help='Disable BAM file sorting. Use this option if the '
'files have already been name sorted.')
parser.add_argument('-s', '--prefix', default='',
help='A prefix (e.g. sample name) to use for the output '
'BAM files. If not provided, the input BAM file prefix '
'will be used. Do not include .bam in the prefix.')
parser.add_argument('-a', '--aligner', default='tophat',
choices=('tophat', 'hisat2', 'bwa', 'star'),
help='The aligner used to generate these reads. Some '
'aligners set different tags.')
args = parser.parse_args()
main(args)
|
desec-io/desec-stack
|
refs/heads/20210701_security_headers
|
api/desecapi/migrations/0013_user_needs_captcha.py
|
1
|
# Generated by Django 3.1.5 on 2021-01-19 15:43

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('desecapi', '0012_rrset_label_length'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='needs_captcha',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='user',
            name='needs_captcha',
            field=models.BooleanField(default=True),
        ),
    ]
|
xzturn/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/ackermann_test.py
|
21
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.framework import load_library
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class AckermannTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testBasic(self):
    library_filename = os.path.join(resource_loader.get_data_files_path(),
                                    'ackermann_op.so')
    ackermann = load_library.load_op_library(library_filename)

    with self.cached_session():
      self.assertEqual(ackermann.ackermann().eval(), b'A(m, 0) == A(m-1, 1)')


if __name__ == '__main__':
  test.main()
|
BlindHunter/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/migrations/0003_a3.py
|
282
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('lookuperror_c', '0002_c2'),
        ('lookuperror_b', '0002_b2'),
        ('lookuperror_a', '0002_a2'),
    ]

    operations = [
        migrations.CreateModel(
            name='A3',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('b2', models.ForeignKey('lookuperror_b.B2', models.CASCADE)),
                ('c2', models.ForeignKey('lookuperror_c.C2', models.CASCADE)),
            ],
        ),
    ]
|
dylan-reeves/home_backup
|
refs/heads/master
|
backupclient-env/Lib/encodings/cp1252.py
|
272
|
""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='cp1252',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
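# Hedged usage sketch (added; not emitted by gencodec.py): once the encodings
# package registers this module, the tables below drive conversions such as
#   b'\x80'.decode('cp1252') == '\u20ac'   # EURO SIGN
#   '\u20ac'.encode('cp1252') == b'\x80'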
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
johnkeepmoving/oss-ftp
|
refs/heads/master
|
python27/win32/Lib/idlelib/RemoteObjectBrowser.py
|
127
|
from idlelib import rpc
def remote_object_tree_item(item):
    wrapper = WrappedObjectTreeItem(item)
    oid = id(wrapper)
    rpc.objecttable[oid] = wrapper
    return oid

class WrappedObjectTreeItem:
    # Lives in PYTHON subprocess

    def __init__(self, item):
        self.__item = item

    def __getattr__(self, name):
        value = getattr(self.__item, name)
        return value

    def _GetSubList(self):
        list = self.__item._GetSubList()
        return map(remote_object_tree_item, list)

class StubObjectTreeItem:
    # Lives in IDLE process

    def __init__(self, sockio, oid):
        self.sockio = sockio
        self.oid = oid

    def __getattr__(self, name):
        value = rpc.MethodProxy(self.sockio, self.oid, name)
        return value

    def _GetSubList(self):
        list = self.sockio.remotecall(self.oid, "_GetSubList", (), {})
        return [StubObjectTreeItem(self.sockio, oid) for oid in list]
|
josephcslater/scipy
|
refs/heads/master
|
scipy/linalg/tests/test_solvers.py
|
5
|
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import TestCase, run_module_suite
from numpy.testing import assert_raises, assert_array_almost_equal
from numpy.testing.noseclasses import KnownFailureTest
from scipy.linalg import solve_sylvester
from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
from scipy.linalg import solve_continuous_are, solve_discrete_are
from scipy.linalg import block_diag, solve
class TestSolveLyapunov(TestCase):
cases = [
(np.array([[1, 2], [3, 4]]),
np.array([[9, 10], [11, 12]])),
# a, q all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a real; q complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a complex; q real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0, 2.0], [-1.0, 2.0]])),
# An example from Kitagawa, 1977
(np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
[1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
[0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
# Companion matrix example. a complex; q real; a.shape[0] = 11
(np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
0.010+0.j],
[1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j]]),
np.eye(11)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
]
def test_continuous_squareness_and_shape(self):
nsq = np.ones((3, 2))
sq = np.eye(3)
assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))
def check_continuous_case(self, a, q):
x = solve_continuous_lyapunov(a, q)
assert_array_almost_equal(
np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
def check_discrete_case(self, a, q, method=None):
x = solve_discrete_lyapunov(a, q, method=method)
assert_array_almost_equal(
np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)
def test_cases(self):
for case in self.cases:
self.check_continuous_case(case[0], case[1])
self.check_discrete_case(case[0], case[1])
self.check_discrete_case(case[0], case[1], method='direct')
self.check_discrete_case(case[0], case[1], method='bilinear')
def test_solve_continuous_are():
mat6 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'carex_6_data.npz'))
mat15 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'carex_15_data.npz'))
mat18 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'carex_18_data.npz'))
mat19 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'carex_19_data.npz'))
mat20 = np.load(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', 'carex_20_data.npz'))
cases = [
# Carex examples taken from (with default parameters):
# [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
# Examples for the Numerical Solution of Algebraic Riccati
# Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
# Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
#
# The format of the data is (a, b, q, r, knownfailure), where
# knownfailure is None if the test passes or a string
# indicating the reason for failure.
#
# Test Case 0: carex #1
(np.diag([1.], 1),
np.array([[0], [1]]),
block_diag(1., 2.),
1,
None),
# Test Case 1: carex #2
(np.array([[4, 3], [-4.5, -3.5]]),
np.array([[1], [-1]]),
np.array([[9, 6], [6, 4.]]),
1,
None),
# Test Case 2: carex #3
(np.array([[0, 1, 0, 0],
[0, -1.89, 0.39, -5.53],
[0, -0.034, -2.98, 2.43],
[0.034, -0.0011, -0.99, -0.21]]),
np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
np.array([[2.313, 2.727, 0.688, 0.023],
[2.727, 4.271, 1.148, 0.323],
[0.688, 1.148, 0.313, 0.102],
[0.023, 0.323, 0.102, 0.083]]),
np.eye(2),
None),
# Test Case 3: carex #4
(np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
[0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
[0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
[0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
[0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
[0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
[0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
[0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
[-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
).T * 0.001,
np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
[0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
[0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
np.eye(2),
None),
# Test Case 4: carex #5
(np.array(
[[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
[-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
[-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
[-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
[-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
[0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
[0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
[0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
[0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
np.array([[0.010, -0.011, -0.151],
[0.003, -0.021, 0.000],
[0.009, -0.059, 0.000],
[0.024, -0.162, 0.000],
[0.068, -0.445, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
np.eye(9),
np.eye(3),
None),
# Test Case 5: carex #6
(mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
# Test Case 6: carex #7
(np.array([[1, 0], [0, -2.]]),
np.array([[1e-6], [0]]),
np.ones((2, 2)),
1.,
'Bad residual accuracy'),
# Test Case 7: carex #8
(block_diag(-0.1, -0.02),
np.array([[0.100, 0.000], [0.001, 0.010]]),
np.array([[100, 1000], [1000, 10000]]),
np.ones((2, 2)) + block_diag(1e-6, 0),
None),
# Test Case 8: carex #9
(np.array([[0, 1e6], [0, 0]]),
np.array([[0], [1.]]),
np.eye(2),
1.,
None),
# Test Case 9: carex #10
(np.array([[1.0000001, 1], [1., 1.0000001]]),
np.eye(2),
np.eye(2),
np.eye(2),
None),
# Test Case 10: carex #11
(np.array([[3, 1.], [4, 2]]),
np.array([[1], [1]]),
np.array([[-11, -5], [-5, -2.]]),
1.,
None),
# Test Case 11: carex #12
(np.array([[7000000., 2000000., -0.],
[2000000., 6000000., -2000000.],
[0., -2000000., 5000000.]]) / 3,
np.eye(3),
np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
np.diag([1e-6, 1, 1e6])).dot(
np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
np.eye(3) * 1e6,
'Bad Residual Accuracy'),
# Test Case 12: carex #13
(np.array([[0, 0.4, 0, 0],
[0, 0, 0.345, 0],
[0, -0.524e6, -0.465e6, 0.262e6],
[0, 0, 0, -1e6]]),
np.array([[0, 0, 0, 1e6]]).T,
np.diag([1, 0, 1, 0]),
1.,
None),
# Test Case 13: carex #14
(np.array([[-1e-6, 1, 0, 0],
[-1, -1e-6, 0, 0],
[0, 0, 1e-6, 1],
[0, 0, -1, 1e-6]]),
np.ones((4, 1)),
np.ones((4, 4)),
1.,
None),
# Test Case 14: carex #15
(mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
# Test Case 15: carex #16
(np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
np.eye(64),
np.eye(64),
np.eye(64),
None),
# Test Case 16: carex #17
(np.diag(np.ones((20, )), 1),
np.flipud(np.eye(21, 1)),
np.eye(21, 1) * np.eye(21, 1).T,
1,
'Bad Residual Accuracy'),
# Test Case 17: carex #18
(mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
# Test Case 18: carex #19
(mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
'Bad Residual Accuracy'),
# Test Case 19: carex #20
(mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
'Bad Residual Accuracy')
]
# Makes the minimum precision requirements customized to the test.
# Here numbers represent the number of decimals that agrees with zero
# matrix when the solution x is plugged in to the equation.
#
# res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
#
# If the test is failing use "None" for that entry.
#
min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
None, 9, 14, 13, 14, None, 12, None, None)
def _test_factory(case, dec):
"""Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
a, b, q, r, knownfailure = case
if knownfailure:
raise KnownFailureTest(knownfailure)
x = solve_continuous_are(a, b, q, r)
res = x.dot(a) + a.conj().T.dot(x) + q
out_fact = x.dot(b)
res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
yield _test_factory, case, min_decimal[ind]
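# Illustrative sketch (not part of the benchmark set above): a minimal
# standalone use of solve_continuous_are on a hypothetical double-integrator
# system, kept in comments so the test module's behaviour is unchanged:
#
#     a = np.array([[0., 1.], [0., 0.]])
#     b = np.array([[0.], [1.]])
#     q, r = np.eye(2), np.array([[1.]])
#     x = solve_continuous_are(a, b, q, r)
#     # the residual XA + A'X - XB(R)^{-1}B'X + Q should then be ~0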
def test_solve_discrete_are():
cases = [
# Darex examples taken from (with default parameters):
# [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
# Examples for the Numerical Solution of Algebraic Riccati
# Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
# Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
# [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
# Discrete-Time Algebraic Riccati Equation to Enhance Stability
# of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
#
# The format of the data is (a, b, q, r, knownfailure), where
# knownfailure is None if the test passes or a string
# indicating the reason for failure.
#
# TEST CASE 0 : Complex a; real b, q, r
(np.array([[2, 1-2j], [0, -3j]]),
np.array([[0], [1]]),
np.array([[1, 0], [0, 2]]),
np.array([[1]]),
None),
# TEST CASE 1 :Real a, q, r; complex b
(np.array([[2, 1], [0, -1]]),
np.array([[-2j], [1j]]),
np.array([[1, 0], [0, 2]]),
np.array([[1]]),
None),
# TEST CASE 2 : Real a, b; complex q, r
(np.array([[3, 1], [0, -1]]),
np.array([[1, 2], [1, 3]]),
np.array([[1, 1+1j], [1-1j, 2]]),
np.array([[2, -2j], [2j, 3]]),
None),
# TEST CASE 3 : User-reported gh-2251 (Trac #1732)
(np.array([[0.63399379, 0.54906824, 0.76253406],
[0.5404729, 0.53745766, 0.08731853],
[0.27524045, 0.84922129, 0.4681622]]),
np.array([[0.96861695], [0.05532739], [0.78934047]]),
np.eye(3),
np.eye(1),
None),
# TEST CASE 4 : darex #1
(np.array([[4, 3], [-4.5, -3.5]]),
np.array([[1], [-1]]),
np.array([[9, 6], [6, 4]]),
np.array([[1]]),
None),
# TEST CASE 5 : darex #2
(np.array([[0.9512, 0], [0, 0.9048]]),
np.array([[4.877, 4.877], [-1.1895, 3.569]]),
np.array([[0.005, 0], [0, 0.02]]),
np.array([[1/3, 0], [0, 3]]),
None),
# TEST CASE 6 : darex #3
(np.array([[2, -1], [1, 0]]),
np.array([[1], [0]]),
np.array([[0, 0], [0, 1]]),
np.array([[0]]),
None),
# TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
(np.array([[0, 1], [0, -1]]),
np.array([[1, 0], [2, 1]]),
np.array([[-4, -4], [-4, 7]]) * (1/11),
np.array([[9, 3], [3, 1]]),
None),
# TEST CASE 8 : darex #5
(np.array([[0, 1], [0, 0]]),
np.array([[0], [1]]),
np.array([[1, 2], [2, 4]]),
np.array([[1]]),
None),
# TEST CASE 9 : darex #6
(np.array([[0.998, 0.067, 0, 0],
[-.067, 0.998, 0, 0],
[0, 0, 0.998, 0.153],
[0, 0, -.153, 0.998]]),
np.array([[0.0033, 0.0200],
[0.1000, -.0007],
[0.0400, 0.0073],
[-.0028, 0.1000]]),
np.array([[1.87, 0, 0, -0.244],
[0, 0.744, 0.205, 0],
[0, 0.205, 0.589, 0],
[-0.244, 0, 0, 1.048]]),
np.eye(2),
None),
# TEST CASE 10 : darex #7
(np.array([[0.984750, -.079903, 0.0009054, -.0010765],
[0.041588, 0.998990, -.0358550, 0.0126840],
[-.546620, 0.044916, -.3299100, 0.1931800],
[2.662400, -.100450, -.9245500, -.2632500]]),
np.array([[0.0037112, 0.0007361],
[-.0870510, 9.3411e-6],
[-1.198440, -4.1378e-4],
[-3.192700, 9.2535e-4]]),
np.eye(4)*1e-2,
np.eye(2),
None),
# TEST CASE 11 : darex #8
(np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
[1.0000000, 0.6000000, 0.8000000, 3.3999820],
[0.0000000, 1.0000000, 1.8000000, 3.7999820],
[0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
np.array([[1.0, -1.0, -1.0, -1.0],
[0.0, 1.0, -1.0, -1.0],
[0.0, 0.0, 1.0, -1.0],
[0.0, 0.0, 0.0, 1.0]]),
np.array([[2, 1, 3, 6],
[1, 2, 2, 5],
[3, 2, 6, 11],
[6, 5, 11, 22]]),
np.eye(4),
None),
# TEST CASE 12 : darex #9
(np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
[40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
[12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
[4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
[0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
np.array([[0.0434, -0.0122],
[2.6606, -1.0453],
[3.7530, -5.5100],
[3.6076, -6.6000],
[0.4617, -0.9148]]) * 0.01,
np.eye(5),
np.eye(2),
None),
# TEST CASE 13 : darex #10
(np.kron(np.eye(2), np.diag([1, 1], k=1)),
np.kron(np.eye(2), np.array([[0], [0], [1]])),
np.array([[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0],
[0, 0, 0, -1, 1, 0],
[0, 0, 0, 0, 0, 0]]),
np.array([[3, 0], [0, 1]]),
None),
# TEST CASE 14 : darex #11
(0.001 * np.array(
[[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
[76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
[-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
[-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
[-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
[-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
[-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
[-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
[-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
np.array([[4.7600, -0.5701, -83.6800],
[0.8790, -4.7730, -2.7300],
[1.4820, -13.1200, 8.8760],
[3.8920, -35.1300, 24.8000],
[10.3400, -92.7500, 66.8000],
[7.2030, -61.5900, 38.3400],
[4.4540, -36.8300, 20.2900],
[1.9710, -15.5400, 6.9370],
[3.7730, -30.2800, 14.6900]]) * 0.001,
np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
np.eye(3),
None),
# TEST CASE 15 : darex #12 - numerically least accurate example
(np.array([[0, 1e6], [0, 0]]),
np.array([[0], [1]]),
np.eye(2),
np.array([[1]]),
None),
# TEST CASE 16 : darex #13
(np.array([[16, 10, -2],
[10, 13, -8],
[-2, -8, 7]]) * (1/9),
np.eye(3),
1e6 * np.eye(3),
1e6 * np.eye(3),
None),
# TEST CASE 17 : darex #14
(np.array([[1 - 1/1e8, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]),
np.array([[1e-08], [0], [0], [0]]),
np.diag([0, 0, 0, 1]),
np.array([[0.25]]),
None),
# TEST CASE 18 : darex #15
(np.eye(100, k=1),
np.flipud(np.eye(100, 1)),
np.eye(100),
np.array([[1]]),
None)
]
    # Set the minimum precision requirements individually for each test case.
    # Each number is the number of decimals to which the residual agrees
    # with the zero matrix when the solution x is plugged into the equation.
#
# res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
#
# If the test is failing use "None" for that entry.
#
min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 15, 13,
14, 13, 13, 14, 12, 2, 5, 6, 10)
def _test_factory(case, dec):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, knownfailure = case
if knownfailure:
raise KnownFailureTest(knownfailure)
x = solve_discrete_are(a, b, q, r)
res = a.conj().T.dot(x.dot(a)) - x + q
res -= a.conj().T.dot(x.dot(b)).dot(
solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
)
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
yield _test_factory, case, min_decimal[ind]
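# Illustrative sketch (not part of the benchmark set above): a minimal
# standalone use of solve_discrete_are on a hypothetical single-input system,
# kept in comments so the test module's behaviour is unchanged:
#
#     a = np.array([[1., 1.], [0., 1.]])
#     b = np.array([[0.], [1.]])
#     q, r = np.eye(2), np.array([[1.]])
#     x = solve_discrete_are(a, b, q, r)
#     # the residual A'XA - X - A'XB(R+B'XB)^{-1}B'XA + Q should then be ~0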
def test_solve_generalized_continuous_are():
cases = [
# Two random examples differ by s term
# in the absence of any literature for demanding examples.
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.zeros((3, 2)),
None),
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.ones((3, 2)),
None)
]
min_decimal = (10, 10)
def _test_factory(case, dec):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, e, s, knownfailure = case
if knownfailure:
raise KnownFailureTest(knownfailure)
x = solve_continuous_are(a, b, q, r, e, s)
res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
out_fact = e.conj().T.dot(x).dot(b) + s
res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
yield _test_factory, case, min_decimal[ind]
def test_solve_generalized_discrete_are():
mat20170120 = np.load(os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'data',
'gendare_20170120_data.npz'))
cases = [
# Two random examples differ by s term
# in the absence of any literature for demanding examples.
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.zeros((3, 2)),
None),
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.ones((3, 2)),
None),
# user-reported (under PR-6616) 20-Jan-2017
# tests against the case where E is None but S is provided
(mat20170120['A'],
mat20170120['B'],
mat20170120['Q'],
mat20170120['R'],
None,
mat20170120['S'],
None),
]
min_decimal = (11, 11, 16)
def _test_factory(case, dec):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, e, s, knownfailure = case
if knownfailure:
raise KnownFailureTest(knownfailure)
x = solve_discrete_are(a, b, q, r, e, s)
if e is None:
e = np.eye(a.shape[0])
if s is None:
s = np.zeros_like(b)
res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
res -= (a.conj().T.dot(x.dot(b)) + s).dot(
solve(r+b.conj().T.dot(x.dot(b)),
(b.conj().T.dot(x.dot(a)) + s.conj().T)
)
)
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
yield _test_factory, case, min_decimal[ind]
def test_are_validate_args():
def test_square_shape():
nsq = np.ones((3, 2))
sq = np.eye(3)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, nsq, 1, 1, 1)
assert_raises(ValueError, x, sq, sq, nsq, 1)
assert_raises(ValueError, x, sq, sq, sq, nsq)
assert_raises(ValueError, x, sq, sq, sq, sq, nsq)
def test_compatible_sizes():
nsq = np.ones((3, 2))
sq = np.eye(4)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, sq, nsq, 1, 1)
assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))
def test_symmetry():
nsym = np.arange(9).reshape(3, 3)
sym = np.eye(3)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, sym, sym, nsym, sym)
assert_raises(ValueError, x, sym, sym, sym, nsym)
def test_singularity():
sing = 1e12 * np.ones((3, 3))
sing[2, 2] -= 1
sq = np.eye(3)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, sq, sq, sq, sq, sing)
assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)
def test_finiteness():
nm = np.ones((2, 2)) * np.nan
sq = np.eye(2)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, nm, sq, sq, sq)
assert_raises(ValueError, x, sq, nm, sq, sq)
assert_raises(ValueError, x, sq, sq, nm, sq)
assert_raises(ValueError, x, sq, sq, sq, nm)
assert_raises(ValueError, x, sq, sq, sq, sq, nm)
assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)
class TestSolveSylvester(TestCase):
cases = [
# a, b, c all real.
(np.array([[1, 2], [0, 4]]),
np.array([[5, 6], [0, 8]]),
np.array([[9, 10], [11, 12]])),
# a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their
# quasi-triangular form.
(np.array([[1.0, 0, 0, 0],
[0, 1.0, 2.0, 0.0],
[0, 0, 3.0, -4],
[0, 0, 2, 5]]),
np.array([[2.0, 0, 0, 1.0],
[0, 1.0, 0.0, 0.0],
[0, 0, 1.0, -1],
[0, 0, 1, 1]]),
np.array([[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 1.0]])),
# a, b, c all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 2j], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a and b real; c complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a and c complex; b real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a complex; b and c real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0, 2.0], [-1.0, 2.0]])),
# not square matrices, real
(np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5]]),
np.array([[1, 2], [3, 4], [5, 6]])),
# not square matrices, complex
(np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5-1j]]),
np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
]
def check_case(self, a, b, c):
x = solve_sylvester(a, b, c)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2])
def test_trivial(self):
a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0]])
c = np.array([2.0, 2.0]).reshape(-1, 1)
x = solve_sylvester(a, b, c)
assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
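# Illustrative sketch (not one of the cases above): solve_sylvester finds X
# satisfying A X + X B = C; a hypothetical 2x2 example, kept in comments:
#
#     a = np.array([[1., 2.], [0., 3.]])
#     b = np.array([[4., 0.], [1., 5.]])
#     c = np.array([[1., 0.], [0., 1.]])
#     x = solve_sylvester(a, b, c)
#     # np.dot(a, x) + np.dot(x, b) should reproduce c to rounding error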
if __name__ == "__main__":
run_module_suite()
|
bakkou-badri/dataminingproject
|
refs/heads/master
|
env/lib/python2.7/site-packages/pkg_resources.py
|
134
|
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
except NameError:
basestring = str
from io import BytesIO
def execfile(fn, globs=None, locs=None):
if globs is None:
globs = globals()
if locs is None:
locs = globs
exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF): # 0777
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
_state_vars = {}
def _declare_state(vartype, **kw):
g = globals()
for name, val in kw.items():
g[name] = val
_state_vars[name] = vartype
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
pass # not Mac OS X
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
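# Usage sketch: a hypothetical provider registration for a custom PEP 302
# loader type (names are illustrative only, shown as comments):
#
#     class MyLoader(object):
#         ...
#     class MyProvider(NullProvider):
#         ...
#     register_loader_type(MyLoader, MyProvider)
#
# Afterwards, get_provider() returns a MyProvider instance for any module
# whose __loader__ is a MyLoader.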
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq,Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
return True # easy case
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
#import warnings
#warnings.warn("Mac eggs should be rebuilt to "
# "use the macosx designation instead of darwin.",
# category=DeprecationWarning)
return True
return False # egg isn't macosx or legacy darwin
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
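# A few concrete cases following the logic above (worked out by hand from
# this implementation, shown as comments):
#
#     compatible_platforms(None, 'linux-x86_64')                        -> True
#     compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64') -> True
#     compatible_platforms('macosx-10.12-x86_64', 'macosx-10.9-x86_64') -> False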
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist,basestring): dist = Requirement.parse(dist)
if isinstance(dist,Requirement): dist = get_provider(dist)
if not isinstance(dist,Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self,dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
raise VersionConflict(dist,req) # XXX add more info
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
requirements = list(requirements)[::-1] # set up the stack
processed = {} # set of processed requirements
best = {} # key -> dist
to_activate = []
while requirements:
req = requirements.pop(0) # process dependencies breadth-first
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
raise VersionConflict(dist,req) # XXX put more info here
requirements.extend(dist.requires(req.extras)[::-1])
processed[req] = True
return to_activate # return list of distros to activate
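    # Usage sketch for resolve(), mirroring what require() does below
    # (the requirement string is hypothetical; shown as comments):
    #
    #     reqs = parse_requirements("example-dist>=1.0")
    #     for dist in working_set.resolve(reqs):
    #         working_set.add(dist)
    #
    # resolve() raises DistributionNotFound or VersionConflict on failure,
    # as described in the docstring above.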
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
list(map(shadow_set.add, self)) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError:
v = sys.exc_info()[1]
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname,subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
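# Worked examples for the helpers above (derived by hand from the regexes;
# shown as comments):
#
#     safe_name('my.pkg name')  -> 'my.pkg-name'
#     safe_version('2.1 rc1')   -> '2.1.rc1'
#     safe_extra('Foo Bar')     -> 'foo_bar'
#     to_filename('my-pkg')     -> 'my_pkg'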
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': lambda: sys.version.split()[0],
'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError:
return cls.normalize_exception(sys.exc_info()[1])
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
raise SyntaxError("Language feature not supported in environment markers")
@classmethod
def comparison(cls, nodelist):
if len(nodelist)>4:
raise SyntaxError("Chained comparison not allowed in environment markers")
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'parser' module, which is not implemented on
Jython and has been superseded by the 'ast' module in Python 2.6 and
later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
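    # Usage sketch (results depend on the running interpreter; only the
    # ==, !=, in and not in operators are supported by this implementation):
    #
    #     MarkerEvaluation.evaluate_marker("os_name == 'posix'")       # True on POSIX
    #     MarkerEvaluation.evaluate_marker("'linux' in sys_platform")  # True on Linux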
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError:
e = sys.exc_info()[1]
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
or '\\' in s:
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
raise SyntaxError("Language feature not supported in environment markers")
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info,name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self,resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self,name):
return self.egg_info and self._isdir(self._fn(self.egg_info,name))
def resource_listdir(self,resource_name):
return self._listdir(self._fn(self.module_path,resource_name))
def metadata_listdir(self,name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info,name))
return []
def run_script(self,script_name,namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n','\n')
script_text = script_text.replace('\r','\n')
script_filename = self._fn(self.egg_info,script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
execfile(script_filename, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text,script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self,module):
NullProvider.__init__(self,module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self,path):
return os.path.isdir(path)
def _listdir(self,path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
stream = open(path, 'rb')
try:
return stream.read()
finally:
stream.close()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self,path: False
_get = lambda self,path: ''
_listdir = lambda self,path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
def build_zipmanifest(path):
"""
This builds a similar dictionary to the zipimport directory
caches. However instead of tuples, ZipInfo objects are stored.
The translation of the tuple is as follows:
* [0] - zipinfo.filename; on stock Pythons this needs "/" --> os.sep,
        on PyPy it is already os.sep (one reason why distribute did work
        in some cases on PyPy and win32).
* [1] - zipinfo.compress_type
* [2] - zipinfo.compress_size
* [3] - zipinfo.file_size
* [4] - len(utf-8 encoding of filename) if the 0x800 (UTF-8) flag bit
        is set in zipinfo.flag_bits, len(ascii encoding of filename) otherwise
* [5] - (zipinfo.date_time[0] - 1980) << 9 |
zipinfo.date_time[1] << 5 | zipinfo.date_time[2]
* [6] - (zipinfo.date_time[3] - 1980) << 11 |
zipinfo.date_time[4] << 5 | (zipinfo.date_time[5] // 2)
* [7] - zipinfo.CRC
"""
zipinfo = dict()
zfile = zipfile.ZipFile(path)
# ZipFile has no __exit__ on Python 3.1, so close it explicitly in a finally block
try:
for zitem in zfile.namelist():
zpath = zitem.replace('/', os.sep)
zipinfo[zpath] = zfile.getinfo(zitem)
assert zipinfo[zpath] is not None
finally:
zfile.close()
return zipinfo
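# Editorial sketch of the resulting manifest (assuming a zip containing
# "pkg/data.txt"): keys are archive-relative paths using os.sep separators and
# values are the corresponding ZipInfo objects, e.g. on POSIX:
#
#     manifest = build_zipmanifest("dist/example.egg")   # hypothetical path
#     manifest["pkg/data.txt"].file_size                  # -> size in bytes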
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
def __init__(self, module):
EggProvider.__init__(self,module)
self.zipinfo = build_zipmanifest(self.loader.archive)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.zip_pre)
)
def _parts(self,zip_path):
# Convert a zipfile subpath into an egg-relative path part list
fspath = self.zip_pre+zip_path # pseudo-fs path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.egg_root)
)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
date_time = zip_stat.date_time + (0, 0, -1) # ymdhms+wday, yday, dst
# zipinfo.date_time already holds the full year, so no 1980 offset needs to be applied
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
return os.path.dirname(last) # return the extracted directory name
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp,timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
elif os.name=='nt': # Windows, del old file and retry
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
manager.extraction_error() # report a user-friendly error
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
f = open(file_path, 'rb')
file_contents = f.read()
f.close()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self,fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self,fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.egg_root,resource_name))
def _resource_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self,path):
self.path = path
def has_metadata(self,name):
return name=='PKG-INFO'
def get_metadata(self,name):
if name=='PKG-INFO':
f = open(self.path,'rU')
metadata = f.read()
f.close()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zipinfo = build_zipmanifest(importer.archive)
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
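# Editorial example (not part of the original module): a distribution finder is
# just a callable taking (importer, path_item, only) and yielding Distribution
# objects; a do-nothing finder for a hypothetical importer type could be
# registered as:
#
#     def find_none(importer, path_item, only=False):
#         return iter(())
#     register_finder(MyImporter, find_none)    # MyImporter is hypothetical
#
# The real registrations (find_eggs_in_zip, find_on_path) appear below.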
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
return # don't yield nested distros
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item,entry,metadata,precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
for dist in find_distributions(os.path.join(path_item, entry)):
yield dist
elif not only and lower.endswith('.egg-link'):
entry_file = open(os.path.join(path_item, entry))
try:
entry_lines = entry_file.readlines()
finally:
entry_file.close()
for line in entry_lines:
if not line.strip(): continue
for item in find_distributions(os.path.join(path_item,line.rstrip())):
yield item
break
register_finder(pkgutil.ImpImporter,find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
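# Editorial note: a namespace handler receives (importer, path_entry,
# moduleName, module) and returns the subpath to add to the package __path__,
# or None; ``file_ns_handler`` further below is the handler actually registered
# for filesystem and zipfile importers.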
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
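# Usage note (editorial): namespace packages conventionally invoke this from
# their __init__.py, e.g.:
#
#     __import__('pkg_resources').declare_namespace(__name__)
#
# which registers the package and merges the path entries contributed by every
# installed distribution declaring the same namespace.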
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath: fixup_namespace_packages(subpath,package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
if isinstance(strs,basestring):
for s in strs.splitlines():
s = s.strip()
if s and not s.startswith('#'): # skip blank lines/comments
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
alphabetically follows "final" represent a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them, and "dev" is replaced with an '@' so that it sorts lower than
any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
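# Editorial illustration (not part of the original module) of the ordering the
# docstring above describes:
#
#     parse_version("2.4a1") < parse_version("2.4")     # pre-releases sort first
#     parse_version("2.4")   < parse_version("2.4-1")   # "-" marks a patch level
#     parse_version("2.4-1") < parse_version("2.4.1")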
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
list(map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer)))
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
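# Editorial example (module/attr names are hypothetical):
#
#     ep = EntryPoint.parse("main = mypkg.cli:run [extra1]")
#     ep.name, ep.module_name, ep.attrs, ep.extras
#     # -> ('main', 'mypkg.cli', ('run',), ('extra1',))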
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
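# Editorial note: parse_map() accepts the INI-like text of an entry_points.txt
# file, e.g. (names hypothetical):
#
#     [console_scripts]
#     mytool = mypkg.cli:main
#
# which yields {'console_scripts': {'mytool': <EntryPoint ...>}}.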
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()),
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':',1)
if invalid_marker(marker):
reqs=[] # XXX warn
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='setuptools':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
from email.parser import Parser
self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
necessary, and remove parenthesis.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
from warnings import warn
warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be an instance of ``basestring``, or a (possibly-nested)
iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
items = []
while not TERMINATOR(line,p):
if CONTINUE(line,p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line,p)
if not match:
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line,p)
if match:
p = match.end() # skip the comma
elif not TERMINATOR(line,p):
raise ValueError(
"Expected ',' or end-of-list in",line,"at",line[p:]
)
match = TERMINATOR(line,p)
if match: p = match.end() # skip the terminator, if any
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line,p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
specs = [(op,safe_version(val)) for op,val in specs]
yield Requirement(project_name, specs, extras)
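# Editorial example (project/extra names are hypothetical):
#
#     req = next(parse_requirements("Foo[bar]>=1.2,!=1.3"))
#     req.project_name, req.extras, req.specs
#     # -> ('Foo', ('bar',), [('>=', '1.2'), ('!=', '1.3')])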
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key != self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F':
return False
elif action=='T':
return True
elif action=='+':
last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs)==1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
state_machine = {
# =><
'<': '--T',
'<=': 'T-T',
'>': 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
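# Editorial explanation of the table above: for each spec, the three characters
# are the actions taken when the candidate version compares equal to, greater
# than, or less than the spec's parsed version (hence the "=><" header).
# 'T' accepts immediately, 'F' rejects immediately, '+'/'-' record a tentative
# accept/reject, and '.' makes no new decision (a tentative reject if nothing
# has been decided yet); Requirement.__contains__ above indexes these strings
# with the result of the comparison.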
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def split_sections(s):
"""Split a string or iterable thereof into (section,content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
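# Editorial example:
#
#     list(split_sections("[a]\nx\ny\n[b]\nz"))
#     # -> [('a', ['x', 'y']), ('b', ['z'])]
#
# Any lines appearing before the first "[section]" header are returned under a
# None key.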
def _mkstemp(*args,**kw):
from tempfile import mkstemp
old_open = os.open
try:
os.open = os_open # temporarily bypass sandboxing
return mkstemp(*args,**kw)
finally:
os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
|
mrrrgn/olympia
|
refs/heads/master
|
apps/users/management/__init__.py
|
12133432
| |
alfredodeza/boto
|
refs/heads/develop
|
tests/unit/manage/__init__.py
|
12133432
| |
sklnet/opendroid-enigma2
|
refs/heads/master
|
lib/python/Plugins/Extensions/__init__.py
|
12133432
| |
haeusser/tensorflow
|
refs/heads/master
|
tensorflow/python/util/protobuf/__init__.py
|
12133432
| |
Tejal011089/huntercamp_erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/leave_block_list_date/__init__.py
|
12133432
| |
JacobFischer/Joueur.py
|
refs/heads/master
|
_creer/games/${underscore(game_name)}/__init__.py
|
1
|
# DO NOT MODIFY THESE IMPORTS
from games.${underscore(game_name)}.ai import AI
from games.${underscore(game_name)}.game import Game
% for game_obj_key in sort_dict_keys(game_objs):
from games.${underscore(game_name)}.${underscore(game_obj_key)} import ${game_obj_key}
% endfor
${merge("# ", "init", "# if you need to initialize this module with custom logic do so here")}
|
egafford/sahara
|
refs/heads/master
|
sahara/service/edp/oozie/engine.py
|
2
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import xml.dom.minidom as xml
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from sahara import conductor as c
from sahara import context
from sahara.service.edp import base_engine
from sahara.service.edp.binary_retrievers import dispatch
from sahara.service.edp import hdfs_helper as h
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import oozie as o
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import remote
from sahara.utils import xmlutils as x
CONF = cfg.CONF
conductor = c.API
@six.add_metaclass(abc.ABCMeta)
class OozieJobEngine(base_engine.JobEngine):
def __init__(self, cluster):
self.cluster = cluster
self.plugin = job_utils.get_plugin(self.cluster)
def get_remote_client(self):
return o.RemoteOozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster),
self.get_hdfs_user())
def get_client(self):
# by default engine will return standard oozie client implementation
return o.OozieClient(self.get_oozie_server_uri(self.cluster),
self.get_oozie_server(self.cluster))
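    # Editorial note: the helper below assembles the dict of Oozie job
    # properties (nameNode, jobTracker, user.name, oozie.libpath,
    # oozie.use.system.libpath, and either oozie.wf.application.path or the
    # coordinator start/end/frequency parameters) that is later serialized with
    # x.create_hadoop_xml() and submitted through the Oozie client.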
def _get_oozie_job_params(self, hdfs_user, path_to_workflow,
oozie_params, use_hbase_lib,
scheduled_params=None, job_dir=None,
job_execution_type=None):
oozie_libpath_key = "oozie.libpath"
oozie_libpath = ""
rm_path = self.get_resource_manager_uri(self.cluster)
nn_path = self.get_name_node_uri(self.cluster)
hbase_common_lib_path = "%s%s" % (nn_path, h.HBASE_COMMON_LIB_PATH)
if use_hbase_lib:
if oozie_libpath_key in oozie_params:
oozie_libpath = "%s,%s" % (oozie_params.get(oozie_libpath_key,
""), hbase_common_lib_path)
else:
oozie_libpath = hbase_common_lib_path
if job_execution_type == "scheduled":
app_path = "oozie.coord.application.path"
job_parameters = {
"start": scheduled_params.get('start'),
"end": scheduled_params.get('end'),
"frequency": scheduled_params.get('frequency'),
"workflowAppUri": "%s%s" % (nn_path, job_dir),
app_path: "%s%s" % (nn_path, job_dir)}
else:
app_path = "oozie.wf.application.path"
job_parameters = {
app_path: "%s%s" % (nn_path, path_to_workflow)}
job_parameters["nameNode"] = nn_path
job_parameters["user.name"] = hdfs_user
job_parameters["jobTracker"] = rm_path
job_parameters[oozie_libpath_key] = oozie_libpath
job_parameters["oozie.use.system.libpath"] = "true"
# Don't let the application path be overwritten, that can't
# possibly make any sense
if app_path in oozie_params:
del oozie_params[app_path]
if oozie_libpath_key in oozie_params:
del oozie_params[oozie_libpath_key]
job_parameters.update(oozie_params)
return job_parameters
def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user)
return "%s/workflow.xml" % job_dir
def _upload_coordinator_file(self, where, job_dir, wf_xml, hdfs_user):
with remote.get_remote(where) as r:
h.put_file_to_hdfs(r, wf_xml, "coordinator.xml", job_dir,
hdfs_user)
return "%s/coordinator.xml" % job_dir
def cancel_job(self, job_execution):
if job_execution.engine_job_id is not None:
client = self.get_client()
client.kill_job(job_execution)
return client.get_job_info(job_execution)
def get_job_status(self, job_execution):
if job_execution.engine_job_id is not None:
return self.get_client().get_job_info(job_execution)
def _prepare_run_job(self, job_execution):
ctx = context.ctx()
# This will be a dictionary of tuples, (native_url, runtime_url)
# keyed by data_source id
data_source_urls = {}
prepared_job_params = {}
job = conductor.job_get(ctx, job_execution.job_id)
input_source, output_source = job_utils.get_data_sources(
job_execution, job, data_source_urls, self.cluster)
# Updated_job_configs will be a copy of job_execution.job_configs with
# any name or uuid references to data_sources resolved to paths
# assuming substitution is enabled.
# If substitution is not enabled then updated_job_configs will
# just be a reference to job_execution.job_configs to avoid a copy.
# Additional_sources will be a list of any data_sources found.
additional_sources, updated_job_configs = (
job_utils.resolve_data_source_references(job_execution.job_configs,
job_execution.id,
data_source_urls,
self.cluster)
)
job_execution = conductor.job_execution_update(
ctx, job_execution,
{"data_source_urls": job_utils.to_url_dict(data_source_urls)})
# Now that we've recorded the native urls, we can switch to the
# runtime urls
data_source_urls = job_utils.to_url_dict(data_source_urls,
runtime=True)
proxy_configs = updated_job_configs.get('proxy_configs')
configs = updated_job_configs.get('configs', {})
use_hbase_lib = configs.get('edp.hbase_common_lib', {})
# Extract all the 'oozie.' configs so that they can be set in the
# job properties file. These are config values for Oozie itself,
# not the job code
oozie_params = {}
for k in list(configs):
if k.startswith('oozie.'):
oozie_params[k] = configs[k]
for data_source in [input_source, output_source] + additional_sources:
if data_source and data_source.type == 'hdfs':
h.configure_cluster_for_hdfs(
self.cluster, data_source_urls[data_source.id])
break
external_hdfs_urls = self._resolve_external_hdfs_urls(
job_execution.job_configs)
for url in external_hdfs_urls:
h.configure_cluster_for_hdfs(self.cluster, url)
hdfs_user = self.get_hdfs_user()
# TODO(tmckay): this should probably be "get_namenode"
# but that call does not exist in the oozie engine api now.
oozie_server = self.get_oozie_server(self.cluster)
wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
proxy_configs)
wf_xml = workflow_factory.get_workflow_xml(
job, self.cluster, updated_job_configs,
input_source, output_source,
hdfs_user, data_source_urls)
path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
wf_xml, hdfs_user)
prepared_job_params['context'] = ctx
prepared_job_params['hdfs_user'] = hdfs_user
prepared_job_params['path_to_workflow'] = path_to_workflow
prepared_job_params['use_hbase_lib'] = use_hbase_lib
prepared_job_params['job_execution'] = job_execution
prepared_job_params['oozie_params'] = oozie_params
prepared_job_params['wf_dir'] = wf_dir
prepared_job_params['oozie_server'] = oozie_server
return prepared_job_params
def run_job(self, job_execution):
prepared_job_params = self._prepare_run_job(job_execution)
path_to_workflow = prepared_job_params['path_to_workflow']
hdfs_user = prepared_job_params['hdfs_user']
oozie_params = prepared_job_params['oozie_params']
use_hbase_lib = prepared_job_params['use_hbase_lib']
ctx = prepared_job_params['context']
job_execution = prepared_job_params['job_execution']
job_params = self._get_oozie_job_params(hdfs_user,
path_to_workflow,
oozie_params,
use_hbase_lib)
client = self.get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
conductor.job_execution_update(
context.ctx(), job_execution.id,
{'info': {'status': edp.JOB_STATUS_READYTORUN},
'engine_job_id': oozie_job_id})
client.run_job(job_execution, oozie_job_id)
try:
status = client.get_job_info(job_execution, oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
def run_scheduled_job(self, job_execution):
prepared_job_params = self._prepare_run_job(job_execution)
oozie_server = prepared_job_params['oozie_server']
wf_dir = prepared_job_params['wf_dir']
hdfs_user = prepared_job_params['hdfs_user']
oozie_params = prepared_job_params['oozie_params']
use_hbase_lib = prepared_job_params['use_hbase_lib']
ctx = prepared_job_params['context']
job_execution = prepared_job_params['job_execution']
coord_configs = {"jobTracker": "${jobTracker}",
"nameNode": "${nameNode}"}
coord_xml = self._create_coordinator_xml(coord_configs)
self._upload_coordinator_file(oozie_server, wf_dir, coord_xml,
hdfs_user)
job_params = self._get_oozie_job_params(
hdfs_user, None, oozie_params, use_hbase_lib,
job_execution.job_configs.job_execution_info, wf_dir,
"scheduled")
client = self.get_client()
oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
job_execution)
job_execution = conductor.job_execution_get(ctx, job_execution.id)
if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
return (None, edp.JOB_STATUS_KILLED, None)
try:
status = client.get_job_status(job_execution,
oozie_job_id)['status']
except Exception:
status = None
return (oozie_job_id, status, None)
@abc.abstractmethod
def get_hdfs_user(self):
pass
@abc.abstractmethod
def create_hdfs_dir(self, remote, dir_name):
pass
@abc.abstractmethod
def get_oozie_server_uri(self, cluster):
pass
@abc.abstractmethod
def get_oozie_server(self, cluster):
pass
@abc.abstractmethod
def get_name_node_uri(self, cluster):
pass
@abc.abstractmethod
def get_resource_manager_uri(self, cluster):
pass
def validate_job_execution(self, cluster, job, data):
# Shell job type requires no specific fields
if job.type == edp.JOB_TYPE_SHELL:
return
# All other types except Java require input and output
# objects and Java require main class
if job.type == edp.JOB_TYPE_JAVA:
j.check_main_class_present(data, job)
else:
j.check_data_sources(data, job)
job_type, subtype = edp.split_job_type(job.type)
if job_type == edp.JOB_TYPE_MAPREDUCE and (
subtype == edp.JOB_SUBTYPE_STREAMING):
j.check_streaming_present(data, job)
@staticmethod
def get_possible_job_config(job_type):
return workflow_factory.get_possible_job_config(job_type)
@staticmethod
def get_supported_job_types():
return [edp.JOB_TYPE_HIVE,
edp.JOB_TYPE_JAVA,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING,
edp.JOB_TYPE_PIG,
edp.JOB_TYPE_SHELL]
def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
proxy_configs=None):
mains = job.mains or []
libs = job.libs or []
builtin_libs = edp.get_builtin_binaries(job, configs)
uploaded_paths = []
hdfs_user = self.get_hdfs_user()
job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else ''
lib_dir = os.path.join(job_dir, job_dir_suffix)
with remote.get_remote(where) as r:
for main in mains:
raw_data = dispatch.get_raw_binary(
main, proxy_configs=proxy_configs, remote=r)
if isinstance(raw_data, dict) and raw_data["type"] == "path":
h.copy_from_local(r, raw_data['path'],
job_dir, hdfs_user)
else:
h.put_file_to_hdfs(r, raw_data, main.name,
job_dir, hdfs_user)
uploaded_paths.append(job_dir + '/' + main.name)
if len(libs) and job_dir_suffix:
# HDFS 2.2.0 fails to put file if the lib dir does not exist
self.create_hdfs_dir(r, lib_dir)
for lib in libs:
raw_data = dispatch.get_raw_binary(
lib, proxy_configs=proxy_configs, remote=r)
if isinstance(raw_data, dict) and raw_data["type"] == "path":
h.copy_from_local(r, raw_data['path'],
lib_dir, hdfs_user)
else:
h.put_file_to_hdfs(r, raw_data, lib.name,
lib_dir, hdfs_user)
uploaded_paths.append(lib_dir + '/' + lib.name)
for lib in builtin_libs:
h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir,
hdfs_user)
uploaded_paths.append(lib_dir + '/' + lib['name'])
return uploaded_paths
def _create_hdfs_workflow_dir(self, where, job):
constructed_dir = '/user/%s/' % self.get_hdfs_user()
constructed_dir = self._add_postfix(constructed_dir)
constructed_dir += '%s/%s' % (job.name, uuidutils.generate_uuid())
with remote.get_remote(where) as r:
self.create_hdfs_dir(r, constructed_dir)
return constructed_dir
def _create_coordinator_xml(self, coord_configs, config_filter=None,
appname='coord'):
doc = xml.Document()
# Create the <coordinator-app> base element
coord = doc.createElement('coordinator-app')
coord.attributes['name'] = appname
coord.attributes['start'] = "${start}"
coord.attributes['end'] = "${end}"
coord.attributes['frequency'] = "${frequency}"
coord.attributes['timezone'] = 'UTC'
coord.attributes['xmlns'] = 'uri:oozie:coordinator:0.2'
doc.appendChild(coord)
action = doc.createElement('action')
workflow = doc.createElement('workflow')
coord.appendChild(action)
action.appendChild(workflow)
x.add_text_element_to_tag(doc, "workflow", 'app-path',
"${workflowAppUri}")
configuration = doc.createElement('configuration')
workflow.appendChild(configuration)
default_configs = []
if config_filter is not None:
default_configs = [cfg['name'] for cfg in config_filter]
for name in sorted(coord_configs):
if name in default_configs or config_filter is None:
x.add_property_to_configuration(doc, name, coord_configs[name])
# Return newly created XML
return doc.toprettyxml(indent=" ")
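    # Editorial illustration of the XML produced above (the ${...} values are
    # Oozie EL placeholders filled in from the job parameters at submission):
    #
    #     <coordinator-app name="coord" start="${start}" end="${end}"
    #                      frequency="${frequency}" timezone="UTC"
    #                      xmlns="uri:oozie:coordinator:0.2">
    #       <action><workflow>
    #         <app-path>${workflowAppUri}</app-path>
    #         <configuration>...</configuration>
    #       </workflow></action>
    #     </coordinator-app>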
def _add_postfix(self, constructed_dir):
def _append_slash_if_needed(path):
if path[-1] != '/':
path += '/'
return path
constructed_dir = _append_slash_if_needed(constructed_dir)
if CONF.job_workflow_postfix:
constructed_dir = ''.join([str(constructed_dir),
str(CONF.job_workflow_postfix)])
return _append_slash_if_needed(constructed_dir)
def _resolve_external_hdfs_urls(self, job_configs):
external_hdfs_urls = []
for k, v in six.iteritems(job_configs.get('configs', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for k, v in six.iteritems(job_configs.get('params', {})):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
for v in job_configs.get('args', []):
if isinstance(v, six.string_types) and v.startswith("hdfs://"):
external_hdfs_urls.append(v)
return external_hdfs_urls
def suspend_job(self, job_execution):
return self._manage_job(job_execution, edp.JOB_ACTION_SUSPEND)
def _manage_job(self, job_execution, action):
if job_execution.oozie_job_id is not None:
client = self.get_client()
if action == edp.JOB_ACTION_SUSPEND:
client.suspend_job(job_execution)
return client.get_job_status(job_execution)
|
Pluto-tv/chromium-crosswalk
|
refs/heads/master
|
third_party/libxml/src/gentest.py
|
298
|
#!/usr/bin/python -u
#
# generate a tester program for the API
#
import sys
import os
import string
try:
import libxml2
except:
print "libxml2 python bindings not available, skipping testapi.c generation"
sys.exit(0)
if len(sys.argv) > 1:
srcPref = sys.argv[1] + '/'
else:
srcPref = ''
#
# Modules we want to skip in API test
#
skipped_modules = [ "SAX", "xlink", "threads", "globals",
"xmlmemory", "xmlversion", "xmlexports",
#deprecated
"DOCBparser",
]
#
# defines for each module
#
modules_defines = {
"HTMLparser": "LIBXML_HTML_ENABLED",
"catalog": "LIBXML_CATALOG_ENABLED",
"xmlreader": "LIBXML_READER_ENABLED",
"relaxng": "LIBXML_SCHEMAS_ENABLED",
"schemasInternals": "LIBXML_SCHEMAS_ENABLED",
"xmlschemas": "LIBXML_SCHEMAS_ENABLED",
"xmlschemastypes": "LIBXML_SCHEMAS_ENABLED",
"xpath": "LIBXML_XPATH_ENABLED",
"xpathInternals": "LIBXML_XPATH_ENABLED",
"xinclude": "LIBXML_XINCLUDE_ENABLED",
"xpointer": "LIBXML_XPTR_ENABLED",
"xmlregexp" : "LIBXML_REGEXP_ENABLED",
"xmlautomata" : "LIBXML_AUTOMATA_ENABLED",
"xmlsave" : "LIBXML_OUTPUT_ENABLED",
"DOCBparser" : "LIBXML_DOCB_ENABLED",
"xmlmodule" : "LIBXML_MODULES_ENABLED",
"pattern" : "LIBXML_PATTERN_ENABLED",
"schematron" : "LIBXML_SCHEMATRON_ENABLED",
}
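# Editorial note (an assumption about the generator, not stated here): these
# per-module defines are presumably emitted as "#ifdef <DEFINE>" guards around
# the generated tests so that testapi.c still compiles when the corresponding
# libxml2 feature is configured out; the per-function table below plays the
# same role at single-function granularity.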
#
# defines for specific functions
#
function_defines = {
"htmlDefaultSAXHandlerInit": "LIBXML_HTML_ENABLED",
"xmlSAX2EndElement" : "LIBXML_SAX1_ENABLED",
"xmlSAX2StartElement" : "LIBXML_SAX1_ENABLED",
"xmlSAXDefaultVersion" : "LIBXML_SAX1_ENABLED",
"UTF8Toisolat1" : "LIBXML_OUTPUT_ENABLED",
"xmlCleanupPredefinedEntities": "LIBXML_LEGACY_ENABLED",
"xmlInitializePredefinedEntities": "LIBXML_LEGACY_ENABLED",
"xmlSetFeature": "LIBXML_LEGACY_ENABLED",
"xmlGetFeature": "LIBXML_LEGACY_ENABLED",
"xmlGetFeaturesList": "LIBXML_LEGACY_ENABLED",
"xmlIOParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDTD": "LIBXML_VALID_ENABLED",
"xmlParseDoc": "LIBXML_SAX1_ENABLED",
"xmlParseMemory": "LIBXML_SAX1_ENABLED",
"xmlRecoverDoc": "LIBXML_SAX1_ENABLED",
"xmlParseFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverFile": "LIBXML_SAX1_ENABLED",
"xmlRecoverMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseFileWithData": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseMemory": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDoc": "LIBXML_SAX1_ENABLED",
"xmlSAXParseDTD": "LIBXML_SAX1_ENABLED",
"xmlSAXUserParseFile": "LIBXML_SAX1_ENABLED",
"xmlParseEntity": "LIBXML_SAX1_ENABLED",
"xmlParseExternalEntity": "LIBXML_SAX1_ENABLED",
"xmlSAXParseMemoryWithData": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemory": "LIBXML_SAX1_ENABLED",
"xmlParseBalancedChunkMemoryRecover": "LIBXML_SAX1_ENABLED",
"xmlSetupParserForBuffer": "LIBXML_SAX1_ENABLED",
"xmlStopParser": "LIBXML_PUSH_ENABLED",
"xmlAttrSerializeTxtContent": "LIBXML_OUTPUT_ENABLED",
"xmlSAXParseFile": "LIBXML_SAX1_ENABLED",
"xmlSAXParseEntity": "LIBXML_SAX1_ENABLED",
"xmlNewTextChild": "LIBXML_TREE_ENABLED",
"xmlNewDocRawNode": "LIBXML_TREE_ENABLED",
"xmlNewProp": "LIBXML_TREE_ENABLED",
"xmlReconciliateNs": "LIBXML_TREE_ENABLED",
"xmlValidateNCName": "LIBXML_TREE_ENABLED",
"xmlValidateNMToken": "LIBXML_TREE_ENABLED",
"xmlValidateName": "LIBXML_TREE_ENABLED",
"xmlNewChild": "LIBXML_TREE_ENABLED",
"xmlValidateQName": "LIBXML_TREE_ENABLED",
"xmlSprintfElementContent": "LIBXML_OUTPUT_ENABLED",
"xmlValidGetPotentialChildren" : "LIBXML_VALID_ENABLED",
"xmlValidGetValidElements" : "LIBXML_VALID_ENABLED",
"docbDefaultSAXHandlerInit" : "LIBXML_DOCB_ENABLED",
"xmlTextReaderPreservePattern" : "LIBXML_PATTERN_ENABLED",
}
#
# Some functions really need to be skipped for the tests.
#
skipped_functions = [
# block on I/O
"xmlFdRead", "xmlReadFd", "xmlCtxtReadFd",
"htmlFdRead", "htmlReadFd", "htmlCtxtReadFd",
"xmlReaderNewFd", "xmlReaderForFd",
"xmlIORead", "xmlReadIO", "xmlCtxtReadIO",
"htmlIORead", "htmlReadIO", "htmlCtxtReadIO",
"xmlReaderNewIO", "xmlBufferDump", "xmlNanoFTPConnect",
"xmlNanoFTPConnectTo", "xmlNanoHTTPMethod", "xmlNanoHTTPMethodRedir",
# Complex I/O APIs
"xmlCreateIOParserCtxt", "xmlParserInputBufferCreateIO",
"xmlRegisterInputCallbacks", "xmlReaderForIO",
"xmlOutputBufferCreateIO", "xmlRegisterOutputCallbacks",
"xmlSaveToIO", "xmlIOHTTPOpenW",
# library state cleanup, generates false leak information and other
# troubles, heavily tested otherwise.
"xmlCleanupParser", "xmlRelaxNGCleanupTypes", "xmlSetListDoc",
"xmlSetTreeDoc", "xmlUnlinkNode",
# hard to avoid leaks in the tests
"xmlStrcat", "xmlStrncat", "xmlCatalogAddLocal", "xmlNewTextWriterDoc",
"xmlXPathNewValueTree", "xmlXPathWrapString",
# unimplemented
"xmlTextReaderReadInnerXml", "xmlTextReaderReadOuterXml",
"xmlTextReaderReadString",
# destructor
"xmlListDelete", "xmlOutputBufferClose", "xmlNanoFTPClose", "xmlNanoHTTPClose",
# deprecated
"xmlCatalogGetPublic", "xmlCatalogGetSystem", "xmlEncodeEntities",
"xmlNewGlobalNs", "xmlHandleEntity", "xmlNamespaceParseNCName",
"xmlNamespaceParseNSDef", "xmlNamespaceParseQName",
"xmlParseNamespace", "xmlParseQuotedString", "xmlParserHandleReference",
"xmlScanName",
"xmlDecodeEntities",
# allocators
"xmlMemFree",
# verbosity
"xmlCatalogSetDebug", "xmlShellPrintXPathError", "xmlShellPrintNode",
# Internal functions, no user space should really call them
"xmlParseAttribute", "xmlParseAttributeListDecl", "xmlParseName",
"xmlParseNmtoken", "xmlParseEntityValue", "xmlParseAttValue",
"xmlParseSystemLiteral", "xmlParsePubidLiteral", "xmlParseCharData",
"xmlParseExternalID", "xmlParseComment", "xmlParsePITarget", "xmlParsePI",
"xmlParseNotationDecl", "xmlParseEntityDecl", "xmlParseDefaultDecl",
"xmlParseNotationType", "xmlParseEnumerationType", "xmlParseEnumeratedType",
"xmlParseAttributeType", "xmlParseAttributeListDecl",
"xmlParseElementMixedContentDecl", "xmlParseElementChildrenContentDecl",
"xmlParseElementContentDecl", "xmlParseElementDecl", "xmlParseMarkupDecl",
"xmlParseCharRef", "xmlParseEntityRef", "xmlParseReference",
"xmlParsePEReference", "xmlParseDocTypeDecl", "xmlParseAttribute",
"xmlParseStartTag", "xmlParseEndTag", "xmlParseCDSect", "xmlParseContent",
"xmlParseElement", "xmlParseVersionNum", "xmlParseVersionInfo",
"xmlParseEncName", "xmlParseEncodingDecl", "xmlParseSDDecl",
"xmlParseXMLDecl", "xmlParseTextDecl", "xmlParseMisc",
"xmlParseExternalSubset", "xmlParserHandlePEReference",
"xmlSkipBlankChars",
]
#
# These functions have side effects on the global state
# and hence generate errors on memory allocation tests
#
skipped_memcheck = [ "xmlLoadCatalog", "xmlAddEncodingAlias",
"xmlSchemaInitTypes", "xmlNanoFTPProxy", "xmlNanoFTPScanProxy",
"xmlNanoHTTPScanProxy", "xmlResetLastError", "xmlCatalogConvert",
"xmlCatalogRemove", "xmlLoadCatalogs", "xmlCleanupCharEncodingHandlers",
"xmlInitCharEncodingHandlers", "xmlCatalogCleanup",
"xmlSchemaGetBuiltInType",
"htmlParseFile", "htmlCtxtReadFile", # loads the catalogs
"xmlTextReaderSchemaValidate", "xmlSchemaCleanupTypes", # initialize the schemas type system
"xmlCatalogResolve", "xmlIOParseDTD" # loads the catalogs
]
#
# Extra code needed for some test cases
#
extra_pre_call = {
"xmlSAXUserParseFile": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlSAXUserParseMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemory": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParseBalancedChunkMemoryRecover": """
#ifdef LIBXML_SAX1_ENABLED
if (sax == (xmlSAXHandlerPtr)&xmlDefaultSAXHandler) user_data = NULL;
#endif
""",
"xmlParserInputBufferCreateFd":
"if (fd >= 0) fd = -1;",
}
extra_post_call = {
"xmlAddChild":
"if (ret_val == NULL) { xmlFreeNode(cur) ; cur = NULL ; }",
"xmlAddEntity":
"if (ret_val != NULL) { xmlFreeNode(ret_val) ; ret_val = NULL; }",
"xmlAddChildList":
"if (ret_val == NULL) { xmlFreeNodeList(cur) ; cur = NULL ; }",
"xmlAddSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddNextSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlAddPrevSibling":
"if (ret_val == NULL) { xmlFreeNode(elem) ; elem = NULL ; }",
"xmlDocSetRootElement":
"if (doc == NULL) { xmlFreeNode(root) ; root = NULL ; }",
"xmlReplaceNode":
"""if (cur != NULL) {
xmlUnlinkNode(cur);
xmlFreeNode(cur) ; cur = NULL ; }
if (old != NULL) {
xmlUnlinkNode(old);
xmlFreeNode(old) ; old = NULL ; }
ret_val = NULL;""",
"xmlTextMerge":
"""if ((first != NULL) && (first->type != XML_TEXT_NODE)) {
xmlUnlinkNode(second);
xmlFreeNode(second) ; second = NULL ; }""",
"xmlBuildQName":
"""if ((ret_val != NULL) && (ret_val != ncname) &&
(ret_val != prefix) && (ret_val != memory))
xmlFree(ret_val);
ret_val = NULL;""",
"xmlNewDocElementContent":
"""xmlFreeDocElementContent(doc, ret_val); ret_val = NULL;""",
"xmlDictReference": "xmlDictFree(dict);",
# Functions which deallocates one of their parameters
"xmlXPathConvertBoolean": """val = NULL;""",
"xmlXPathConvertNumber": """val = NULL;""",
"xmlXPathConvertString": """val = NULL;""",
"xmlSaveFileTo": """buf = NULL;""",
"xmlSaveFormatFileTo": """buf = NULL;""",
"xmlIOParseDTD": "input = NULL;",
"xmlRemoveProp": "cur = NULL;",
"xmlNewNs": "if ((node == NULL) && (ret_val != NULL)) xmlFreeNs(ret_val);",
"xmlCopyNamespace": "if (ret_val != NULL) xmlFreeNs(ret_val);",
"xmlCopyNamespaceList": "if (ret_val != NULL) xmlFreeNsList(ret_val);",
"xmlNewTextWriter": "if (ret_val != NULL) out = NULL;",
"xmlNewTextWriterPushParser": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;} if (ret_val != NULL) ctxt = NULL;",
"xmlNewIOInputStream": "if (ret_val != NULL) input = NULL;",
"htmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"htmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseDocument": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseChunk": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlParseExtParsedEnt": "if (ctxt != NULL) {xmlFreeDoc(ctxt->myDoc); ctxt->myDoc = NULL;}",
"xmlDOMWrapAdoptNode": "if ((node != NULL) && (node->parent == NULL)) {xmlUnlinkNode(node);xmlFreeNode(node);node = NULL;}",
"xmlBufferSetAllocationScheme": "if ((buf != NULL) && (scheme == XML_BUFFER_ALLOC_IMMUTABLE) && (buf->content != NULL) && (buf->content != static_buf_content)) { xmlFree(buf->content); buf->content = NULL;}"
}
modules = []
def is_skipped_module(name):
for mod in skipped_modules:
if mod == name:
return 1
return 0
def is_skipped_function(name):
for fun in skipped_functions:
if fun == name:
return 1
# Do not test destructors
if string.find(name, 'Free') != -1:
return 1
return 0
def is_skipped_memcheck(name):
for fun in skipped_memcheck:
if fun == name:
return 1
return 0
missing_types = {}
def add_missing_type(name, func):
try:
list = missing_types[name]
list.append(func)
except:
missing_types[name] = [func]
generated_param_types = []
def add_generated_param_type(name):
generated_param_types.append(name)
generated_return_types = []
def add_generated_return_type(name):
generated_return_types.append(name)
missing_functions = {}
missing_functions_nr = 0
def add_missing_functions(name, module):
global missing_functions_nr
missing_functions_nr = missing_functions_nr + 1
try:
list = missing_functions[module]
list.append(name)
except:
missing_functions[module] = [name]
#
# Provide the type generators and destructors for the parameters
#
def type_convert(str, name, info, module, function, pos):
# res = string.replace(str, " ", " ")
# res = string.replace(str, " ", " ")
# res = string.replace(str, " ", " ")
res = string.replace(str, " *", "_ptr")
# res = string.replace(str, "*", "_ptr")
res = string.replace(res, " ", "_")
if res == 'const_char_ptr':
if string.find(name, "file") != -1 or \
string.find(name, "uri") != -1 or \
string.find(name, "URI") != -1 or \
string.find(info, "filename") != -1 or \
string.find(info, "URI") != -1 or \
string.find(info, "URL") != -1:
if string.find(function, "Save") != -1 or \
string.find(function, "Create") != -1 or \
string.find(function, "Write") != -1 or \
string.find(function, "Fetch") != -1:
return('fileoutput')
return('filepath')
if res == 'void_ptr':
if module == 'nanoftp' and name == 'ctx':
return('xmlNanoFTPCtxtPtr')
if function == 'xmlNanoFTPNewCtxt' or \
function == 'xmlNanoFTPConnectTo' or \
function == 'xmlNanoFTPOpen':
return('xmlNanoFTPCtxtPtr')
if module == 'nanohttp' and name == 'ctx':
return('xmlNanoHTTPCtxtPtr')
if function == 'xmlNanoHTTPMethod' or \
function == 'xmlNanoHTTPMethodRedir' or \
function == 'xmlNanoHTTPOpen' or \
function == 'xmlNanoHTTPOpenRedir':
return('xmlNanoHTTPCtxtPtr');
if function == 'xmlIOHTTPOpen':
return('xmlNanoHTTPCtxtPtr')
if string.find(name, "data") != -1:
return('userdata')
if string.find(name, "user") != -1:
return('userdata')
if res == 'xmlDoc_ptr':
res = 'xmlDocPtr'
if res == 'xmlNode_ptr':
res = 'xmlNodePtr'
if res == 'xmlDict_ptr':
res = 'xmlDictPtr'
if res == 'xmlNodePtr' and pos != 0:
if (function == 'xmlAddChild' and pos == 2) or \
(function == 'xmlAddChildList' and pos == 2) or \
(function == 'xmlAddNextSibling' and pos == 2) or \
(function == 'xmlAddSibling' and pos == 2) or \
(function == 'xmlDocSetRootElement' and pos == 2) or \
(function == 'xmlReplaceNode' and pos == 2) or \
(function == 'xmlTextMerge') or \
(function == 'xmlAddPrevSibling' and pos == 2):
return('xmlNodePtr_in');
if res == 'const xmlBufferPtr':
res = 'xmlBufferPtr'
if res == 'xmlChar_ptr' and name == 'name' and \
string.find(function, "EatName") != -1:
return('eaten_name')
if res == 'void_ptr*':
res = 'void_ptr_ptr'
if res == 'char_ptr*':
res = 'char_ptr_ptr'
if res == 'xmlChar_ptr*':
res = 'xmlChar_ptr_ptr'
if res == 'const_xmlChar_ptr*':
res = 'const_xmlChar_ptr_ptr'
if res == 'const_char_ptr*':
res = 'const_char_ptr_ptr'
if res == 'FILE_ptr' and module == 'debugXML':
res = 'debug_FILE_ptr';
if res == 'int' and name == 'options':
if module == 'parser' or module == 'xmlreader':
res = 'parseroptions'
return res
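# Illustrative only (not used by the generator itself): sample conversions this
# helper produces for typical API descriptions:
#   type_convert("const char *", "filename", "", "parser", "xmlParseFile", 1) -> 'filepath'
#   type_convert("xmlChar *", "name", "", "tree", "xmlNodeSetName", 2)        -> 'xmlChar_ptr'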
known_param_types = []
def is_known_param_type(name, rtype):
global test
for type in known_param_types:
if type == name:
return 1
for type in generated_param_types:
if type == name:
return 1
if name[-3:] == 'Ptr' or name[-4:] == '_ptr':
if rtype[0:6] == 'const ':
crtype = rtype[6:]
else:
crtype = rtype
define = 0
if modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("""
#define gen_nb_%s 1
static %s gen_%s(int no ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
return(NULL);
}
static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}
""" % (name, crtype, name, name, rtype))
if define == 1:
test.write("#endif\n\n")
add_generated_param_type(name)
return 1
return 0
#
# Provide the type destructors for the return values
#
known_return_types = []
def is_known_return_type(name):
for type in known_return_types:
if type == name:
return 1
return 0
#
# Copy the beginning of the C test program result
#
try:
input = open("testapi.c", "r")
except:
input = open(srcPref + "testapi.c", "r")
test = open('testapi.c.new', 'w')
def compare_and_save():
global test
test.close()
try:
input = open("testapi.c", "r").read()
except:
input = ''
test = open('testapi.c.new', "r").read()
if input != test:
try:
os.system("rm testapi.c; mv testapi.c.new testapi.c")
except:
os.system("mv testapi.c.new testapi.c")
print("Updated testapi.c")
else:
print("Generated testapi.c is identical")
line = input.readline()
while line != "":
if line == "/* CUT HERE: everything below that line is generated */\n":
break;
if line[0:15] == "#define gen_nb_":
type = string.split(line[15:])[0]
known_param_types.append(type)
if line[0:19] == "static void desret_":
type = string.split(line[19:], '(')[0]
known_return_types.append(type)
test.write(line)
line = input.readline()
input.close()
if line == "":
print "Could not find the CUT marker in testapi.c skipping generation"
test.close()
sys.exit(0)
print("Scanned testapi.c: found %d parameters types and %d return types\n" % (
len(known_param_types), len(known_return_types)))
test.write("/* CUT HERE: everything below that line is generated */\n")
#
# Open the input API description
#
doc = libxml2.readFile(srcPref + 'doc/libxml2-api.xml', None, 0)
if doc == None:
print "Failed to load doc/libxml2-api.xml"
sys.exit(1)
ctxt = doc.xpathNewContext()
#
# Generate a list of all function parameters and select only
# those used in the api tests
#
argtypes = {}
args = ctxt.xpathEval("/api/symbols/function/arg")
for arg in args:
mod = arg.xpathEval('string(../@file)')
func = arg.xpathEval('string(../@name)')
if (mod not in skipped_modules) and (func not in skipped_functions):
type = arg.xpathEval('string(@type)')
if not argtypes.has_key(type):
argtypes[type] = func
# similarly for return types
rettypes = {}
rets = ctxt.xpathEval("/api/symbols/function/return")
for ret in rets:
mod = ret.xpathEval('string(../@file)')
func = ret.xpathEval('string(../@name)')
if (mod not in skipped_modules) and (func not in skipped_functions):
type = ret.xpathEval('string(@type)')
if not rettypes.has_key(type):
rettypes[type] = func
#
# Generate constructors and return type handling for all enums
# which are used as function parameters
#
enums = ctxt.xpathEval("/api/symbols/typedef[@type='enum']")
for enum in enums:
module = enum.xpathEval('string(@file)')
name = enum.xpathEval('string(@name)')
#
# Skip any enums which are not in our filtered lists
#
if (name == None) or ((name not in argtypes) and (name not in rettypes)):
continue;
define = 0
if argtypes.has_key(name) and is_known_param_type(name, name) == 0:
values = ctxt.xpathEval("/api/symbols/enum[@type='%s']" % name)
i = 0
vals = []
for value in values:
vname = value.xpathEval('string(@name)')
if vname == None:
continue;
i = i + 1
if i >= 5:
break;
vals.append(vname)
if vals == []:
print "Didn't find any value for enum %s" % (name)
continue
if modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("#define gen_nb_%s %d\n" % (name, len(vals)))
test.write("""static %s gen_%s(int no, int nr ATTRIBUTE_UNUSED) {\n""" %
(name, name))
i = 1
for value in vals:
test.write(" if (no == %d) return(%s);\n" % (i, value))
i = i + 1
test.write(""" return(0);
}
static void des_%s(int no ATTRIBUTE_UNUSED, %s val ATTRIBUTE_UNUSED, int nr ATTRIBUTE_UNUSED) {
}
""" % (name, name));
known_param_types.append(name)
if (is_known_return_type(name) == 0) and (name in rettypes):
if define == 0 and modules_defines.has_key(module):
test.write("#ifdef %s\n" % (modules_defines[module]))
define = 1
test.write("""static void desret_%s(%s val ATTRIBUTE_UNUSED) {
}
""" % (name, name))
known_return_types.append(name)
if define == 1:
test.write("#endif\n\n")
#
# Load the interfaces
#
headers = ctxt.xpathEval("/api/files/file")
for file in headers:
name = file.xpathEval('string(@name)')
if (name == None) or (name == ''):
continue
#
# Some modules may be skipped because they don't really consist
# of user-callable APIs
#
if is_skipped_module(name):
continue
#
# do not test deprecated APIs
#
desc = file.xpathEval('string(description)')
if string.find(desc, 'DEPRECATED') != -1:
print "Skipping deprecated interface %s" % name
continue;
test.write("#include <libxml/%s.h>\n" % name)
modules.append(name)
#
# Generate the callers signatures
#
for module in modules:
test.write("static int test_%s(void);\n" % module);
#
# Generate the top caller
#
test.write("""
/**
* testlibxml2:
*
* Main entry point of the tester for the full libxml2 module,
* it calls all the tester entry points for each module.
*
* Returns the number of errors found
*/
static int
testlibxml2(void)
{
int test_ret = 0;
""")
for module in modules:
test.write(" test_ret += test_%s();\n" % module)
test.write("""
printf("Total: %d functions, %d tests, %d errors\\n",
function_tests, call_tests, test_ret);
return(test_ret);
}
""")
#
# How to handle a function
#
nb_tests = 0
def generate_test(module, node):
global test
global nb_tests
nb_cond = 0
no_gen = 0
name = node.xpathEval('string(@name)')
if is_skipped_function(name):
return
#
# check we know how to handle the args and return values
# and store the information for the generation
#
try:
args = node.xpathEval("arg")
except:
args = []
t_args = []
n = 0
for arg in args:
n = n + 1
rtype = arg.xpathEval("string(@type)")
if rtype == 'void':
break;
info = arg.xpathEval("string(@info)")
nam = arg.xpathEval("string(@name)")
type = type_convert(rtype, nam, info, module, name, n)
if is_known_param_type(type, rtype) == 0:
add_missing_type(type, name);
no_gen = 1
if (type[-3:] == 'Ptr' or type[-4:] == '_ptr') and \
rtype[0:6] == 'const ':
crtype = rtype[6:]
else:
crtype = rtype
t_args.append((nam, type, rtype, crtype, info))
try:
rets = node.xpathEval("return")
except:
rets = []
t_ret = None
for ret in rets:
rtype = ret.xpathEval("string(@type)")
info = ret.xpathEval("string(@info)")
type = type_convert(rtype, 'return', info, module, name, 0)
if rtype == 'void':
break
if is_known_return_type(type) == 0:
add_missing_type(type, name);
no_gen = 1
t_ret = (type, rtype, info)
break
test.write("""
static int
test_%s(void) {
int test_ret = 0;
""" % (name))
if no_gen == 1:
add_missing_functions(name, module)
test.write("""
/* missing type support */
return(test_ret);
}
""")
return
try:
conds = node.xpathEval("cond")
for cond in conds:
test.write("#if %s\n" % (cond.get_content()))
nb_cond = nb_cond + 1
except:
pass
define = 0
if function_defines.has_key(name):
test.write("#ifdef %s\n" % (function_defines[name]))
define = 1
# Declare the memory usage counter
no_mem = is_skipped_memcheck(name)
if no_mem == 0:
test.write(" int mem_base;\n");
# Declare the return value
if t_ret != None:
test.write(" %s ret_val;\n" % (t_ret[1]))
# Declare the arguments
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
# add declaration
test.write(" %s %s; /* %s */\n" % (crtype, nam, info))
test.write(" int n_%s;\n" % (nam))
test.write("\n")
# Cascade loop over each argument's list of values
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
#
test.write(" for (n_%s = 0;n_%s < gen_nb_%s;n_%s++) {\n" % (
nam, nam, type, nam))
# log the memory usage
if no_mem == 0:
test.write(" mem_base = xmlMemBlocks();\n");
# prepare the call
i = 0;
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
#
test.write(" %s = gen_%s(n_%s, %d);\n" % (nam, type, nam, i))
i = i + 1;
# do the call, and clean up the result
if extra_pre_call.has_key(name):
test.write(" %s\n"% (extra_pre_call[name]))
if t_ret != None:
test.write("\n ret_val = %s(" % (name))
need = 0
for arg in t_args:
(nam, type, rtype, crtype, info) = arg
if need:
test.write(", ")
else:
need = 1
if rtype != crtype:
test.write("(%s)" % rtype)
test.write("%s" % nam);
test.write(");\n")
if extra_post_call.has_key(name):
test.write(" %s\n"% (extra_post_call[name]))
test.write(" desret_%s(ret_val);\n" % t_ret[0])
else:
test.write("\n %s(" % (name));
need = 0;
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
if need:
test.write(", ")
else:
need = 1
if rtype != crtype:
test.write("(%s)" % rtype)
test.write("%s" % nam)
test.write(");\n")
if extra_post_call.has_key(name):
test.write(" %s\n"% (extra_post_call[name]))
test.write(" call_tests++;\n");
# Free the arguments
i = 0;
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
# This is a hack to prevent generating a destructor for the
# 'input' argument in xmlTextReaderSetup. There should be
# a better, more generic way to do this!
if string.find(info, 'destroy') == -1:
test.write(" des_%s(n_%s, " % (type, nam))
if rtype != crtype:
test.write("(%s)" % rtype)
test.write("%s, %d);\n" % (nam, i))
i = i + 1;
test.write(" xmlResetLastError();\n");
# Check the memory usage
if no_mem == 0:
test.write(""" if (mem_base != xmlMemBlocks()) {
printf("Leak of %%d blocks found in %s",
xmlMemBlocks() - mem_base);
test_ret++;
""" % (name));
for arg in t_args:
(nam, type, rtype, crtype, info) = arg;
test.write(""" printf(" %%d", n_%s);\n""" % (nam))
test.write(""" printf("\\n");\n""")
test.write(" }\n")
for arg in t_args:
test.write(" }\n")
test.write(" function_tests++;\n")
#
# end of conditional
#
while nb_cond > 0:
test.write("#endif\n")
nb_cond = nb_cond -1
if define == 1:
test.write("#endif\n")
nb_tests = nb_tests + 1;
test.write("""
return(test_ret);
}
""")
#
# Generate all module callers
#
for module in modules:
# gather all the functions exported by that module
try:
functions = ctxt.xpathEval("/api/symbols/function[@file='%s']" % (module))
except:
print "Failed to gather functions from module %s" % (module)
continue;
# iterate over all functions in the module generating the test
i = 0
nb_tests_old = nb_tests
for function in functions:
i = i + 1
generate_test(module, function);
# header
test.write("""static int
test_%s(void) {
int test_ret = 0;
if (quiet == 0) printf("Testing %s : %d of %d functions ...\\n");
""" % (module, module, nb_tests - nb_tests_old, i))
# iterate over all functions in the module generating the call
for function in functions:
name = function.xpathEval('string(@name)')
if is_skipped_function(name):
continue
test.write(" test_ret += test_%s();\n" % (name))
# footer
test.write("""
if (test_ret != 0)
printf("Module %s: %%d errors\\n", test_ret);
return(test_ret);
}
""" % (module))
#
# Generate direct module caller
#
test.write("""static int
test_module(const char *module) {
""");
for module in modules:
test.write(""" if (!strcmp(module, "%s")) return(test_%s());\n""" % (
module, module))
test.write(""" return(0);
}
""");
print "Generated test for %d modules and %d functions" %(len(modules), nb_tests)
compare_and_save()
missing_list = []
for missing in missing_types.keys():
if missing == 'va_list' or missing == '...':
continue;
n = len(missing_types[missing])
missing_list.append((n, missing))
def compare_missing(a, b):
return b[0] - a[0]
missing_list.sort(compare_missing)
print "Missing support for %d functions and %d types see missing.lst" % (missing_functions_nr, len(missing_list))
lst = open("missing.lst", "w")
lst.write("Missing support for %d types" % (len(missing_list)))
lst.write("\n")
for miss in missing_list:
lst.write("%s: %d :" % (miss[1], miss[0]))
i = 0
for n in missing_types[miss[1]]:
i = i + 1
if i > 5:
lst.write(" ...")
break
lst.write(" %s" % (n))
lst.write("\n")
lst.write("\n")
lst.write("\n")
lst.write("Missing support per module");
for module in missing_functions.keys():
lst.write("module %s:\n %s\n" % (module, missing_functions[module]))
lst.close()
|
EvanK/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_firewall_schedule_group.py
|
24
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib uses python logging; you can get its output if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_schedule_group
short_description: Schedule group configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure the firewall_schedule feature and group category.
Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
firewall_schedule_group:
description:
- Schedule group configuration.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
member:
description:
- Schedules added to the schedule group.
suboptions:
name:
description:
- Schedule name. Source firewall.schedule.onetime.name firewall.schedule.recurring.name.
required: true
name:
description:
- Schedule group name.
required: true
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Schedule group configuration.
fortios_firewall_schedule_group:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
firewall_schedule_group:
state: "present"
color: "3"
member:
-
name: "default_name_5 (source firewall.schedule.onetime.name firewall.schedule.recurring.name)"
name: "default_name_6"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
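# login() expects the Ansible module parameter dict, e.g. (illustrative
# placeholder values, matching the argument spec in main()):
#   {'host': '192.168.122.40', 'username': 'admin', 'password': '', 'https': True}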
def filter_firewall_schedule_group_data(json):
option_list = ['color', 'member', 'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
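# Illustrative sketch (not called by the module): the filter keeps only the known
# options and drops unknown keys and None values, e.g.
#   filter_firewall_schedule_group_data({'state': 'present', 'name': 'work_hours',
#                                        'color': 3, 'member': None})
#   -> {'color': 3, 'name': 'work_hours'}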
def firewall_schedule_group(data, fos):
vdom = data['vdom']
firewall_schedule_group_data = data['firewall_schedule_group']
filtered_data = filter_firewall_schedule_group_data(firewall_schedule_group_data)
if firewall_schedule_group_data['state'] == "present":
return fos.set('firewall.schedule',
'group',
data=filtered_data,
vdom=vdom)
elif firewall_schedule_group_data['state'] == "absent":
return fos.delete('firewall.schedule',
'group',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall_schedule(data, fos):
login(data)
methodlist = ['firewall_schedule_group']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"firewall_schedule_group": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"member": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall_schedule(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
rhurkes/chasegame
|
refs/heads/master
|
venv/lib/python2.7/site-packages/jinja2/testsuite/loader.py
|
411
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.loader
~~~~~~~~~~~~~~~~~~~~~~~
Test the loaders.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import tempfile
import shutil
import unittest
from jinja2.testsuite import JinjaTestCase, dict_loader, \
package_loader, filesystem_loader, function_loader, \
choice_loader, prefix_loader
from jinja2 import Environment, loaders
from jinja2._compat import PYPY, PY2
from jinja2.loaders import split_template_path
from jinja2.exceptions import TemplateNotFound
class LoaderTestCase(JinjaTestCase):
def test_dict_loader(self):
env = Environment(loader=dict_loader)
tmpl = env.get_template('justdict.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_package_loader(self):
env = Environment(loader=package_loader)
tmpl = env.get_template('test.html')
assert tmpl.render().strip() == 'BAR'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_filesystem_loader(self):
env = Environment(loader=filesystem_loader)
tmpl = env.get_template('test.html')
assert tmpl.render().strip() == 'BAR'
tmpl = env.get_template('foo/test.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_choice_loader(self):
env = Environment(loader=choice_loader)
tmpl = env.get_template('justdict.html')
assert tmpl.render().strip() == 'FOO'
tmpl = env.get_template('test.html')
assert tmpl.render().strip() == 'BAR'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_function_loader(self):
env = Environment(loader=function_loader)
tmpl = env.get_template('justfunction.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')
def test_prefix_loader(self):
env = Environment(loader=prefix_loader)
tmpl = env.get_template('a/test.html')
assert tmpl.render().strip() == 'BAR'
tmpl = env.get_template('b/justdict.html')
assert tmpl.render().strip() == 'FOO'
self.assert_raises(TemplateNotFound, env.get_template, 'missing')
def test_caching(self):
changed = False
class TestLoader(loaders.BaseLoader):
def get_source(self, environment, template):
return u'foo', None, lambda: not changed
env = Environment(loader=TestLoader(), cache_size=-1)
tmpl = env.get_template('template')
assert tmpl is env.get_template('template')
changed = True
assert tmpl is not env.get_template('template')
changed = False
env = Environment(loader=TestLoader(), cache_size=0)
assert env.get_template('template') \
is not env.get_template('template')
env = Environment(loader=TestLoader(), cache_size=2)
t1 = env.get_template('one')
t2 = env.get_template('two')
assert t2 is env.get_template('two')
assert t1 is env.get_template('one')
t3 = env.get_template('three')
assert 'one' in env.cache
assert 'two' not in env.cache
assert 'three' in env.cache
def test_dict_loader_cache_invalidates(self):
mapping = {'foo': "one"}
env = Environment(loader=loaders.DictLoader(mapping))
assert env.get_template('foo').render() == "one"
mapping['foo'] = "two"
assert env.get_template('foo').render() == "two"
def test_split_template_path(self):
assert split_template_path('foo/bar') == ['foo', 'bar']
assert split_template_path('./foo/bar') == ['foo', 'bar']
self.assert_raises(TemplateNotFound, split_template_path, '../foo')
class ModuleLoaderTestCase(JinjaTestCase):
archive = None
def compile_down(self, zip='deflated', py_compile=False):
super(ModuleLoaderTestCase, self).setup()
log = []
self.reg_env = Environment(loader=prefix_loader)
if zip is not None:
self.archive = tempfile.mkstemp(suffix='.zip')[1]
else:
self.archive = tempfile.mkdtemp()
self.reg_env.compile_templates(self.archive, zip=zip,
log_function=log.append,
py_compile=py_compile)
self.mod_env = Environment(loader=loaders.ModuleLoader(self.archive))
return ''.join(log)
def teardown(self):
super(ModuleLoaderTestCase, self).teardown()
if hasattr(self, 'mod_env'):
if os.path.isfile(self.archive):
os.remove(self.archive)
else:
shutil.rmtree(self.archive)
self.archive = None
def test_log(self):
log = self.compile_down()
assert 'Compiled "a/foo/test.html" as ' \
'tmpl_a790caf9d669e39ea4d280d597ec891c4ef0404a' in log
assert 'Finished compiling templates' in log
assert 'Could not compile "a/syntaxerror.html": ' \
'Encountered unknown tag \'endif\'' in log
def _test_common(self):
tmpl1 = self.reg_env.get_template('a/test.html')
tmpl2 = self.mod_env.get_template('a/test.html')
assert tmpl1.render() == tmpl2.render()
tmpl1 = self.reg_env.get_template('b/justdict.html')
tmpl2 = self.mod_env.get_template('b/justdict.html')
assert tmpl1.render() == tmpl2.render()
def test_deflated_zip_compile(self):
self.compile_down(zip='deflated')
self._test_common()
def test_stored_zip_compile(self):
self.compile_down(zip='stored')
self._test_common()
def test_filesystem_compile(self):
self.compile_down(zip=None)
self._test_common()
def test_weak_references(self):
self.compile_down()
tmpl = self.mod_env.get_template('a/test.html')
key = loaders.ModuleLoader.get_template_key('a/test.html')
name = self.mod_env.loader.module.__name__
assert hasattr(self.mod_env.loader.module, key)
assert name in sys.modules
# unset all, ensure the module is gone from sys.modules
self.mod_env = tmpl = None
try:
import gc
gc.collect()
except:
pass
assert name not in sys.modules
# This test only makes sense on non-pypy python 2
if PY2 and not PYPY:
def test_byte_compilation(self):
log = self.compile_down(py_compile=True)
assert 'Byte-compiled "a/test.html"' in log
tmpl1 = self.mod_env.get_template('a/test.html')
mod = self.mod_env.loader.module. \
tmpl_3c4ddf650c1a73df961a6d3d2ce2752f1b8fd490
assert mod.__file__.endswith('.pyc')
def test_choice_loader(self):
log = self.compile_down()
self.mod_env.loader = loaders.ChoiceLoader([
self.mod_env.loader,
loaders.DictLoader({'DICT_SOURCE': 'DICT_TEMPLATE'})
])
tmpl1 = self.mod_env.get_template('a/test.html')
self.assert_equal(tmpl1.render(), 'BAR')
tmpl2 = self.mod_env.get_template('DICT_SOURCE')
self.assert_equal(tmpl2.render(), 'DICT_TEMPLATE')
def test_prefix_loader(self):
log = self.compile_down()
self.mod_env.loader = loaders.PrefixLoader({
'MOD': self.mod_env.loader,
'DICT': loaders.DictLoader({'test.html': 'DICT_TEMPLATE'})
})
tmpl1 = self.mod_env.get_template('MOD/a/test.html')
self.assert_equal(tmpl1.render(), 'BAR')
tmpl2 = self.mod_env.get_template('DICT/test.html')
self.assert_equal(tmpl2.render(), 'DICT_TEMPLATE')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LoaderTestCase))
suite.addTest(unittest.makeSuite(ModuleLoaderTestCase))
return suite
|
and2egg/philharmonic
|
refs/heads/master
|
philharmonic/manager/__init__.py
|
2
|
from .imanager import IManager, ManagerFactory
|
heke123/chromium-crosswalk
|
refs/heads/master
|
tools/chrome_proxy/common/network_metrics_unittest.py
|
37
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from common import network_metrics
from telemetry.testing import test_page_test_results
from telemetry.timeline import event
HTML_BODY = """<!DOCTYPE HTML>
<html>
<head> </head>
<body>
<div id="test"> TEST HTML</div>
</body>
</html>"""
IMAGE_BODY = """fake image data"""
GZIPPED_HTML_LEN = network_metrics.HTTPResponse.GetGizppedBodyLength(HTML_BODY)
# Make up original content length for the image.
IMAGE_OCL = 3 * len(IMAGE_BODY)
class NetworkMetricTest(unittest.TestCase):
@staticmethod
def MakeNetworkTimelineEvent(
url, response_headers, body=None, base64_encoded_body=False,
served_from_cache=False, request_headers=None, status=200,
remote_port=None):
if not request_headers:
request_headers = {}
e = event.TimelineEvent('network', 'HTTPResponse', 0, 0)
e.args = {}
e.args['requestId'] = 0
e.args['response'] = {
'status': status,
'url': url,
'headers': response_headers,
'requestHeaders': request_headers,
'remotePort': remote_port,
}
e.args['body'] = body
e.args['base64_encoded_body'] = base64_encoded_body
e.args['served_from_cache'] = served_from_cache
return e
def testHTTPResponse(self):
url = 'http://test.url'
self.assertLess(GZIPPED_HTML_LEN, len(HTML_BODY))
# A plain text HTML response
resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
url=url,
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(HTML_BODY)),
},
body=HTML_BODY))
self.assertEqual(url, resp.response.url)
body, base64_encoded = resp.response.GetBody()
self.assertEqual(HTML_BODY, body)
self.assertFalse(base64_encoded)
self.assertEqual('text/html', resp.response.GetHeader('Content-Type'))
self.assertEqual(len(HTML_BODY), resp.content_length)
self.assertEqual(None, resp.response.GetHeader('Content-Encoding'))
self.assertFalse(resp.has_original_content_length)
self.assertEqual(0.0, resp.data_saving_rate)
# A gzipped HTML response
resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
url=url,
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(HTML_BODY)),
},
body=HTML_BODY))
body, base64_encoded = resp.response.GetBody()
self.assertFalse(base64_encoded)
self.assertEqual(GZIPPED_HTML_LEN, resp.content_length)
self.assertEqual('gzip', resp.response.GetHeader('Content-Encoding'))
self.assertTrue(resp.has_original_content_length)
self.assertEqual(len(HTML_BODY), resp.original_content_length)
self.assertEqual(
float(len(HTML_BODY) - GZIPPED_HTML_LEN) / len(HTML_BODY),
resp.data_saving_rate)
# A JPEG image response.
resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(IMAGE_OCL),
},
body=base64.b64encode(IMAGE_BODY),
base64_encoded_body=True))
body, base64_encoded = resp.response.GetBody()
self.assertTrue(base64_encoded)
self.assertEqual(IMAGE_BODY, base64.b64decode(body))
self.assertEqual(len(IMAGE_BODY), resp.content_length)
self.assertTrue(resp.has_original_content_length)
self.assertEqual(IMAGE_OCL, resp.original_content_length)
self.assertFalse(resp.response.served_from_cache)
self.assertEqual(float(IMAGE_OCL - len(IMAGE_BODY)) / IMAGE_OCL,
resp.data_saving_rate)
# A JPEG image response from cache.
resp = network_metrics.HTTPResponse(self.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(IMAGE_OCL),
},
body=base64.b64encode(IMAGE_BODY),
base64_encoded_body=True,
served_from_cache=True))
self.assertEqual(len(IMAGE_BODY), resp.content_length)
self.assertTrue(resp.has_original_content_length)
self.assertEqual(IMAGE_OCL, resp.original_content_length)
# Cached resource has zero saving.
self.assertTrue(resp.response.served_from_cache)
self.assertEqual(0.0, resp.data_saving_rate)
def testNetworkMetricResults(self):
events = [
# A plain text HTML.
self.MakeNetworkTimelineEvent(
url='http://test.html1',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(HTML_BODY)),
},
body=HTML_BODY),
# A compressed HTML.
self.MakeNetworkTimelineEvent(
url='http://test.html2',
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(HTML_BODY)),
},
body=HTML_BODY),
# A base64 encoded image.
self.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(IMAGE_OCL),
},
body=base64.b64encode(IMAGE_BODY),
base64_encoded_body=True),
]
metric = network_metrics.NetworkMetric()
metric._events = events
metric.compute_data_saving = True
self.assertTrue(len(events), len(list(metric.IterResponses(None))))
results = test_page_test_results.TestPageTestResults(self)
metric.AddResults(None, results)
cl = len(HTML_BODY) + GZIPPED_HTML_LEN + len(IMAGE_BODY)
results.AssertHasPageSpecificScalarValue('content_length', 'bytes', cl)
ocl = len(HTML_BODY) + len(HTML_BODY) + IMAGE_OCL
results.AssertHasPageSpecificScalarValue(
'original_content_length', 'bytes', ocl)
saving_percent = float(ocl - cl) * 100/ ocl
results.AssertHasPageSpecificScalarValue(
'data_saving', 'percent', saving_percent)
|
romain-dartigues/ansible
|
refs/heads/devel
|
lib/ansible/plugins/terminal/iosxr.py
|
38
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n][\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br']]>]]>[\r\n]?')
]
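# Illustrative prompts the stdout patterns are intended to match (hostname is a
# placeholder): "\nRP/0/RP0/CPU0:router#" and "\nRP/0/RP0/CPU0:router(config)#"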
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"% ?Bad secret"),
re.compile(br"% ?This command is not authorized"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Failed to commit", re.I)
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 512', b'terminal exec prompt no-timestamp'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
|
maurofaccenda/ansible
|
refs/heads/devel
|
lib/ansible/modules/utilities/logic/async_status.py
|
56
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: async_status
short_description: Obtain status of asynchronous task
description:
- "This module gets the status of an asynchronous task."
version_added: "0.5"
options:
jid:
description:
- Job or task identifier
required: true
default: null
aliases: []
mode:
description:
- if C(status), obtain the status; if C(cleanup), clean up the async job cache
located in C(~/.ansible_async/) for the specified job I(jid).
required: false
choices: [ "status", "cleanup" ]
default: "status"
notes:
- See also U(http://docs.ansible.com/playbooks_async.html)
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
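# Illustrative playbook usage (a minimal sketch; the task name and registered
# variable are assumptions, not part of this module):
EXAMPLES = '''
- name: Poll a previously started asynchronous task
  async_status:
    jid: "{{ job.ansible_job_id }}"
  register: job_result
  until: job_result.finished
  retries: 30
  delay: 10
'''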
import datetime
import traceback
from ansible.module_utils.six import iteritems
def main():
module = AnsibleModule(argument_spec=dict(
jid=dict(required=True),
mode=dict(default='status', choices=['status','cleanup']),
))
mode = module.params['mode']
jid = module.params['jid']
# setup logging directory
logdir = os.path.expanduser("~/.ansible_async")
log_path = os.path.join(logdir, jid)
if not os.path.exists(log_path):
module.fail_json(msg="could not find job", ansible_job_id=jid, started=1, finished=1)
if mode == 'cleanup':
os.unlink(log_path)
module.exit_json(ansible_job_id=jid, erased=log_path)
# NOT in cleanup mode, assume regular status mode
# no remote kill mode currently exists, but probably should
# consider log_path + ".pid" file and also unlink that above
data = None
try:
data = open(log_path).read()
data = json.loads(data)
except Exception:
if not data:
# file not written yet? That means it is running
module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0)
else:
module.fail_json(ansible_job_id=jid, results_file=log_path,
msg="Could not parse job output: %s" % data, started=1, finished=1)
if not 'started' in data:
data['finished'] = 1
data['ansible_job_id'] = jid
elif 'finished' not in data:
data['finished'] = 0
# Fix error: TypeError: exit_json() keywords must be strings
data = dict([(str(k), v) for k, v in iteritems(data)])
module.exit_json(**data)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
sharadbhat/Video-Sharing-Platform
|
refs/heads/master
|
Server/server.py
|
1
|
from flask import Flask, request, send_file
import database
import base64
import os
import uuid
import calendar
from fuzzy_search import fuzzy
from image_capture import save_image
app = Flask(__name__)
db = database.Database()
UPLOAD_FOLDER = 'static/videos'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/html/<filename>", methods = ['GET'])
def return_html(filename):
"""
- Returns the html file with the corresponding filename.
"""
if request.method == 'GET':
return send_file('./templates/{}'.format(filename), mimetype='text/html')
@app.route("/css/<filename>", methods = ['GET'])
def return_css(filename):
"""
- Returns the css file with the corresponding filename.
"""
if request.method == 'GET':
return send_file('./static/css/{}'.format(filename), mimetype='text/css')
@app.route("/js/<filename>", methods = ['GET'])
def return_js(filename):
"""
- Returns the js file with the corresponding filename.
"""
if request.method == 'GET':
return send_file('./static/js/{}'.format(filename), mimetype='text/js')
@app.route("/favicon.png", methods = ['GET'])
def return_favicon():
"""
- Returns the favicon image.
"""
if request.method == 'GET':
return send_file('./static/img/favicon.png', mimetype='image/png')
@app.route("/is-available/<video_ID>", methods = ['GET'])
def return_availability(video_ID):
"""
- Returns True if the video ID is present in the database.
- Otherwise False.
"""
if request.method == 'GET':
return str(db.is_available(video_ID))
@app.route("/video/<video_ID>", methods = ['GET'])
def return_video(video_ID):
"""
- Returns the video file with the corresponding video ID.
"""
if request.method == 'GET':
return send_file('./static/videos/{}.mp4'.format(video_ID), mimetype='video/mp4')
@app.route("/image/<video_ID>", methods = ['GET'])
def return_image(video_ID):
"""
- Returns the image file with the corresponding video ID.
"""
if request.method == 'GET':
return send_file('./static/images/{}.jpg'.format(video_ID), mimetype='image/jpg')
@app.route("/title/<video_ID>", methods = ['GET'])
def return_title(video_ID):
"""
- Returns the title of the video with the corresponding video ID.
"""
if request.method == 'GET':
return db.get_video_title(video_ID)
@app.route("/views/<video_ID>", methods = ['GET'])
def return_views(video_ID):
"""
- Returns the view count of the video with the corresponding video ID.
"""
if request.method == 'GET':
return db.get_views(video_ID)
@app.route("/uploader/<video_ID>", methods = ['GET'])
def return_uploader(video_ID):
"""
- Returns the uploader of the video with the corresponding video ID.
"""
if request.method == 'GET':
return db.get_video_uploader(video_ID)
@app.route("/upload-date/<video_ID>", methods = ['GET'])
def return_date(video_ID):
"""
- Returns the upload date of the video with the corresponding video ID.
"""
if request.method == 'GET':
upload_date = str(db.get_upload_date(video_ID))
vid_date = upload_date.split("-")
month = calendar.month_abbr[int(vid_date[1])]
video_upload_date = "{} {}, {}".format(month, vid_date[2], vid_date[0])
return video_upload_date
@app.route("/update-count", methods = ['POST'])
def update_count():
"""
- Updates the view count of the video with the corresponding video ID.
"""
if request.method == 'POST':
video_ID = request.form['video_ID']
db.update_view_count(video_ID)
return "1"
@app.route("/update-watched", methods = ['POST'])
def update_watched():
"""
- Updates the watched list of the user.
"""
if request.method == 'POST':
username = request.form['username']
video_ID = request.form['video_ID']
db.update_watched(username, video_ID)
return "1"
@app.route("/random", methods = ['GET'])
def return_random_ID():
"""
- Returns a random video ID.
"""
if request.method == 'GET':
return db.get_random_ID()
@app.route("/fuzzy/<search_key>", methods = ['GET'])
def fuzzy_results(search_key):
"""
- Returns a list of closest matches for the search key.
"""
if request.method == 'GET':
video_dict, video_titles = db.video_dict()
return str(fuzzy(search_key, video_dict, video_titles))
@app.route("/get-most-viewed", methods = ['GET'])
def return_most_viewed():
"""
- Returns a list of most viewed videos.
"""
if request.method == 'GET':
return str(db.get_most_viewed())
@app.route("/is-valid-user", methods = ['POST'])
def return_is_valid_user():
"""
- Returns True if the user is a valid user.
- Else returns False.
"""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
return str(db.is_valid_user(username, password))
@app.route("/is-valid-username/<username>", methods = ["GET"])
def return_is_valid_username(username):
"""
- Checks if the user is a valid user.
"""
if request.method == 'GET':
return str(db.is_valid_username(username))
@app.route("/add-user", methods = ['POST'])
def add_user():
"""
- Adds the new user credentials to the database.
"""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db.add_user(username, password)
return "1"
@app.route("/update-password", methods = ['POST'])
def update_password():
"""
- Updates the password of the user.
"""
if request.method == 'POST':
username = request.form['username']
old_password = request.form['old_password']
new_password = request.form['new_password']
if db.is_valid_user(username, old_password) == True:
db.update_password(username, new_password)
return "True"
else:
return "False"
@app.route("/delete-user", methods = ['POST'])
def delete_user():
"""
- Deletes the user's account.
"""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
if db.is_valid_user(username, password):
db.delete_user(username)
return "True"
else:
return "False"
@app.route("/is-admin/<username>", methods = ['GET'])
def return_is_admin(username):
"""
- Checks if the user is an administrator.
"""
if request.method == 'GET':
return str(db.is_admin(username))
@app.route("/upload", methods = ['POST'])
def upload_video():
"""
- Uploads the video.
"""
if request.method == 'POST':
video_ID = str(base64.urlsafe_b64encode(str.encode(str(uuid.uuid4().fields[5]))))[2:-1]
username = request.form['username']
title = request.form['title']
file = request.form['file']
filename = open('./static/videos/{}.mp4'.format(video_ID), "wb")
filename.write(base64.b64decode(file))
db.upload_video(video_ID, username, title)
save_image(video_ID)
return video_ID
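# Illustrative client-side sketch for the /upload route (assumes the `requests`
# package; username, title and file name are placeholders):
#   import base64, requests
#   with open('clip.mp4', 'rb') as f:
#       encoded = base64.b64encode(f.read())
#   requests.post('http://localhost:8080/upload',
#                 data={'username': 'alice', 'title': 'My clip', 'file': encoded})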
@app.route("/watched/<username>", methods = ['GET'])
def return_watched(username):
"""
- Returns a list of video IDs watched by the user.
"""
if request.method == 'GET':
return str(db.get_watched(username))
@app.route("/uploaded/<username>", methods = ['GET'])
def return_uploaded(username):
"""
- Returns a list of video IDs uploaded by the user.
"""
if request.method == 'GET':
return str(db.get_uploaded(username))
@app.route("/is-user-present/<username>", methods = ['GET'])
def return_user_availability(username):
"""
- Checks if the user is present in the database.
"""
if request.method == 'GET':
return str(db.is_user_present(username))
@app.route("/delete-video", methods = ['POST'])
def delete_video():
"""
- If the user is the uploader of the video, the video is deleted.
"""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
video_ID = request.form['video_ID']
if db.is_valid_user(username, password) == True:
db.delete_video(video_ID)
return str(True)
else:
return str(False)
@app.route("/get-random/<video_ID>", methods = ['GET'])
def return_random_video_IDs(video_ID):
"""
- Returns 5 random video IDs.
- If the list contains the current video ID, it is removed.
"""
if request.method == 'GET':
random = db.get_five_random_IDs()
if video_ID in random:
random.remove(video_ID)
return str(random)
@app.route("/flag", methods = ['POST'])
def flag_video_ID():
"""
- Gets the username and the video ID to be flagged.
- Flags the video in the FLAGS table.
"""
if request.method == 'POST':
username = request.form['username']
video_ID = request.form['video_ID']
db.flag_ID(username, video_ID)
return "1"
@app.route("/user-video-count/<username>", methods = ['GET'])
def return_user_video_count(username):
"""
In GET request
- Returns number of videos uploaded by the user.
"""
if request.method == 'GET':
return str(db.get_user_video_count(username))
@app.route("/user-view-count/<username>", methods = ['GET'])
def return_user_view_count(username):
"""
In GET request
- Returns number of views on all videos uploaded by the user.
"""
if request.method == 'GET':
return str(db.get_user_view_count(username))
@app.route("/user-best-video/<username>", methods = ['GET'])
def return_user_best_video(username):
"""
In GET request
- Returns video ID of the video uploaded by the user with the highest view count.
"""
if request.method == 'GET':
return str(db.get_best_video_ID(username))
@app.route("/user-fav-video/<username>", methods = ['GET'])
def return_user_fav_video(username):
"""
In GET request
- Returns video ID of the user's favourite video.
"""
if request.method == 'GET':
return str(db.get_fav_video_ID(username))
# ADMIN PART
@app.route("/add-admin", methods = ['POST'])
def add_admin():
"""
In POST request
- Adds the new administrator to the ADMINS table.
"""
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
db.add_admin(username, password)
return "1"
@app.route("/flagger/<video_ID>", methods = ['GET'])
def return_flagger(video_ID):
"""
In GET request
- Returns the username of the user that flagged the video with the corresponding video ID from the FLAGS table.
"""
if request.method == 'GET':
return str(db.get_flagger(video_ID))
@app.route("/flagged", methods = ['GET'])
def return_flagged():
"""
In GET request
- Returns a list of flagged videos.
"""
if request.method == 'GET':
return str(db.get_flagged())
@app.route("/admin-delete-video", methods = ['POST'])
def admin_delete_video():
"""
In POST request
- Deletes the video from VIDEOS table.
"""
if request.method == 'POST':
video_ID = request.form['video_ID']
print(video_ID)
db.delete_video(video_ID)
return "1"
@app.route("/user-list", methods = ['GET'])
def return_users_list():
"""
In GET request
- Returns a list of users in the database.
"""
if request.method == 'GET':
return str(db.user_list())
@app.route("/num-videos/<username>", methods = ['GET'])
def return_user_video_number(username):
"""
In GET request
- Returns the number of videos uploaded by the user with the corresponding username.
"""
if request.method == 'GET':
return str(db.get_video_num(username))
@app.route("/num-flags/<username>", methods = ['GET'])
def return_user_flagged_number(username):
"""
In GET request
- Returns the number of videos uploaded by the user that have been flagged by other users.
"""
if request.method == 'GET':
return str(db.get_flagged_num(username))
@app.route("/admin-delete-user", methods = ['POST'])
def admin_delete_user():
"""
In POST request
- Deletes the user with the corresponding username.
"""
if request.method == 'POST':
username = request.form['username']
db.delete_user(username)
return "1"
@app.route("/user-count", methods = ['GET'])
def return_user_count():
"""
In GET request
- Returns number of users in the USERS table.
"""
if request.method == 'GET':
return str(db.get_user_count())
@app.route("/video-count", methods = ['GET'])
def return_video_count():
"""
In GET request
- Returns number of videos in the VIDEOS table.
"""
if request.method == 'GET':
return str(db.get_video_count())
@app.route("/view-count", methods = ['GET'])
def return_view_count():
"""
In GET request
- Returns number of views on all videos in the VIDEOS table.
"""
if request.method == 'GET':
return str(db.get_total_view_count())
@app.route("/flag-count", methods = ['GET'])
def return_flag_count():
"""
In GET request
- Returns number of flagged videos in the VIDEOS table.
"""
if request.method == 'GET':
return str(db.get_flag_count())
@app.route("/favourites/<username>", methods = ['GET'])
def return_favourites(username):
"""
In GET request
- Returns a list of videos favourited by the user.
"""
if request.method == 'GET':
return str(db.get_favourites(username))
@app.route("/remove-flag", methods = ['POST'])
def remove_flag():
"""
In POST request
- Removes the flag for the video with the corresponding video ID from the FLAGS table.
"""
if request.method == 'POST':
video_ID = request.form['video_ID']
db.delete_flag(video_ID)
return "1"
if __name__ == '__main__':
app.run(port=8080, debug=True)
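# --- Illustrative client sketch (added example, not part of the original app) ---
# A minimal, hypothetical helper showing how a client could call two of the
# routes defined above. 'alice' and 'abc123' are placeholder values, and the
# third-party 'requests' dependency is an assumption made only for this sketch;
# it expects the server above to be running locally on port 8080.
def _example_client_session():
    import requests
    base = 'http://localhost:8080'
    # Flag a video on behalf of a user; the form fields match the /flag handler.
    requests.post(base + '/flag', data={'username': 'alice', 'video_ID': 'abc123'})
    # Fetch the list of video IDs the user has watched (served by /watched/<username>).
    return requests.get(base + '/watched/alice').text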
|
mikkylok/mikky.lu
|
refs/heads/master
|
venv/lib/python2.7/site-packages/jinja2/debug.py
|
132
|
# -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, PY2
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# what does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch the errors
# and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# re-link the frames (which are proxies) into a single chain
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def get_jinja_locals(real_locals):
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
local_overrides = {}
for name, value in iteritems(real_locals):
if not name.startswith('l_') or value is missing:
continue
try:
_, depth, name = name.split('_', 2)
depth = int(depth)
except ValueError:
continue
cur_depth = local_overrides.get(name, (-1,))[0]
if cur_depth < depth:
local_overrides[name] = (depth, value)
for name, (_, value) in iteritems(local_overrides):
if value is missing:
locals.pop(name, None)
else:
locals[name] = value
return locals
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
locals = get_jinja_locals(tb.tb_frame.f_locals)
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
# assemble the fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
if PY2:
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
else:
code = CodeType(0, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except Exception:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non-CPython
interpreters.
"""
import ctypes
from types import TracebackType
if PY2:
# figure out size of _Py_ssize_t for Python 2:
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
else:
# platform ssize_t on Python 3
_Py_ssize_t = ctypes.c_ssize_t
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
try:
tb_set_next = _init_ugly_crap()
except:
pass
del _init_ugly_crap
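# --- Illustrative usage sketch (added example, not part of the original module) ---
# A minimal, hypothetical demonstration of the public entry point above: catch
# an exception, feed sys.exc_info() to make_traceback(), and print the
# re-rendered traceback. The failing helper below merely stands in for code
# raised from inside a compiled template.
if __name__ == '__main__':
    def _simulated_template_failure():
        raise RuntimeError('simulated failure inside a template')
    try:
        _simulated_template_failure()
    except Exception:
        processed = make_traceback(sys.exc_info())
        print(processed.render_as_text())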
|
cloudbase/neutron-virtualbox
|
refs/heads/virtualbox_agent
|
neutron/plugins/cisco/db/network_models_v2.py
|
50
|
# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from neutron.db import model_base
class QoS(model_base.BASEV2):
"""Represents QoS policies for a tenant."""
__tablename__ = 'cisco_qos_policies'
qos_id = sa.Column(sa.String(255))
tenant_id = sa.Column(sa.String(255), primary_key=True)
qos_name = sa.Column(sa.String(255), primary_key=True)
qos_desc = sa.Column(sa.String(255))
class Credential(model_base.BASEV2):
"""Represents credentials for a tenant to control Cisco switches."""
__tablename__ = 'cisco_credentials'
credential_id = sa.Column(sa.String(255))
credential_name = sa.Column(sa.String(255), primary_key=True)
user_name = sa.Column(sa.String(255))
password = sa.Column(sa.String(255))
type = sa.Column(sa.String(255))
class ProviderNetwork(model_base.BASEV2):
"""Represents networks that were created as provider networks."""
__tablename__ = 'cisco_provider_networks'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(255), nullable=False)
segmentation_id = sa.Column(sa.Integer, nullable=False)
|
ninjin/spearmint-lite
|
refs/heads/master
|
gp.py
|
1
|
##
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.linalg as spla
import scipy.optimize as spo
import scipy.io as sio
import matplotlib
import matplotlib.pyplot as plt
import scipy.weave
import time
SQRT_3 = np.sqrt(3.0)
SQRT_5 = np.sqrt(5.0)
def dist2(ls, x1, x2=None):
# Assumes NxD and MxD matrices.
# Compute the squared distance matrix, given length scales.
if x2 is None:
# Find distance with self for x1.
# Rescale.
xx1 = x1 / ls
xx2 = xx1
else:
# Rescale.
xx1 = x1 / ls
xx2 = x2 / ls
r2 = np.maximum(-(np.dot(xx1, 2*xx2.T)
- np.sum(xx1*xx1, axis=1)[:,np.newaxis]
- np.sum(xx2*xx2, axis=1)[:,np.newaxis].T), 0.0)
return r2
def grad_dist2(ls, x1, x2=None):
if x2 is None:
x2 = x1
# Rescale.
x1 = x1 / ls
x2 = x2 / ls
N = x1.shape[0]
M = x2.shape[0]
D = x1.shape[1]
gX = np.zeros((x1.shape[0],x2.shape[0],x1.shape[1]))
code = \
"""
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
for (int d=0; d<D; d++)
gX(i,j,d) = (2/ls(d))*(x1(i,d) - x2(j,d));
"""
scipy.weave.inline(code, ['x1','x2','gX','ls','M','N','D'], \
type_converters=scipy.weave.converters.blitz, \
compiler='gcc')
# The C code weave above is 10x faster than this:
#for i in xrange(0,x1.shape[0]):
# gX[i,:,:] = 2*(x1[i,:] - x2[:,:])*(1/ls)
return gX
def SE(ls, x1, x2=None, grad=False):
ls = np.ones(ls.shape)
cov = np.exp(-0.5 * dist2(ls, x1, x2))
if grad:
return (cov, grad_ARDSE(ls, x1, x2))
else:
return cov
def ARDSE(ls, x1, x2=None, grad=False):
cov = np.exp(-0.5 * dist2(ls, x1, x2))
if grad:
return (cov, grad_ARDSE(ls, x1, x2))
else:
return cov
def grad_ARDSE(ls, x1, x2=None):
r2 = dist2(ls, x1, x2)
r = np.sqrt(r2)
return -0.5*np.exp(-0.5*r2)[:,:,np.newaxis] * grad_dist2(ls, x1, x2)
def Matern32(ls, x1, x2=None, grad=False):
r = np.sqrt(dist2(ls, x1, x2))
cov = (1 + SQRT_3*r) * np.exp(-SQRT_3*r)
if grad:
return (cov, grad_Matern32(ls, x1, x2))
else:
return cov
def grad_Matern32(ls, x1, x2=None):
r = np.sqrt(dist2(ls, x1, x2))
grad_r2 = -1.5*np.exp(-SQRT_3*r)
return grad_r2[:,:,np.newaxis] * grad_dist2(ls, x1, x2)
def Matern52(ls, x1, x2=None, grad=False):
r2 = np.abs(dist2(ls, x1, x2))
r = np.sqrt(r2)
cov = (1.0 + SQRT_5*r + (5.0/3.0)*r2) * np.exp(-SQRT_5*r)
if grad:
return (cov, grad_Matern52(ls, x1, x2))
else:
return cov
def grad_Matern52(ls, x1, x2=None):
r = np.sqrt(dist2(ls, x1, x2))
grad_r2 = -(5.0/6.0)*np.exp(-SQRT_5*r)*(1 + SQRT_5*r)
return grad_r2[:,:,np.newaxis] * grad_dist2(ls, x1, x2)
def linearARD(ls, x1, x2=None, grad=False):
if x2 is None:
# Find distance with self for x1.
# Rescale.
xx1 = x1 / ls
xx2 = xx1
else:
# Rescale.
xx1 = x1 / ls
xx2 = x2 / ls
K = np.dot(xx1, xx2.T)
if grad:
gKx = (-2*np.dot(xx1[:,np.newaxis,:], xx2[:,np.newaxis,:]).T)[0,:,:,:]
return (K, gKx)
else:
return K
def dist_Mahalanobis(U, x1, x2=None):
W = np.dot(U,U.T)
class GP:
def __init__(self, covar="Matern52", mcmc_iters=10, noiseless=False):
self.cov_func = globals()[covar]
self.mcmc_iters = int(mcmc_iters)
self.D = -1
self.hyper_iters = 1
self.noiseless = bool(int(noiseless))
self.hyper_samples = []
self.noise_scale = 0.1 # horseshoe prior
self.amp2_scale = 1 # zero-mean log normal prior
self.max_ls = 2 # top-hat prior on length scales
def real_init(self, dims, values):
# Input dimensionality.
self.D = dims
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(values)
# Initial observation noise.
self.noise = 1e-3
# Initial mean.
self.mean = np.mean(values)
def cov(self, x1, x2=None):
if x2 is None:
return self.amp2 * (self.cov_func(self.ls, x1, None)
+ 1e-6*np.eye(x1.shape[0]))
else:
return self.amp2 * self.cov_func(self.ls, x1, x2)
def logprob(self, comp, vals):
mean = self.mean
amp2 = self.amp2
noise = self.noise
cov = amp2 * (self.cov_func(self.ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
chol = spla.cholesky(cov, lower=True)
solve = spla.cho_solve((chol, True), vals - mean)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
return lp
def optimize_hypers(self, comp, vals):
self.mean = np.mean(vals)
diffs = vals - self.mean
state = { }
def jitter_chol(covmat):
passed = False
jitter = 1e-8
val = 0
while not passed:
if (jitter > 100000):
val = spla.cholesky(np.eye(covmat.shape[0]))
break
try:
val = spla.cholesky(covmat +
jitter*np.eye(covmat.shape[0]), lower=True)
passed = True
except ValueError:
jitter = jitter*1.1
print "Covariance matrix not PSD, adding jitter:", jitter
passed = False
return val
def memoize(amp2, noise, ls):
if ( 'corr' not in state
or state['amp2'] != amp2
or state['noise'] != noise
or np.any(state['ls'] != ls)):
# Get the correlation matrix
(corr, grad_corr) = self.cov_func(ls, comp, None, grad=True)
# Scale and add noise & jitter.
covmat = (amp2 * (corr + 1e-6*np.eye(comp.shape[0]))
+ noise * np.eye(comp.shape[0]))
# Memoize
state['corr'] = corr
state['grad_corr'] = grad_corr
state['chol'] = jitter_chol(covmat)
state['amp2'] = amp2
state['noise'] = noise
state['ls'] = ls
return (state['chol'], state['corr'], state['grad_corr'])
def nlogprob(hypers):
amp2 = np.exp(hypers[0])
noise = np.exp(hypers[1])
ls = np.exp(hypers[2:])
chol = memoize(amp2, noise, ls)[0]
solve = spla.cho_solve((chol, True), diffs)
lp = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(diffs, solve)
return -lp
def grad_nlogprob(hypers):
amp2 = np.exp(hypers[0])
noise = np.exp(hypers[1])
ls = np.exp(hypers[2:])
chol, corr, grad_corr = memoize(amp2, noise, ls)
solve = spla.cho_solve((chol, True), diffs)
inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))
jacobian = np.outer(solve, solve) - inv_cov
grad = np.zeros(self.D + 2)
# Log amplitude gradient.
grad[0] = 0.5 * np.trace(np.dot( jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2
# Log noise gradient.
grad[1] = 0.5 * np.trace(np.dot( jacobian, np.eye(chol.shape[0]))) * noise
# Log length scale gradients.
for dd in xrange(self.D):
grad[dd+2] = 1 * np.trace(np.dot( jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])
# Roll in the prior variance.
#grad -= 2*hypers/self.hyper_prior
return -grad
# Initial length scales.
self.ls = np.ones(self.D)
# Initial amplitude.
self.amp2 = np.std(vals)
# Initial observation noise.
self.noise = 1e-3
hypers = np.zeros(self.ls.shape[0]+2)
hypers[0] = np.log(self.amp2)
hypers[1] = np.log(self.noise)
hypers[2:] = np.log(self.ls)
# Use a bounded bfgs just to prevent the length-scales and noise from
# getting into regions that are numerically unstable
b = [(-10,10),(-10,10)]
for i in xrange(comp.shape[1]):
b.append((-10,5))
hypers = spo.fmin_l_bfgs_b(nlogprob, hypers, grad_nlogprob, args=(), bounds=b, disp=0)
#hypers = spo.fmin_bfgs(nlogprob, hypers, grad_nlogprob, maxiter=100)
hypers = hypers[0]
#hypers = spo.fmin_bfgs(nlogprob, hypers, grad_nlogprob, maxiter=100)
self.amp2 = np.exp(hypers[0])
self.noise = np.exp(hypers[1])
self.ls = np.exp(hypers[2:])
def main():
# Let's start with some random values
x = np.linspace(0,1,10)[:,np.newaxis]*10#np.random.rand(100)[:,np.newaxis]
y = np.random.randn(10)
mygp = GP(covar='linearARD')
mygp.real_init(x.shape[1], y)
# Sample some functions given these hyperparameters and plot them
for i in xrange(0,5):
x = np.linspace(0,1,100)[:,np.newaxis]*10
K = mygp.cov(x)
y = np.random.randn(100)
fsamp = mygp.mean + np.dot(spla.cholesky(K).transpose(), y)
plt.plot(x, fsamp)
print 'Loglikelihood before optimizing: ', mygp.logprob(x,y)
mygp.optimize_hypers(x,y)
print 'Loglikelihood after optimizing: ', mygp.logprob(x,y)
plt.show()
if __name__ == '__main__':
main()
|
lostdj/Jaklin-OpenJFX
|
refs/heads/jaklin-master
|
modules/web/src/main/native/Tools/Scripts/webkitpy/style/main_unittest.py
|
124
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from main import change_directory
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.logtesting import LogTesting
class ChangeDirectoryTest(unittest.TestCase):
_original_directory = "/original"
_checkout_root = "/WebKit"
def setUp(self):
self._log = LogTesting.setUp(self)
self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory)
def tearDown(self):
self._log.tearDown()
def _change_directory(self, paths, checkout_root):
return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root)
def _assert_result(self, actual_return_value, expected_return_value,
expected_log_messages, expected_current_directory):
self.assertEqual(actual_return_value, expected_return_value)
self._log.assertMessages(expected_log_messages)
self.assertEqual(self.filesystem.getcwd(), expected_current_directory)
def test_paths_none(self):
paths = self._change_directory(checkout_root=self._checkout_root, paths=None)
self._assert_result(paths, None, [], self._checkout_root)
def test_paths_convertible(self):
paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root)
def test_with_scm_paths_unconvertible(self):
paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"]
paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
log_messages = [
"""WARNING: Path-dependent style checks may not work correctly:
One of the given paths is outside the WebKit checkout of the current
working directory:
Path: /outside/foo2.txt
Checkout root: /WebKit
Pass only files below the checkout root to ensure correct results.
See the help documentation for more info.
"""]
self._assert_result(paths, paths, log_messages, self._original_directory)
|
GriceTurrble/python-amazon-mws
|
refs/heads/develop
|
tests/request_methods/test_finances.py
|
1
|
"""
Tests for the Finances API class.
"""
import unittest
import datetime
import mws
from .utils import CommonRequestTestTools, transform_date
class FinancesTestCase(unittest.TestCase, CommonRequestTestTools):
"""
Test cases for Finances.
"""
# TODO: Add remaining methods for Finances
def setUp(self):
self.api = mws.Finances(
self.CREDENTIAL_ACCESS,
self.CREDENTIAL_SECRET,
self.CREDENTIAL_ACCOUNT,
auth_token=self.CREDENTIAL_TOKEN
)
self.api._test_request_params = True
def test_list_financial_event_groups(self):
"""
ListFinancialEventGroups operation.
"""
created_after = datetime.datetime.utcnow()
created_before = datetime.datetime.utcnow()
max_results = 659
params = self.api.list_financial_event_groups(
created_after=created_after,
created_before=created_before,
max_results=max_results,
)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListFinancialEventGroups')
self.assertEqual(params['FinancialEventGroupStartedAfter'],
transform_date(created_after))
self.assertEqual(params['FinancialEventGroupStartedBefore'],
transform_date(created_before))
self.assertEqual(params['MaxResultsPerPage'], str(max_results))
def test_list_financial_event_groups_by_next_token(self):
"""
ListFinancialEventGroupsByNextToken operation, via method decorator.
"""
next_token = 'VcNq06R0dO'
params = self.api.list_financial_event_groups(next_token=next_token)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListFinancialEventGroupsByNextToken')
self.assertEqual(params['NextToken'], next_token)
def test_list_financial_event_groups_by_next_token_alias(self):
"""
ListFinancialEventGroupsByNextToken operation, via alias method.
"""
next_token = 'uhEPBAvUYR'
params = self.api.list_financial_event_groups_by_next_token(next_token)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListFinancialEventGroupsByNextToken')
self.assertEqual(params['NextToken'], next_token)
def test_list_financial_events(self):
"""
ListFinancialEvents operation.
"""
posted_after = datetime.datetime.utcnow()
posted_before = datetime.datetime.utcnow()
amazon_order_id = '123-4567890-1234567'
financial_event_group_id = '22YgYW55IGNhcm5hbCBwbGVhEXAMPLE'
max_results = 156
params = self.api.list_financial_events(
financial_event_group_id=financial_event_group_id,
amazon_order_id=amazon_order_id,
posted_after=posted_after,
posted_before=posted_before,
max_results=max_results,
)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListFinancialEvents')
self.assertEqual(params['FinancialEventGroupId'], financial_event_group_id)
self.assertEqual(params['AmazonOrderId'], amazon_order_id)
self.assertEqual(params['PostedAfter'], transform_date(posted_after))
self.assertEqual(params['PostedBefore'], transform_date(posted_before))
self.assertEqual(params['MaxResultsPerPage'], str(max_results))
def test_list_financial_events_by_next_token(self):
"""
ListFinancialEventsByNextToken operation, via method decorator.
"""
next_token = '2t1DdnGqgf'
params = self.api.list_financial_events(next_token=next_token)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListFinancialEventsByNextToken')
self.assertEqual(params['NextToken'], next_token)
def test_list_financial_events_by_next_token_alias(self):
"""
ListFinancialEventsByNextToken operation, via alias method.
"""
next_token = '7Ijm9Kmrgp'
params = self.api.list_financial_events_by_next_token(next_token)
self.assert_common_params(params)
self.assertEqual(params['Action'], 'ListFinancialEventsByNextToken')
self.assertEqual(params['NextToken'], next_token)
|
diox/olympia
|
refs/heads/master
|
src/olympia/users/migrations/0004_auto_20201002_1006.py
|
5
|
# Generated by Django 2.2.14 on 2020-10-02 10:06
from django.db import migrations
def clear_user_restriction_history(apps, schema_editor):
fields = {'last_login_ip': '', 'ip_address': ''}
UserRestrictionHistory = apps.get_model('users', 'UserRestrictionHistory')
qs = UserRestrictionHistory.objects.filter(
user__last_login_ip='', user__deleted=True).exclude(**fields)
qs.update(**fields)
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20200624_0225'),
]
operations = [
migrations.RunPython(clear_user_restriction_history)
]
|
tolimit/tp-qemu
|
refs/heads/master
|
generic/tests/save_restore.py
|
16
|
import logging
import time
import tempfile
import os.path
from autotest.client.shared import error
def run(test, params, env):
"""
VM save / restore test:
1) Wait save_restore_start_delay seconds (default=10.0)
2) Verify VM is running
3) Pause, save VM to file (optionally in save_restore_path), verify paused.
4) Wait save_restore_delay seconds (if specified)
5) Restore VM from file, verify running
6) Repeat save_restore_repeat times or
until save_restore_duration seconds pass.
:param test: test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def get_save_filename(path="", file_pfx=""):
"""
Generate a filename guaranteed not to clash.
:param path: Optional base path to place the file
:param file_pfx: Optional prefix for the filename
:return: absolute path to new non-clashing filename
"""
if not path:
path = tempfile.gettempdir()
fd, filename = tempfile.mkstemp(prefix=file_pfx, dir=path)
os.close(fd)
return filename
def nuke_filename(filename):
"""
Try to unlink filename, ignore any os errors.
"""
try:
os.unlink(filename)
except OSError:
pass
def check_system(vm, timeout):
"""
Raise TestFail if system is not in expected state
"""
session = None
try:
try:
session = vm.wait_for_login(timeout=timeout)
result = session.is_responsive(timeout=timeout / 10.0)
if not result:
logging.warning(
"Login session established, but non-responsive")
# assume guest is just busy with stuff
except:
raise error.TestFail(
"VM check timed out and/or VM non-responsive")
finally:
del session
vm = env.get_vm(params["main_vm"])
session = vm.wait_for_login(timeout=600)
start_delay = float(params.get("save_restore_start_delay", "10.0"))
restore_delay = float(params.get("save_restore_delay", "0.0"))
save_restore_duration = float(params.get("save_restore_duration", "60.0"))
repeat = int(params.get("save_restore_repeat", "1"))
path = os.path.abspath(params.get("save_restore_path", "/tmp"))
file_pfx = vm.name + '-'
save_file = get_save_filename(path, file_pfx)
save_restore_bg_command = params.get("save_restore_bg_command")
if save_restore_bg_command:
session.cmd(save_restore_bg_command + ' &')
try:
# assume sh-like shell, try to get background process's pid
bg_command_pid = int(session.cmd('jobs -rp'))
except ValueError:
logging.warning(
"Background guest command 'job -rp' output not PID")
bg_command_pid = None
del session # don't leave stray ssh session lying around over save/restore
start_time = time.time()
# 'now' needs outside scope for error.TestFail() at end
# especially if exception thrown in loop before completion
now = time_to_stop = (start_time + save_restore_duration)
while True:
try:
vm.verify_kernel_crash()
check_system(vm, 120) # networking needs time to recover
logging.info("Save/restores left: %d (or %0.4f more seconds)" %
(repeat, (time_to_stop - time.time())))
if start_delay:
logging.debug("Sleeping %0.4f seconds start_delay" %
start_delay)
time.sleep(start_delay)
vm.pause()
vm.verify_kernel_crash()
save_file = get_save_filename(path, file_pfx)
vm.save_to_file(save_file)
vm.verify_kernel_crash()
if restore_delay:
logging.debug("Sleeping %0.4f seconds restore_delay" %
restore_delay)
time.sleep(restore_delay)
vm.restore_from_file(save_file)
vm.verify_kernel_crash()
vm.resume() # make sure some work gets done
vm.verify_kernel_crash()
now = time.time()
finally:
if save_file:
nuke_filename(save_file) # make sure these are cleaned up
# Prepare/check next loop iteration
repeat -= 1
# TODO: or BG test status==foo
if (now >= time_to_stop) or (repeat <= 0):
break
save_file = get_save_filename(path, file_pfx)
# Check the final save/restore cycle
check_system(vm, 120) # networking needs time to recover
logging.info("Save/Restore itteration(s) complete.")
if save_restore_bg_command and bg_command_pid:
session = vm.wait_for_login(timeout=120)
status = session.cmd_status('kill %d' % bg_command_pid)
if status != 0:
logging.warning("Background guest command kill %d failed" %
bg_command_pid)
del session
if repeat > 0: # time_to_stop reached but iterations didn't complete
raise error.TestFail("Save/Restore save_restore_duration"
" exceeded by %0.4f seconds with %d itterations"
" remaining." % (now - time_to_stop, repeat + 1))
|
mrshu/lemm-sk
|
refs/heads/master
|
test_lemmsk.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import lemmsk
import unittest
class TestSimpleLemmatization(unittest.TestCase):
def test_lemmatization(self):
self.assertEqual(lemmsk.lemmatize('kuraciemu'), 'kurací')
self.assertEqual(lemmsk.lemmatize('kuracieho'), 'kurací')
self.assertTrue(lemmsk.is_lemma('kurací'))
self.assertFalse(lemmsk.is_lemma('kuraciemu'))
self.assertFalse(lemmsk.is_lemma('kuracieho'))
if __name__ == '__main__':
unittest.main()
|
xArm-Developer/xArm-Python-SDK
|
refs/heads/master
|
xarm/core/config/x_config.py
|
1
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import math
class XCONF(object):
ARM_AXIS_NUM = 7
GRIPPER_ID = 8
TGPIO_ID = 9
MAX_CMD_NUM = 1024
def __init__(self):
pass
class Robot:
class Axis:
XARM5 = 5
XARM6 = 6
XARM7 = 7
class Type:
XARM6_X1 = 1
XARM7_X2 = 2
XARM7_X3 = 3
XARM7_X3MIR = 4
XARM5_X4 = 5
XARM6_X4 = 6
XARM7_X4 = 7
XARM6_X8 = 8
JOINT_LIMITS = {
Axis.XARM5: {
Type.XARM5_X4: [
(-2 * math.pi, 2 * math.pi),
(-2.059488, 2.094395), # (-2.18, 2.18),
(-3.92699, 0.191986), # (-4.01, 0.1),
(-1.692969, math.pi), # (-1.75, math.pi),
(-2 * math.pi, 2 * math.pi)
],
},
Axis.XARM6: {
Type.XARM6_X4: [
(-2 * math.pi, 2 * math.pi),
(-2.059488, 2.094395), # (-2.18, 2.18),
(-3.92699, 0.191986), # (-4.01, 0.1),
(-2 * math.pi, 2 * math.pi),
(-1.692969, math.pi), # (-1.75, math.pi),
(-2 * math.pi, 2 * math.pi)
],
Type.XARM6_X8: [
(-2 * math.pi, 2 * math.pi),
(-2.059488, 2.094395), # (-2.18, 2.18),
(-0.191986, 3.92699),
(-2 * math.pi, 2 * math.pi),
(-1.692969, math.pi), # (-1.75, math.pi),
(-2 * math.pi, 2 * math.pi)
],
},
Axis.XARM7: {
Type.XARM7_X3: [
(-2 * math.pi, 2 * math.pi),
(-2.059488, 2.094395), # (-2.18, 2.18),
(-2 * math.pi, 2 * math.pi),
(-3.92699, 0.191986), # (-4.01, 0.1),
(-2 * math.pi, 2 * math.pi),
(-1.692969, math.pi), # (-1.75, math.pi),
(-2 * math.pi, 2 * math.pi)
],
Type.XARM7_X4: [
(-2 * math.pi, 2 * math.pi),
(-2.059488, 2.094395), # (-2.18, 2.18),
(-2 * math.pi, 2 * math.pi),
(-0.191986, 3.92699), # (-0.1, 4.01),
(-2 * math.pi, 2 * math.pi),
(-1.692969, math.pi), # (-1.75, math.pi),
(-2 * math.pi, 2 * math.pi)
],
}
}
TCP_LIMITS = {
Axis.XARM5: {
Type.XARM5_X4: [
(-750, 750),
(-750, 750),
(-400, 1000),
(math.pi, math.pi),
(0, 0),
(-math.pi, math.pi)
],
},
Axis.XARM6: {
Type.XARM6_X1: [
(-750, 750),
(-750, 750),
(-400, 1000),
(-math.pi, math.pi),
(-math.pi, math.pi),
(-math.pi, math.pi)
],
Type.XARM6_X4: [
(-750, 750),
(-750, 750),
(-400, 1000),
(-math.pi, math.pi),
(-math.pi, math.pi),
(-math.pi, math.pi)
],
Type.XARM6_X8: [
(-1000, 1000),
(-1000, 1000),
(-600, 1200),
(-math.pi, math.pi),
(-math.pi, math.pi),
(-math.pi, math.pi)
],
},
Axis.XARM7: {
Type.XARM7_X3: [
(-750, 750),
(-750, 750),
(-400, 1000),
(-math.pi, math.pi),
(-math.pi, math.pi),
(-math.pi, math.pi)
],
Type.XARM7_X4: [
(-750, 750),
(-750, 750),
(-400, 1000),
(-math.pi, math.pi),
(-math.pi, math.pi),
(-math.pi, math.pi)
],
}
}
class SerialConf:
SERIAL_BAUD = 2000000 # 921600
UXBUS_RXQUE_MAX = 10
UXBUS_DEF_FROMID = 0xAA
UXBUS_DEF_TOID = 0x55
UX2_HEX_PROTOCOL = 1
UX2_STR_PROTOCOL = 2
UX1_HEX_PROTOCOL = 3
UX1_STR_PROTOCOL = 4
class SocketConf:
TCP_CONTROL_PORT = 502
TCP_REPORT_NORM_PORT = 30001
TCP_REPORT_RICH_PORT = 30002
TCP_REPORT_REAL_PORT = 30003
TCP_RX_QUE_MAX = 1024
TCP_CONTROL_BUF_SIZE = 1024
TCP_REPORT_REAL_BUF_SIZE = 87
TCP_REPORT_NORMAL_BUF_SIZE = 133
TCP_REPORT_RICH_BUF_SIZE = 233
class UxbusReg:
GET_VERSION = 1
GET_ROBOT_SN = 2
CHECK_VERIFY = 3
RELOAD_DYNAMICS = 4
GET_REPORT_TAU_OR_I = 5
SHUTDOWN_SYSTEM = 10
MOTION_EN = 11
SET_STATE = 12
GET_STATE = 13
GET_CMDNUM = 14
GET_ERROR = 15
CLEAN_ERR = 16
CLEAN_WAR = 17
SET_BRAKE = 18
SET_MODE = 19
MOVE_LINE = 21
MOVE_LINEB = 22
MOVE_JOINT = 23
MOVE_JOINTB = 24
MOVE_HOME = 25
SLEEP_INSTT = 26
MOVE_CIRCLE = 27
MOVE_LINE_TOOL = 28
MOVE_SERVOJ = 29
MOVE_SERVO_CART = 30
SET_TCP_JERK = 31
SET_TCP_MAXACC = 32
SET_JOINT_JERK = 33
SET_JOINT_MAXACC = 34
SET_TCP_OFFSET = 35
SET_LOAD_PARAM = 36
SET_COLLIS_SENS = 37
SET_TEACH_SENS = 38
CLEAN_CONF = 39
SAVE_CONF = 40
GET_TCP_POSE = 41
GET_JOINT_POS = 42
GET_IK = 43
GET_FK = 44
IS_JOINT_LIMIT = 45
IS_TCP_LIMIT = 46
SET_REDUCED_TRSV = 47
SET_REDUCED_P2PV = 48
GET_REDUCED_MODE = 49
SET_REDUCED_MODE = 50
SET_GRAVITY_DIR = 51
SET_LIMIT_XYZ = 52
GET_REDUCED_STATE = 53
SET_SERVOT = 54
GET_JOINT_TAU = 55
SET_SAFE_LEVEL = 56
GET_SAFE_LEVEL = 57
SET_REDUCED_JRANGE = 58
SET_FENSE_ON = 59
SET_COLLIS_REB = 60
SET_TRAJ_RECORD = 61
SAVE_TRAJ = 62
LOAD_TRAJ = 63
PLAY_TRAJ = 64
GET_TRAJ_RW_STATUS = 65
REPORT_TAU_OR_I = 70
SET_TIMER = 71
CANCEL_TIMER = 72
SET_WORLD_OFFSET = 73
CNTER_RESET = 74
CNTER_PLUS = 75
CAL_POSE_OFFSET = 76
SET_SELF_COLLIS_CHECK = 77
SET_COLLIS_TOOL = 78
SET_SIMULATION_ROBOT = 79
VC_SET_JOINTV = 81
VC_SET_CARTV = 82
GET_TCP_POSE_AA = 91
MOVE_LINE_AA = 92
MOVE_SERVO_CART_AA = 93
SERVO_W16B = 101
SERVO_R16B = 102
SERVO_W32B = 103
SERVO_R32B = 104
SERVO_ZERO = 105
SERVO_DBMSG = 106
CALI_TCP_POSE = 111
CALI_TCP_ORIENT = 112
CALI_WRLD_ORIENT = 113
CALI_WRLD_POSE = 114
TGPIO_MB_TIOUT = 123
TGPIO_MODBUS = 124
TGPIO_ERR = 125
TGPIO_W16B = 127
TGPIO_R16B = 128
TGPIO_W32B = 129
TGPIO_R32B = 130
CGPIO_GET_DIGIT = 131
CGPIO_GET_ANALOG1 = 132
CGPIO_GET_ANALOG2 = 133
CGPIO_SET_DIGIT = 134
CGPIO_SET_ANALOG1 = 135
CGPIO_SET_ANALOG2 = 136
CGPIO_SET_IN_FUN = 137
CGPIO_SET_OUT_FUN = 138
CGPIO_GET_STATE = 139
GET_PWR_VERSION = 140
GET_HD_TYPES = 141
DELAYED_CGPIO_SET = 142
DELAYED_TGPIO_SET = 143
POSITION_CGPIO_SET = 144
POSITION_TGPIO_SET = 145
SET_IO_STOP_RESET = 146
POSITION_CGPIO_SET_ANALOG = 147
GET_EXE_FT = 150
FTSENSOR_ENABLE = 201
FTSENSOR_SET_APP = 202
FTSENSOR_GET_APP = 203
FTSENSOR_IDEN_LOAD = 204
FTSENSOR_CALI_LOAD_OFFSET = 205
FTSENSOR_SET_ZERO = 206
IMPEDANCE_CONFIG = 207
FORCE_CTRL_PID = 208
FORCE_CTRL_CONFIG = 209
IMPEDANCE_CTRL_MBK = 210
IMPEDANCE_CTRL_CONFIG = 211
class UxbusConf:
SET_TIMEOUT = 2000 # ms
GET_TIMEOUT = 2000 # ms
class ServoConf:
CON_EN = 0x0100
CON_MODE = 0x0101
CON_DIR = 0x0102
SV3MOD_POS = 0
SV3MOD_SPD = 1
SV3MOD_FOS = 2
SV3_SAVE = 0x1000
BRAKE = 0x0104
GET_TEMP = 0x000E
ERR_CODE = 0x000F
OVER_TEMP = 0x0108
CURR_CURR = 0x0001
POS_KP = 0x0200
POS_FWDKP = 0x0201
POS_PWDTC = 0x0202
SPD_KP = 0x0203
SPD_KI = 0x0204
CURR_KP = 0x090C
CURR_KI = 0x090D
SPD_IFILT = 0x030C
SPD_OFILT = 0x030D
POS_CMDILT = 0x030E
CURR_IFILT = 0x0401
POS_KD = 0x0205
POS_ACCT = 0x0300
POS_DECT = 0x0301
POS_STHT = 0x0302
POS_SPD = 0x0303
MT_ID = 0x1600
BAUDRATE = 0x0601
SOFT_REBOOT = 0x0607
TAGET_TOQ = 0x050a
CURR_TOQ = 0x050c
TOQ_SPD = 0x050e
TAGET_POS = 0x0700
CURR_POS = 0x0702
HARD_VER = 0x0800
SOFT_VER = 0x0801
MT_TYPE = 0x0802
MT_ZERO = 0x0817
RESET_PVL = 0x0813
CAL_ZERO = 0x080C
ERR_SWITCH = 0x0910
RESET_ERR = 0x0109
SV3_BRO_ID = 0xFF
MODBUS_BAUDRATE = 0x0A0B
TOOL_MB_TIMEOUT = 0x0A0E
DIGITAL_IN = 0x0A14
DIGITAL_OUT = 0x0A15
ANALOG_IO1 = 0x0A16
ANALOG_IO2 = 0x0A17
class UxbusState:
ERR_CODE = 1 # an error has not been cleared yet
WAR_CODE = 2 # a warning has not been cleared yet
ERR_TOUT = 3 # timed out waiting for the result
ERR_LENG = 4 # TCP reply has the wrong length
ERR_NUM = 5 # TCP reply has the wrong sequence number
ERR_PROT = 6 # TCP reply has the wrong protocol flag
ERR_FUN = 7 # TCP reply command does not match the command sent
ERR_NOTTCP = 8 # send error
STATE_NOT_READY = 9 # not ready to move
INVALID = 10 # result is invalid
ERR_OTHER = 11 # other error
ERR_PARAM = 12 # parameter error
class TrajState:
IDLE = 0
LOADING = 1
LOAD_SUCCESS = 2
LOAD_FAIL = 3
SAVING = 4
SAVE_SUCCESS = 5
SAVE_FAIL = 6
class BioGripperState:
IS_STOP = 0
IS_MOTION = 1
IS_DETECTED = 2
IS_FAULT = 3
IS_NOT_ENABLED = 0
IS_ENABLING = 1
IS_ENABLED = 2
class CollisionToolType:
NONE = 0
XARM_GRIPPER = 1
XARM_VACUUM_GRIPPER = 2
XARM_BIO_GRIPPER = 3
ROBOTIQ_2F85 = 4
ROBOTIQ_2F140 = 5
USE_PRIMITIVES = 20 # threshold only, used to judge whether the tool is a collision primitive
CYLINDER = 21 # radius, height
BOX = 22 # x, y, z in tool coordinate direction
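# --- Illustrative lookup sketch (added example, not part of the original module) ---
# A minimal example of reading the per-joint limits for a 6-axis arm of
# hardware type X4 out of the nested mapping defined above; the loop variable
# names are placeholders chosen for this sketch.
if __name__ == '__main__':
    limits = XCONF.Robot.JOINT_LIMITS[XCONF.Robot.Axis.XARM6][XCONF.Robot.Type.XARM6_X4]
    for joint, (low, high) in enumerate(limits, start=1):
        print('joint %d: %.6f .. %.6f rad' % (joint, low, high))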
|
Jannes123/inasafe
|
refs/heads/develop
|
safe/common/test/test_version.py
|
10
|
# coding=utf-8
"""InaSAFE Disaster risk assessment tool developed by AusAid -
**Test class for version.py.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'ismail@kartoza.com'
__version__ = '2.2.0'
__revision__ = '$Format:%H$'
__date__ = '11/13/14'
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import unittest
import sys
from safe.common.version import get_version, current_git_hash
class TestVersion(unittest.TestCase):
def test_get_version(self):
"""Test for get_version."""
version_tuple = ('2', '2', '0', 'alpha', '0')
version = get_version(version_tuple)
if 'win32' in sys.platform or 'darwin' in sys.platform:
expected_version = '2.2.0.dev-master'
message = 'It should be %s but got %s' % (
expected_version, version)
self.assertEqual(expected_version, version, message)
else:
expected_version = '2.2.0.dev-ABCDEFG'
message = 'It should be %s but got %s' % (
expected_version[:9], version[:9])
self.assertEqual(expected_version[:9], version[:9], message)
message = 'Expected version that has length %d, got %d' % (
len(expected_version), len(version))
self.assertEqual(len(expected_version), len(version), message)
# Version tuple doesn't have length == 5
version_tuple = ('2', '2', '0', 'alpha')
self.assertRaises(RuntimeError, get_version, version_tuple)
# Version tuple item 4th is not alpha, beta, rc, final
version_tuple = ('2', '2', '0', 'avocado', '0')
self.assertRaises(RuntimeError, get_version, version_tuple)
# Final version
version_tuple = ('2', '2', '0', 'final', '0')
version = get_version(version_tuple)
self.assertEqual(version, '2.2.0', 'The version should be 2.2.0')
def test_get_current_hash(self):
"""Test for get_current_hash."""
git_hash = current_git_hash()
self.assertEqual(len(git_hash), 7)
if __name__ == '__main__':
unittest.main()
|
leveryd/autopwn
|
refs/heads/master
|
autopwn/__init__.py
|
3
|
#!/usr/bin/env python3
import argparse
import copy
import cmd
import collections
import operator
import os
import random
import re
import readline
import shlex
import subprocess
import sys
import threading
import time
from collections import OrderedDict, defaultdict
from distutils.spawn import find_executable
from locale import getlocale
from subprocess import Popen, PIPE
from time import gmtime, strftime
import inquirer
from screenutils import list_screens, Screen
import yaml
# Aidan Marlin @ NCC Group
# Project born 201502
# Project reborn 201505
class Arguments:
argparse_description = '''
autopwn 0.25.1
By Aidan Marlin
Email: aidan [dot] marlin [at] nccgroup [dot] trust'''
argparse_epilog = '''
Specify targets and run sets of tools against them.
The autopwn shell should feel familiar; it is based on
msfconsole. You can 'use' tools and assessments, and 'set'
options for them. You can then 'save' the options, and 'run'
the tool or assessment against the specified target. Instead
of 'set'ing tool options, you can 'load' a target file if you
wish. 'load'ing a target file will ignore modules defined in
the target file.
A target file flag is also supported which will set autopwn running
tools or assessments defined in the targets file against targets also
specified in the target file. No interaction necessary.
Format of the target file can be:
targets:
- target_name: <name_of_target>
target: <ip_address/device_name/directory/etc>
target_list: <target_list_file>
port_number: <port>
protocol: <http|https|ssh|etc>
url: <path>
user_file: <file>
password_file: <file>
modules: <list_of_modules_to_run_against_target>
Compulsory options are specified by tool, but normally include
'target_name' and 'target' or just 'target_list'.
'target_list' allows users to specify a list of targets separated by
a new line. When specifying 'target_list', 'target' and 'target_name'
should not be specified.
Example file:
targets:
- target_name: test
target: 127.0.0.1
url: /test
port_number: 80
protocol: https
user_file: /tmp/users
password_file: /tmp/passwords
modules: ['tool/nmap', 'tool/hydra', 'assessment/webapp']
Another example file using target_list:
targets:
- target_list: file_with_list_of_targets
url: /test
port_number: 80
protocol: https
user_file: /tmp/users
password_file: /tmp/passwords
modules: ['tool/nmap', 'tool/hydra', 'assessment/webapp']
autopwn uses the tools/ directory under autopwn to
load tool definitions, which are yaml files. You can
find some examples in the directory already. If you
think one is missing, mention it on GitHub or email
me and I might add it.
autopwn also uses assessments/ for assessment definitions.
Instead of selecting which tools you would like to run,
you specify which assessment you would like to run.
Have fun!
Legal purposes only..
'''
def __init__(self, argslist):
self.parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.argparse_description,
epilog=self.argparse_epilog)
self.parser.add_argument('-t', '--targets',
required=True,
help='The file containing the targets')
self.parser.add_argument('-d', '--assessment_directory',
help='Specify assessment directory')
self.parser.add_argument('-s', '--with_screen',
action='store_true',
help='Run tools in screen session')
self.parser.add_argument('-p', '--parallel',
action='store_true',
help='Run tools in parallel regardless of assessment or '
'global parallel option')
self.parser = self.parser.parse_args(argslist)
# Load targets file
config = Configuration(True)
Load(config, self.parser.targets)
# Process boolean command line arguments
if self.parser.with_screen == True:
config.arguments['screen'] = True
if self.parser.parallel == True:
config.arguments['parallel'] = True
print("autopwn v0.25.1 - Autoloading targets and modules")
print()
# Check for duplicate target names
dup_check = {}
# TODO Check for dups when target_list specified ...
#print("debug: config.instance == " + str(config.instance))
#for target in config.target_objects['targets']:
# if dup_check.get(target['target_name'],None) == None:
# dup_check[target['target_name']] = True
# else:
# Error(110,"[E] The following duplicate target_name was identified: " + target['target_name'])
# Check for module presence, which is a requirement when running CLI only
for target in config.target_objects['targets']:
if target.get('target', None) == None and target.get('target_list', None) != None:
# Open target_list file and assign to type(list)
try:
with open(target['target_list']) as f:
target_list = f.read().splitlines()
except:
Error(130, "[E] Could not open target_list file")
for individual_target in target_list:
# Check modules
self.check_modules(config, target)
target['target_name'] = individual_target.replace('/', '_')
target['target'] = individual_target.replace('/', '_')
# Set and save for each target in list
AutoSet(config,target)
View('command_line_pre_save',config,target=target)
Save(config)
View('command_line_post_save',config,target=target)
# Execute
Run(config)
return
# No target_list, carry on
self.check_modules(config, target)
# Set and save for each target in list
AutoSet(config,target)
View('command_line_pre_save',config,target=target)
Save(config)
View('command_line_post_save',config,target=target)
# Reset for next target
config.instance = {}
config.instance['tool'] = []
config.instance['config'] = {}
Run(config)
def check_modules(self, config, target):
if target.get('modules',False) == False:
Error(90,"[E] One of the targets has no modules defined")
for module in target['modules']:
Use(config,module)
# Check resource exists
if config.status['resource_found'] == False:
Error(100,"[E] A tool or assessment could not be found")
class Configuration:
def __init__(self, command_line):
self.status = {}
self.status['log_started'] = False
self.status['resource_found'] = False
self.status['command_line'] = command_line
self.status['file_found'] = False
self.global_config = {}
self.tools = []
self.assessments = []
self.job_queue = []
self.arguments = defaultdict(lambda: '')
self.instance = {}
self.instance['tool'] = []
self.instance['config'] = {}
self.load("tools")
self.load("assessments")
self.load("global_config")
# Remove / from completer delim so tab completion works
# with tool/nmap, for example
old_delims = readline.get_completer_delims()
readline.set_completer_delims(old_delims.replace('/', ''))
def find_path(self, candidate):
basepath = os.path.dirname(candidate)
tools_dir = os.path.join(basepath, 'tools')
if os.path.exists(tools_dir):
return basepath
else:
return None
def load(self, load_type):
pathname = os.path.abspath(self.find_path(__file__) \
or self.find_path(sys.argv[0]))
if load_type == "tools":
load_directory = os.path.abspath(pathname) + "/tools/"
load_string = "Tools"
elif load_type == "assessments":
load_directory = os.path.abspath(pathname) + "/assessments/"
load_string = "Assessments"
elif load_type == "global_config":
load_directory = os.path.abspath(pathname) + "/"
load_string = "Global configuration"
if not os.path.isdir(load_directory):
Error(10,"[E] " + load_string + " directory does not exist")
for file in os.listdir(load_directory):
if file.endswith(".apc"):
stream = open(load_directory + file, 'r')
objects = yaml.load(stream)
# TODO Make this better
if load_type == "tools":
objects['search_name'] = "tool/" + objects['name']
self.tools.append(objects)
elif load_type == "assessments":
objects['search_name'] = "assessment/" + objects['name']
self.assessments.append(objects)
elif load_type == "global_config":
self.global_config = objects
class Error:
def __init__(self, error_code, error_message):
print(error_message)
sys.exit(error_code)
class Search:
def __init__(self, config, search_string):
self.search(config.assessments,"Assessment",search_string)
self.search(config.tools,"Tool",search_string)
def search(self,config_item,item_type_string,search_string):
print('{0:30} {1}'.format(item_type_string, "Description"))
print("-"*64)
print()
for item in config_item:
if search_string in item['search_name'] \
or str.lower(search_string) in str.lower(item['description']):
name = item['search_name']
description = item['description']
if (sys.stdout.isatty()) == True:
description = '\x1b[%sm%s\x1b[0m' % \
(';'.join(['32']), description)
print('{0:30} {1}'.format(name, description))
print()
class Use:
def __init__(self, config, arg):
config.instance = {}
config.instance['tool'] = []
config.instance['config'] = {}
config.instance['config']['assessment'] = False
config.instance['config']['single_tool'] = False
resource = arg.split('/')
if resource[0] == 'tool':
self.use_tool(config,resource[1])
elif resource[0] == 'assessment':
self.use_assessment(config,resource[1])
else:
config.status['resource_found'] = False
return
def use_tool(self, config, tool_name):
config.instance['config']['single_tool'] = True
config.status['resource_found'] = False
for tool in config.tools:
if tool['name'] == tool_name:
for dependency in tool['dependencies']:
# Placeholder replacement
dependency_placeholder = defaultdict(lambda : '')
dependency_placeholder['tools_directory'] = config.global_config['tools_directory']
# Option replacements
dependency = dependency.format(**dependency_placeholder)
if Process.binary_exists(self, dependency) != True:
print("[I] Missing binary/script - " + dependency)
return
config.status['resource_found'] = True
config.instance['tool'].append(tool['name'])
# Set default values for options
for option in tool['options']:
default_value = tool['options'][option].\
get('default_value', None)
if default_value != None:
config.instance['config'][option] = str(default_value)
break
if config.status['resource_found'] == False:
print("Tool not found")
return
def use_assessment(self, config, assessment_name):
config.instance['config']['assessment'] = True
config.instance['config']['assessment_name'] = assessment_name
for assessment in config.assessments:
if assessment['name'] == assessment_name:
config.status['resource_found'] = True
# Find all tools with assessment type
for tool in config.tools:
for assessment_type in tool['assessment_groups']:
if assessment_type == assessment_name:
config.instance['tool'].append(tool['name'])
class Show:
def __init__(self, config, arg):
if arg == 'options':
self.show_options(config)
elif arg == 'jobs':
self.show_jobs(config)
elif arg == 'config':
self.show_config(config)
else:
self.show_help(config)
def show_help(self,config):
info = '''
Valid arguments for show are:
options - Show options for tool or assessment
jobs - Show jobs
config - Show autopwn config
'''
print(info)
return True
def show_config(self,config):
print()
print(" {0:30} {1}".format("Option", "Value"))
print(" "+"-"*48)
for option in config.global_config:
print(" {0:30} {1}".format(option, config.global_config[option]))
print()
def show_jobs(self,config):
if len(config.job_queue) == 1:
print("There is 1 job in the queue")
elif len(config.job_queue) > 1:
print("There are " + str(len(config.job_queue)) + " jobs in the queue")
print()
for item in config.job_queue:
print(item['name'] + " running for " + item['options']['target_name'])
print()
else:
print("1. Swap bits")
print("2. Periodically switch on Caps Lock")
print("3. Send scan results home")
print("4. ...")
print("5. Fragment drive")
print("6. Emulate single blown pixel")
print("7. Recommend Windows to the user")
def show_options(self,config):
if len(config.instance['tool']) == 0:
print("You need to select a tool or assessment first.")
return False
# Determine what options are needed for tool(s)
print("Options for tool/assessment.")
print()
print(" {0:16} {1:16} {2:32} {3}".format("Option", "Value", "Example Values", "Required"))
print(" "+"-"*96)
option_displayed = []
for tool in config.tools:
if tool['name'] in config.instance['tool']:
for option in tool['options']:
required = tool['options'][option].\
get('required',False)
required_string = str(required)
# If required option is list, print list
if type(required) is list:
required_string = ""
for option_required in required:
required_string = required_string + option_required + " "
required_string = required_string.strip()
required_string = required_string.replace(' ',' or ')
example_values = str(tool['options'][option].\
get('example_values',None))
option_value = config.instance['config'].get(option,'')
# Only show option once
if option not in option_displayed:
print(" {0:16} {1:16} {2:32} {3}".\
format(option,option_value,example_values,required_string))
option_displayed.append(option)
print()
class Unset:
def __init__(self, config, arg):
context = ''
args = arg.split(" ")
# Check number of arguments specified
if len(args) != 1:
print("Wrong number of arguments specified for set")
return
option = args[0]
# If global.some_option set, switch context
option_with_context = option.split('.')
if len(option_with_context) == 2:
context = option_with_context[0]
option = option_with_context[1]
# If context is 'global', set in global config file and load?
if context == 'global':
config.global_config[option] = ''
# TODO check file exists etc
pathname = os.path.abspath(config.find_path(__file__) or config.find_path(sys.argv[0]))
autopwn_config_file = os.path.abspath(pathname) + "/autopwn.apc"
with open(autopwn_config_file, 'w') as global_config_file:
global_config_file.write( yaml.dump(config.global_config, default_flow_style=True) )
config.load("global_config")
else:
del config.instance['config'][option]
print(option + " = " + "''")
# When a target file is specified
class AutoSet:
def __init__(self, config, target_objects):
for option in target_objects:
config.instance['config'][option] = str(target_objects[option])
class Set:
def __init__(self, config, arg):
context = ''
args = arg.split(" ")
# Check number of arguments specified
if args[0] != "command":
if len(args) != 2:
print("Wrong number of arguments specified for set")
return
option = args[0]
value = args[1]
else:
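            # 'set command <value>' keeps everything after the keyword, so the
            # value may contain spaces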
if len(args) >= 2:
option = args[0]
value = re.sub('^command ','', arg)
else:
print("Wrong number of arguments specified for set")
return
# If global.some_option set, switch context
option_with_context = option.split('.')
if len(option_with_context) == 2:
context = option_with_context[0]
option = option_with_context[1]
# Boolean conversions
if value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
# If context is 'global', set in global config file and load?
if context == 'global':
config.global_config[option] = value
# TODO check file exists etc
pathname = os.path.abspath(config.find_path(__file__) or config.find_path(sys.argv[0]))
autopwn_config_file = os.path.abspath(pathname) + "/autopwn.apc"
with open(autopwn_config_file, 'w') as global_config_file:
global_config_file.write( yaml.dump(config.global_config, default_flow_style=True) )
config.load("global_config")
else:
config.instance['config'][option] = value
print(option + " = " + str(value))
class Process:
def __init__(self, config):
info = {}
if len(config.job_queue) == 0:
print("No jobs to run")
return
for instance in config.job_queue:
instance['parallel'] = config.arguments['parallel'] or config.global_config['parallel']
instance['options']['date'] = strftime("%Y%m%d_%H%M%S%z")
instance['options']['date_day'] = strftime("%Y%m%d")
instance['options']['tools_directory'] = config.global_config['tools_directory']
instance['options']['output_dir'] = instance['options']['date_day'] + \
"_autopwn_" + \
instance['options']['target_name']
if config.arguments['screen'] == True:
                if not (self.binary_exists('screen') and self.binary_exists('bash')):
Error(50,"[E] Missing binary - screen or bash")
instance['screen_name'] = "autopwn_" + \
instance['options']['target_name'] + "_" + \
instance['options']['target'] + "_" + \
instance['name']
instance['execute_string'] = "screen -D -m -S " + \
instance['screen_name'] + " " + "bash -c '" + \
instance['execute_string'] + \
"'"
ddict_options = defaultdict(lambda : '')
for option in instance['options']:
ddict_options[option] = instance['options'][option]
# Option replacements
instance['execute_string'] = instance['execute_string'].format(**ddict_options)
# Run jobs
Execute(config)
def binary_exists(self, binary_string):
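        # Return True if the binary can be located via 'which' or, failing
        # that, via 'find'; False otherwise.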
try:
which_return_code = subprocess.call(["which",binary_string],stdout=open(os.devnull,'wb'),stderr=open(os.devnull,'wb'))
if which_return_code == 0:
return True
else:
find_return_code = subprocess.call("find " + binary_string,stdout=open(os.devnull,'wb'),stderr=open(os.devnull,'wb'),shell=True)
if find_return_code == 0:
return True
else:
return False
except OSError as e:
if e.errno == os.errno.ENOENT:
Error(55,"[E] 'which' binary couldn't be found")
else:
# Not sure what's happening at this point
raise
# Load target files
class Load:
def __init__(self, config, arg):
# Load targets file
if os.path.isfile(arg) == True:
stream = open(arg, 'r')
try:
target_objects = yaml.load(stream)
config.target_objects = target_objects
except:
Error(1,"[E] Targets file is not valid YAML format")
else:
Error(100,"[E] Targets file does not exist")
# Check validity of targets file
if target_objects.get('targets', None) == None:
Error(2,"[E] Targets file missing targets entry")
else:
for target in target_objects['targets']:
# If targets exists we must check its type before interrogating
if type(target) != dict:
Error(3,"[E] Target entry missing target_name and/or target")
if target.get('target_list') == None and \
target.get('target', None) == None:
Error(3,"[E] Target entry missing target_name and/or target")
# If autopwn shell is in use, we should AutoSet()
if config.status['command_line'] != True:
for target in target_objects['targets']:
AutoSet(config,target)
class Save:
def __init__(self, config):
if len(config.instance['tool']) == 0:
print("No tool options to save")
return
for imported_tool in config.tools:
for selected_tool in config.instance['tool']:
if selected_tool == imported_tool['name']:
config.job_queue.append(copy.deepcopy(imported_tool))
config.job_queue_add_success = True
config.job_queue[-1]['options'] = {}
for option in config.instance['config']:
config.job_queue[-1]['options'][option] = \
config.instance['config'][option]
# Check all required parameters exist before save
for option in imported_tool['options']:
parameter_found = False
tool_option = imported_tool['options'][option]\
.get('required', False)
if type(tool_option) == list:
### Check that at least one exists
for required_option in tool_option:
parameter_found = parameter_found or \
self.check_required_exists\
(config,required_option)
if tool_option == True:
parameter_found = self.check_required_exists\
(config,option)
if parameter_found == False and tool_option == True:
self.remove_instance(config)
return
# Set default option values for options
# (An option value becomes another option value)
for option in imported_tool['options']:
default_option_value = imported_tool['options'][option].\
get('default_option_value', None)
# Is there a default option
if default_option_value != None:
                            config.instance['config'][option] = \
                                str(config.instance['config'][default_option_value])
if config.status['command_line'] != True:
if len(config.job_queue) == 1:
print("There is 1 job in the queue")
else:
print("There are " + str(len(config.job_queue)) + \
" jobs in the queue")
def remove_instance(self, config):
# TODO Check this actually works
# now that assessments are in
config.job_queue.pop()
config.job_queue_add_success = False
if config.status['command_line'] != True:
print("Some required parameters have not been set")
def check_required_exists(self, config, option):
parameter_found = False
for tool in config.tools:
if config.job_queue[-1]['name'] == tool['name']:
instance_tool = tool
break
if option in config.job_queue[-1]['options'] or \
instance_tool['options'][option]\
.get('default_option_value', None) != None:
parameter_found = True
return parameter_found
class Execute:
thread = []
index = 0
def __init__(self, config):
for instance in config.job_queue:
# Create log directory in CWD
if not os.path.exists(instance['options']['output_dir']):
try:
os.makedirs(instance['options']['output_dir'])
except OSError as e:
                    Error(20,"[E] Error creating output directory: " + str(e))
if 'url' in instance:
log = Log(config, os.getcwd(), False, 'tool_string',"# Executing " + \
instance['name'] + " tool (" + instance['url'] + ") for " + \
instance['options']['target_name'] + ":\n" + \
instance['execute_string'])
else:
log = Log(config, os.getcwd(), False, 'tool_string',"# Executing " + \
instance['name'] + " tool:\n# " + \
instance['execute_string'])
            time.sleep(0.1)
self.thread.append(RunThreads(config,instance))
# If main process dies, everything else *SHOULD* as well
self.thread[-1].daemon = True
# Start threads
self.thread[-1].start()
# Parallel or singular?
if instance['parallel'] != True:
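                # Busy-wait until the thread started above has finished before
                # launching the next job (sequential mode)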
while threading.activeCount()>1:
pass
self.index = self.index + 1
else:
print(instance['execute_string'])
pass
if instance['parallel'] == True:
while threading.activeCount()>1:
pass
#for tid in self.thread:
# tid.join(1)
class RunThreads (threading.Thread):
def __init__(self, config, instance):
threading.Thread.__init__(self)
self.tool_stdout = ''
        self.tool_stderr = ''
self.instance = instance
self.config = config
def execute_tool(self, config):
# Always check any tools provided by
# community members
# Bad bug using this and no shell for Popen,
# will come back to this
#command_arguments = shlex.split(tool_execute_string)
proc = Popen(self.instance['execute_string'], stdout=PIPE, stderr=PIPE, shell=True)
decode_locale = lambda s: s.decode(getlocale()[1])
self.tool_stdout, self.tool_stderr = map(decode_locale, proc.communicate())
exitcode = proc.returncode
# Log if tool did not exit with zero status
if exitcode != 0:
log = Log(self.config, os.getcwd(), False, 'tool_string', "# WARNING: " + \
self.instance['name'] + " for " + \
self.instance['options']['target_name'] + " exited with non-zero return code")
def run(self):
print("[+] Launching " + self.instance['name'] + \
" for " + self.instance['options']['target_name'])
self.execute_tool(self.config)
print("[-] " + self.instance['name'] + " for " + \
self.instance['options']['target_name'] + " is done..")
# Should we create a stdout log for this tool?
stdout_boolean = self.instance['stdout']
if stdout_boolean == True:
log = Log(self.config, os.getcwd() + "/" + self.instance['options']['output_dir'],
self.instance['options']['target_name'] + "_" + self.instance['name'],
'tool_output', self.tool_stdout)
log = Log(self.config, os.getcwd(), False, 'tool_string', "# " + \
self.instance['name'] + " for " + \
self.instance['options']['target_name'] + " has finished")
class Log:
def __init__(self, config, directory, log_filename, log_type, log_string):
date = strftime("%Y%m%d")
date_time = strftime("%Y%m%d %H:%M:%S %z")
date_time_filename = strftime("%Y%m%d_%H%M%S%z")
if log_type == 'tool_output':
try:
# log_filename is pikey, make it better
log_file = open(directory + "/" + date_time_filename + \
"_" + log_filename + "_stdout.log","a")
except OSError as e:
                Error(30,"[E] Error creating log file: " + str(e))
log_file.write(log_string)
log_file.close()
if log_type == 'tool_string':
try:
log_file = open(date + "_autopwn_commands.log","a")
except OSError as e:
                Error(30,"[E] Error creating log file: " + str(e))
if config.status['log_started'] != True:
log_file.write("## autopwn 0.25.1 command output\n")
log_file.write("## Started logging at " + date_time + "...\n")
config.status['log_started'] = True
log_file.write("# " + date_time + "\n")
log_file.write(log_string + "\n")
log_file.close()
if log_type == 'individual_target':
try:
log_file = open(directory + "/target","w")
except OSError as e:
                Error(30,"[E] Error creating log file: " + str(e))
log_file.write(log_string + "\n")
log_file.close()
class Run:
def __init__(self, config):
# Process job queue (replace placeholders)
Process(config)
class Debug:
def __init__(self, config, arg):
import IPython; IPython.embed()
class Clear:
def __init__(self, config, arg):
config.job_queue = []
print("Job queue cleared")
class View:
def __init__(self, view, config, **kwargs):
if kwargs is None:
kwargs = defaultdict(lambda : '')
if view == 'load':
if config.status['file_found'] == True:
print("Loaded target file")
if view == 'clear':
pass
if view == 'command_line_pre_save':
for key, value in kwargs.items():
print("Loading " + value['target_name'] + " with " + config.instance['tool'][-1] + "...",end="")
if view == 'command_line_post_save':
for key, value in kwargs.items():
if config.job_queue_add_success == True:
print("Done!")
else:
print("Failed!")
if view == 'use':
option_displayed = []
# Show assessment info
if config.instance['config']['assessment'] == True:
for assessment in config.assessments:
if assessment['name'] == \
config.instance['config']['assessment_name']:
print('Name: ' + assessment['name'])
print('Long name: ' + assessment['long_name'])
print('Description: ' + assessment['description'])
print()
                        print('The following tools are used in this assessment:')
for tool in config.tools:
if assessment['name'] in tool['assessment_groups']:
print("- " + tool['name'])
print()
if config.instance['config']['single_tool'] == True:
if config.status['resource_found'] == False:
print("Could not load tool or assessment")
else:
for tool in config.tools:
if tool['name'] == config.instance['tool'][0]:
print('Name: ' + tool['name'])
print('Description: ' + tool['description'])
print('URL: ' + tool['url'])
print()
class CleanUp:
def __init__(self):
# Kill screen sessions. Needs improvement
for screen in list_screens():
if screen.name.startswith("autopwn"):
screen.kill()
class Shell(cmd.Cmd):
config = Configuration(False)
prompt = 'autopwn > '
def cmdloop(self, intro=None):
try:
cmd.Cmd.cmdloop(self, intro)
except KeyboardInterrupt as e:
print()
print("Type 'quit' to exit autopwn shell")
self.cmdloop()
def emptyline(self):
pass
def do_shell(self, arg):
'Execute shell commands'
os.system(arg)
def do_clear(self, arg):
'Clear job queue'
Clear(self.config,arg)
View('clear',self.config)
def do_search(self, arg):
'Search function'
Search(self.config,arg)
View('search',self.config)
def do_debug(self, arg):
'Drop to IPython shell'
Debug(self.config,arg)
View('debug',self.config)
def do_show(self, arg):
'Show information'
Show(self.config,arg)
View('show',self.config)
def complete_show(self, text, line, begin, end):
operations = ['options','jobs','config']
if not text:
completions = operations
else:
completions = [ operation
for operation in operations
if text in operation
]
return completions
def do_load(self, arg):
'Load target file'
Load(self.config,arg)
View('load',self.config)
def do_save(self, arg):
'Save instance settings'
Save(self.config)
View('save',self.config)
def do_run(self, arg):
'Run job queue'
Run(self.config)
View('run',self.config)
def do_use(self, arg):
'Setup a tool or assessment'
Use(self.config,arg)
View('use',self.config,target=self.config.instance)
if self.config.status['resource_found'] == True:
if (sys.stdout.isatty()) == True:
arg = '\x1b[%sm%s\x1b[0m' % \
(';'.join(['31']), arg)
self.prompt = 'autopwn (' + arg + ') > '
def complete_use(self, text, line, begin, end):
completions = ''
if not text:
# Add assessments
completions = [ assessment['search_name']
for assessment in self.config.assessments
]
# Add tools
completions = completions + [ tool['search_name']
for tool in self.config.tools
]
else:
# Add assessments which match
completions = [ assessment['search_name']
for assessment in self.config.assessments
if line.split(' ')[1] in assessment['search_name']
]
# Add tools which match
completions = completions + [ tool['search_name']
for tool in self.config.tools
if line.split(' ')[1] in tool['search_name']
]
return completions
def do_set(self, arg):
'Set configuration option'
Set(self.config,arg)
View('set',self.config)
def complete_set(self, text, line, begin, end):
        completions = []
        for tool in self.config.tools:
if tool['name'] in self.config.instance['tool']:
completions = tool['options']
if text != None:
completions = [ parameter
for parameter in completions
if text in parameter
]
return completions
def do_unset(self, arg):
'Clear configuration option'
Unset(self.config,arg)
View('unset',self.config)
def complete_unset(self, text, line, begin, end):
        completions = []
        for tool in self.config.tools:
if tool['name'] in self.config.instance['tool']:
completions = tool['options']
if text != None:
completions = [ parameter
for parameter in completions
if text in parameter
]
return completions
def do_bye(self, arg):
'Quit autopwn'
self.terminate()
def do_exit(self, arg):
'Quit autopwn'
self.terminate()
def do_quit(self, arg):
'Quit autopwn'
self.terminate()
def terminate(self):
'Exit Autopwn'
quote = []
quote.append("Never underestimate the determination of a kid who is time-rich and cash-poor.")
quote.append("There are few sources of energy so powerful as a procrastinating college student.")
quote.append("I/O, I/O, It's off to disk I go. A bit or byte to read or write, I/O, I/O, I/O...")
quote.append("SUPERCOMPUTER: what it sounded like before you bought it.")
quote.append("Is reading in the bathroom considered Multi-Tasking?")
quote.append("Premature optimisation is the root of all evil.")
quote.append("The first rule of optimisation is: Don't do it. The second rule of optimisation is: Don't do it yet.")
quote.append("Q: How many software engineers does it take to change a lightbulb? A: It can't be done; it's a hardware problem.")
quote.append("Hackers are not crackers.")
quote.append("Behind every successful Coder there an even more successful De-coder ")
quote.append("If at first you don't succeed; call it version 1.0.")
quote.append("F*ck it, we'll do it in production.")
quote.append("Programmers are tools for converting caffeine into code.")
quote.append("Those who can't write programs, write help files.")
quote.append("Should array indices start at 0 or 1? My compromise of 0.5 was rejected without, I thought, proper consideration.")
quote.append("Fifty years of programming language research, and we end up with C++?")
quote.append("Software is like sex: It’s better when it’s free.")
quote.append("If debugging is the process of removing bugs, then programming must be the process of putting them in.")
quote.append("Always code as if the guy who ends up maintaining your code will be a violent psychopath who knows where you live.")
quote.append("C programmers never die. They are just cast into void.")
quote.append("19 Jan 2038 at 3:14:07 AM")
quote.append("If Python is executable pseudocode, then perl is executable line noise.")
quote.append("The only difference between a bug and a feature is the documentation.")
print(random.choice(quote))
CleanUp()
sys.exit(0)
def _main(arglist):
# Process command line arguments
if len(sys.argv) > 1:
Arguments(sys.argv[1:]).parser
else:
# Drop user to shell
Shell().cmdloop("autopwn 0.25.1 shell. Type help or ? to list commands.\n")
def main():
try:
_main(sys.argv[1:])
except KeyboardInterrupt:
CleanUp()
print()
print("[E] Quitting!")
sys.exit(1)
if __name__ == "__main__":
main()
|
meigrafd/Sample-Code
|
refs/heads/master
|
_pygame/sprite.py
|
2
|
import pygame
from operator import truth
from functools import wraps
# Flag values for anchors.
# TODO: use Rect's constants
ANCHOR_TOPLEFT = 101
ANCHOR_TOPRIGHT = 102
ANCHOR_BOTTOMLEFT = 103
ANCHOR_BOTTOMRIGHT = 104
ANCHOR_CENTER = 105
ANCHOR_MIDTOP = 106
ANCHOR_MIDBOTTOM = 107
ANCHOR_MIDLEFT = 108
ANCHOR_MIDRIGHT = 109
def call_hook_method(hook_name):
"""decorator to wrap a method with a call to a hook method.
The hook should return a boolean deciding whether to continue
with the original method call."""
def on_call(method):
@wraps(method)
def wrapped(self, *args, **kwargs):
hook = getattr(self, hook_name, None)
if hook:
if hook(method, *args, **kwargs):
return method(self, *args, **kwargs)
return wrapped
return on_call
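# Illustrative use of call_hook_method (class and method names below are
# hypothetical):
#
#   class Widget(object):
#       def on_change(self, method, *args, **kwargs):
#           return True  # allow the wrapped call to proceed
#
#       @call_hook_method('on_change')
#       def resize(self, size):
#           pass
#
# Widget().resize((10, 10)) first calls on_change(resize, (10, 10)); resize
# only runs if the hook returns a truthy value.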
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
"""initialize sprite instance
Initializes attributes to default values, and optionally
adds it to given groups.
"""
self.image = self.original = None
self.rect = None
self.dirty = False
# Initialize position
self.anchor = ANCHOR_TOPLEFT
self.position = None
self.offset = (0, 0)
self.layer = 0
# Initialize visual attributes
self.scale = 1
self.rotate = 0
self.visible = True
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def draw(self, surface):
"""draw the sprite's image on a surface
Sprite.draw(surface): return Rect
This should be called by a group's own `draw` method.
On failure or if sprite should not be drawn, returns 0.
"""
if (self.visible):
return surface.blit(self.image, self.rect)
else:
return 0
def on_visual_change(self, *args, **kwargs):
"""mark sprite as dirty on any visual change
"""
self.dirty = True
return True
@call_hook_method('on_visual_change')
def set_image(self, img):
"""set a new image object for the sprite
"""
self.image = self.original = img
self.update_image()
def update_image(self):
"""update the sprite's image object
usually useful for transformations, this method does
not change the 'original' attribute."""
img = self.original
if img is not None:
if self.scale != 1:
img = pygame.transform.scale(img, self.scaled_size())
if self.rotate != 0:
img = pygame.transform.rotate(img, self.rotate)
self.image = img
self.rect = img.get_rect()
self.move_to(self.position)
def anchor_value(self):
"""return actual position of sprite's anchor
If anchor was provided in coordinates, use them.
Otherwise, translate anchor flags to coordinates.
"""
#TODO handle negative values
if type(self.anchor) is tuple:
return self.anchor
else:
(w, h) = self.rect.size
return {
ANCHOR_TOPLEFT: (0, 0),
ANCHOR_TOPRIGHT: (w, 0),
ANCHOR_BOTTOMLEFT: (0, h),
ANCHOR_BOTTOMRIGHT: (w, h),
ANCHOR_CENTER: (w / 2, h / 2),
ANCHOR_MIDTOP: (w / 2, 0),
ANCHOR_MIDBOTTOM: (w / 2, h),
ANCHOR_MIDLEFT: (0, h / 2),
ANCHOR_MIDRIGHT: (w, h / 2)
}[self.anchor]
def update_position(self):
""" re-calculating the sprite's rect position
"""
(x, y) = self.position
(off_x, off_y) = self.offset
(anc_x, anc_y) = self.anchor_value()
self.rect.topleft = (x + off_x - anc_x, y + off_y - anc_y)
@call_hook_method('on_visual_change')
def move_to(self, pos):
"""move sprite to a certain position
"""
#TODO handle float values
self.position = pos
if pos:
self.update_position()
@call_hook_method('on_visual_change')
def move_by(self, delta):
"""move sprite by a certain delta
"""
(delta_x, delta_y) = delta
(current_x, current_y) = self.position
self.move_to((current_x + delta_x, current_y + delta_y))
@call_hook_method('on_visual_change')
def set_offset(self, offset):
self.offset = offset
self.update_position()
@call_hook_method('on_visual_change')
def make_visible(self):
self.visible = True
@call_hook_method('on_visual_change')
def make_invisible(self):
self.visible = False
@call_hook_method('on_visual_change')
def toggle_visibility(self):
self.visible = not self.visible
@call_hook_method('on_visual_change')
def scale_to(self, ratio):
"""set sprite's scale ratio (overwriting)
Ratio must be a positive float.
"""
if ratio < 0:
raise AttributeError("ratio must be a positive float")
self.scale = ratio
self.update_image()
@call_hook_method('on_visual_change')
def scale_by(self, ratio):
"""set sprite's scale ratio (accumalating)
Ratio must be a positive float.
"""
self.scale_to(self.scale + ratio)
def scaled_size(self):
"""return the sprite's calculated size, after scaling
"""
(width, height) = self.original.get_size()
width = (int)(width * self.scale)
height = (int)(height * self.scale)
return (width, height)
@call_hook_method('on_visual_change')
def rotate_to(self, degree):
"""rotate sprite's image by a degree (overwriting)
"""
self.rotate = degree % 360 # TODO magic number?
self.update_image()
@call_hook_method('on_visual_change')
def rotate_by(self, degree):
""" rotate sprite's image by a degree (accumalating)
"""
self.rotate_to(self.rotate + degree)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" \
% (self.__class__.__name__, len(self.__g))
class AggregatedSprite(Sprite):
"""aggregated sprite class collects many sprites into single entity
pygame.sprite.AggregatedSprite(*groups): return AggregatedSprite
The aggregated sprite holds a list of child sprites and propagates
every visual change to all of the child sprites.
"""
def __init__(self, *groups):
"""iniitalizes sprite
"""
# call super's initialization as usual.
super(AggregatedSprite, self).__init__(*groups)
# reset sprites list
self.sprites = []
# resets the rect and position which would be calculated
        # according to the added sprites.
self.rect = pygame.Rect(0, 0, 0, 0)
self.position = (0, 0)
def add_sprite(self, sprite):
"""add a sprite to the list of child sprites
"""
self.sprites.append(sprite)
def draw(self, surface):
"""draw child sprites in order
AggregatedSprite.draw(surface): return Rect
Returns a rectangle that is the union of all
child sprites' rects.
"""
#TODO consider sprite's layer attribute
ret = pygame.Rect(0, 0, 0, 0)
for spr in self.sprites:
r = spr.draw(surface)
if r != 0:
ret.union_ip(r)
return ret
def on_visual_change(self, method, *args, **kwargs):
"""propagate a visual attribute change to all child sprites
"""
super(AggregatedSprite, self).on_visual_change(method, *args, **kwargs)
if method.__name__ == 'move_to':
for spr in self.sprites:
spr.set_offset(args[0])
else:
for spr in self.sprites:
method(spr, *args, **kwargs)
if method.__name__ == 'move_by':
return False
return True
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
def __init__(self):
self.spritedict = {}
self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group, ordered by layer
Group.sprites(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return sorted(self.spritedict,
key=lambda sprite: getattr(sprite, "layer", 0))
def add_internal(self, sprite):
self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
        if r != 0:
self.lostsprites.append(r)
del self.spritedict[sprite]
def has_internal(self, sprite):
return sprite in self.spritedict
def copy(self):
"""copy a group with all the same sprites
Group.copy(): return Group
Returns a copy of the group that is an instance of the same class
and has the same sprites in it.
"""
return self.__class__(self.sprites())
def __iter__(self):
return iter(self.sprites())
def __contains__(self, sprite):
return self.has(sprite)
def add(self, *sprites):
"""add sprite(s) to group
Group.add(sprite, list, group, ...): return None
Adds a sprite or sequence of sprites to a group.
"""
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
def remove(self, *sprites):
"""remove sprite(s) from group
Group.remove(sprite, list, or group, ...): return None
Removes a sprite or sequence of sprites from a group.
"""
# This function behaves essentially the same as Group.add. It first
# tries to handle each argument as an instance of the Sprite class. If
        # that fails, then it tries to handle the argument as an iterable
        # object. If that fails, then it tries to handle the argument as an
# old-style sprite group. Lastly, if that fails, it assumes that the
# normal Sprite methods should be used.
for sprite in sprites:
if isinstance(sprite, Sprite):
if self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
else:
try:
self.remove(*sprite)
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
self.remove_internal(spr)
spr.remove_internal(self)
elif self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
def has(self, *sprites):
"""ask if group has a sprite or sprites
Group.has(sprite or group, ...): return bool
Returns True if the given sprite or sprites are contained in the
group. Alternatively, you can get the same information using the
'in' operator, e.g. 'sprite in group', 'subgroup in group'.
"""
return_value = False
for sprite in sprites:
if isinstance(sprite, Sprite):
# Check for Sprite instance's membership in this group
if self.has_internal(sprite):
return_value = True
else:
return False
else:
try:
if self.has(*sprite):
return_value = True
else:
return False
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
return_value = True
else:
return False
else:
if self.has_internal(sprite):
return_value = True
else:
return False
return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
if (hasattr(spr, 'draw')):
self.spritedict[spr] = spr.draw(surface)
else:
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
                if r != 0:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
                if r != 0:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
"""container class for many Sprites
pygame.sprite.Group(*sprites): return Group
A simple container for Sprite objects. This class can be subclassed to
create containers with more specific behaviors. The constructor takes any
number of Sprite arguments to add to the Group. The group supports the
following standard Python operations:
in test if a Sprite is contained
len the number of Sprites contained
bool test if any Sprites are contained
iter iterate through all the Sprites
The Sprites in the Group are not ordered, so the Sprites are drawn and
iterated over in no particular order.
"""
def __init__(self, *sprites):
AbstractGroup.__init__(self)
self.add(*sprites)
RenderPlain = Group
RenderClear = Group
|
ltiao/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/__init__.py
|
306
|
"""
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest']
|
JFriel/honours_project
|
refs/heads/master
|
networkx/networkx/exception.py
|
42
|
# -*- coding: utf-8 -*-
"""
**********
Exceptions
**********
Base exceptions and errors for NetworkX.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)\nLoïc Séguin-C. <loicseguin@gmail.com>"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Exception handling
# the root of all Exceptions
class NetworkXException(Exception):
"""Base class for exceptions in NetworkX."""
class NetworkXError(NetworkXException):
"""Exception for a serious error in NetworkX"""
class NetworkXPointlessConcept(NetworkXException):
"""Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?"
In Graphs and Combinatorics Conference, George Washington University.
New York: Springer-Verlag, 1973.
"""
class NetworkXAlgorithmError(NetworkXException):
"""Exception for unexpected termination of algorithms."""
class NetworkXUnfeasible(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a problem
instance that has no feasible solution."""
class NetworkXNoPath(NetworkXUnfeasible):
"""Exception for algorithms that should return a path when running
on graphs where such a path does not exist."""
class NetworkXNoCycle(NetworkXUnfeasible):
"""Exception for algorithms that should return a cycle when running
on graphs where such a cycle does not exist."""
class NetworkXUnbounded(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a maximization
or a minimization problem instance that is unbounded."""
class NetworkXNotImplemented(NetworkXException):
"""Exception raised by algorithms not implemented for a type of graph."""
|
WillGuan105/django
|
refs/heads/master
|
tests/gis_tests/geoapp/sitemaps.py
|
452
|
from django.contrib.gis.sitemaps import KMLSitemap, KMZSitemap
from .models import City, Country
sitemaps = {'kml': KMLSitemap([City, Country]),
'kmz': KMZSitemap([City, Country]),
}
|
feist/pcs
|
refs/heads/master
|
pcs/stonith.py
|
1
|
import json
from pcs import (
resource,
utils,
)
from pcs.cli.common import parse_args
from pcs.cli.common.console_report import indent, error
from pcs.cli.common.errors import CmdLineInputError
from pcs.cli.fencing_topology import target_type_map_cli_to_lib
from pcs.cli.resource.parse_args import parse_create_simple as parse_create_args
from pcs.common import report_codes
from pcs.common.fencing_topology import (
TARGET_TYPE_NODE,
TARGET_TYPE_REGEXP,
TARGET_TYPE_ATTRIBUTE,
)
from pcs.lib.errors import LibraryError
import pcs.lib.resource_agent as lib_ra
# pylint: disable=too-many-branches, too-many-statements, protected-access
def stonith_show_cmd(lib, argv, modifiers):
# TODO remove, deprecated command
# replaced with 'stonith status' and 'stonith config'
resource.resource_show(lib, argv, modifiers, stonith=True)
print_stonith_levels(lib)
def stonith_status_cmd(lib, argv, modifiers):
resource.resource_status(lib, argv, modifiers, stonith=True)
print_stonith_levels(lib)
def stonith_config_cmd(lib, argv, modifiers):
resource.resource_config(lib, argv, modifiers, stonith=True)
print_stonith_levels(lib)
def print_stonith_levels(lib):
levels = stonith_level_config_to_str(
lib.fencing_topology.get_config()
)
if levels:
print("\n".join(indent(levels, 1)))
def stonith_list_available(lib, argv, modifiers):
"""
Options:
* --nodesc - do not show description of the agents
"""
modifiers.ensure_only_supported("--nodesc")
if len(argv) > 1:
raise CmdLineInputError()
search = argv[0] if argv else None
agent_list = lib.stonith_agent.list_agents(
describe=not modifiers.get("--nodesc"),
search=search,
)
if not agent_list:
if search:
utils.err("No stonith agents matching the filter.")
utils.err(
"No stonith agents available. "
"Do you have fence agents installed?"
)
for agent_info in agent_list:
name = agent_info["name"]
shortdesc = agent_info["shortdesc"]
if shortdesc:
print("{0} - {1}".format(
name,
resource._format_desc(
len(name + " - "), shortdesc.replace("\n", " ")
)
))
else:
print(name)
def stonith_list_options(lib, argv, modifiers):
"""
Options:
* --full - show advanced options
"""
modifiers.ensure_only_supported("--full")
if len(argv) != 1:
raise CmdLineInputError()
agent_name = argv[0]
print(resource._format_agent_description(
lib.stonith_agent.describe_agent(agent_name),
stonith=True,
show_all=modifiers.get("--full"),
))
def stonith_create(lib, argv, modifiers):
"""
Options:
* --before - specified resource inside a group before which new resource
will be placed inside the group
* --after - specified resource inside a group after which new resource
will be placed inside the group
* --group - specifies group in which resource will be created
* --force - allow not existing agent, invalid operations or invalid
instance attributes
    * --disabled - created resource will be disabled
* --no-default-ops - do not add default operations
* --wait
* -f - CIB file
"""
modifiers.ensure_only_supported(
"--before", "--after", "--group", "--force", "--disabled",
"--no-default-ops", "--wait", "-f",
)
if modifiers.is_specified("--before") and modifiers.is_specified("--after"):
raise error("you cannot specify both --before and --after{0}".format(
"" if modifiers.is_specified("--group")
else " and you have to specify --group"
))
if not modifiers.is_specified("--group"):
if modifiers.is_specified("--before"):
raise error("you cannot use --before without --group")
if modifiers.is_specified("--after"):
raise error("you cannot use --after without --group")
if len(argv) < 2:
raise CmdLineInputError()
stonith_id = argv[0]
stonith_type = argv[1]
parts = parse_create_args(argv[2:])
settings = dict(
allow_absent_agent=modifiers.get("--force"),
allow_invalid_operation=modifiers.get("--force"),
allow_invalid_instance_attributes=modifiers.get("--force"),
ensure_disabled=modifiers.get("--disabled"),
use_default_operations=not modifiers.get("--no-default-ops"),
wait=modifiers.get("--wait"),
)
if not modifiers.get("--group"):
lib.stonith.create(
stonith_id, stonith_type, parts["op"],
parts["meta"],
parts["options"],
**settings
)
else:
adjacent_resource_id = None
put_after_adjacent = False
if modifiers.get("--after"):
adjacent_resource_id = modifiers.get("--after")
put_after_adjacent = True
if modifiers.get("--before"):
adjacent_resource_id = modifiers.get("--before")
put_after_adjacent = False
lib.stonith.create_in_group(
stonith_id, stonith_type, modifiers.get("--group"), parts["op"],
parts["meta"],
parts["options"],
adjacent_resource_id=adjacent_resource_id,
put_after_adjacent=put_after_adjacent,
**settings
)
def stonith_level_parse_node(arg):
"""
Commandline options: no options
"""
target_type_candidate, target_value_candidate = parse_args.parse_typed_arg(
arg,
target_type_map_cli_to_lib.keys(),
"node"
)
target_type = target_type_map_cli_to_lib[target_type_candidate]
if target_type == TARGET_TYPE_ATTRIBUTE:
target_value = parse_args.split_option(target_value_candidate)
else:
target_value = target_value_candidate
return target_type, target_value
def stonith_level_normalize_devices(argv):
"""
Commandline options: no options
"""
# normalize devices - previously it was possible to delimit devices by both
# a comma and a space
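    # e.g. ["dev1,dev2", "dev3"] (illustrative names) -> ["dev1", "dev2", "dev3"]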
return ",".join(argv).split(",")
def stonith_level_add_cmd(lib, argv, modifiers):
"""
Options:
* -f - CIB file
* --force - allow not existing stonith device, allow not existing node
(target)
"""
modifiers.ensure_only_supported("-f", "--force")
if len(argv) < 3:
raise CmdLineInputError()
target_type, target_value = stonith_level_parse_node(argv[1])
lib.fencing_topology.add_level(
argv[0],
target_type,
target_value,
stonith_level_normalize_devices(argv[2:]),
force_device=modifiers.get("--force"),
force_node=modifiers.get("--force"),
)
def stonith_level_clear_cmd(lib, argv, modifiers):
"""
Options:
* -f - CIB file
"""
modifiers.ensure_only_supported("-f")
if len(argv) > 1:
raise CmdLineInputError()
if not argv:
lib.fencing_topology.remove_all_levels()
return
target_type, target_value = stonith_level_parse_node(argv[0])
# backward compatibility mode
# Command parameters are: node, stonith-list
    # Both the node and the stonith list are optional. If the node is omitted
# and the stonith list is present, there is no way to figure it out, since
# there is no specification of what the parameter is. Hence the pre-lib
# code tried both. It deleted all levels having the first parameter as
# either a node or a device list. Since it was only possible to specify
# node as a target back then, this is enabled only in that case.
report_item_list = []
try:
lib.fencing_topology.remove_levels_by_params(
None,
target_type,
target_value,
None,
# pre-lib code didn't return any error when no level was found
ignore_if_missing=True
)
except LibraryError as e:
report_item_list.extend(e.args)
if target_type == TARGET_TYPE_NODE:
try:
lib.fencing_topology.remove_levels_by_params(
None,
None,
None,
argv[0].split(","),
# pre-lib code didn't return any error when no level was found
ignore_if_missing=True
)
except LibraryError as e:
report_item_list.extend(e.args)
if report_item_list:
raise LibraryError(*report_item_list)
def stonith_level_config_to_str(config):
"""
Commandline option: no options
"""
config_data = dict()
for level in config:
if level["target_type"] not in config_data:
config_data[level["target_type"]] = dict()
if level["target_value"] not in config_data[level["target_type"]]:
config_data[level["target_type"]][level["target_value"]] = []
config_data[level["target_type"]][level["target_value"]].append(level)
lines = []
for target_type in [
TARGET_TYPE_NODE, TARGET_TYPE_REGEXP, TARGET_TYPE_ATTRIBUTE
]:
        if target_type not in config_data:
continue
for target_value in sorted(config_data[target_type].keys()):
lines.append("Target: {0}".format(
"=".join(target_value) if target_type == TARGET_TYPE_ATTRIBUTE
else target_value
))
level_lines = []
for target_level in sorted(
config_data[target_type][target_value],
key=lambda level: level["level"]
):
level_lines.append("Level {level} - {devices}".format(
level=target_level["level"],
devices=",".join(target_level["devices"])
))
lines.extend(indent(level_lines))
return lines
def stonith_level_config_cmd(lib, argv, modifiers):
"""
Options:
* -f - CIB file
"""
modifiers.ensure_only_supported("-f")
if argv:
raise CmdLineInputError()
lines = stonith_level_config_to_str(lib.fencing_topology.get_config())
# do not print \n when lines are empty
if lines:
print("\n".join(lines))
def stonith_level_remove_cmd(lib, argv, modifiers):
"""
Options:
* -f - CIB file
"""
modifiers.ensure_only_supported("-f")
if not argv:
raise CmdLineInputError()
target_type, target_value, devices = None, None, None
level = argv[0]
if len(argv) > 1:
target_type, target_value = stonith_level_parse_node(argv[1])
if len(argv) > 2:
devices = stonith_level_normalize_devices(argv[2:])
try:
lib.fencing_topology.remove_levels_by_params(
level,
target_type,
target_value,
devices
)
except LibraryError as e:
# backward compatibility mode
# Command parameters are: level, node, stonith, stonith...
# Both the node and the stonith list are optional. If the node is
        # omitted and the stonith list is present, there is no way to figure it
# out, since there is no specification of what the parameter is. Hence
# the pre-lib code tried both. First it assumed the first parameter is
# a node. If that fence level didn't exist, it assumed the first
# parameter is a device. Since it was only possible to specify node as
# a target back then, this is enabled only in that case.
if target_type != TARGET_TYPE_NODE:
raise e
level_not_found = False
for report_item in e.args:
if (
# pylint: disable=no-member
report_item.code
==
report_codes.CIB_FENCING_LEVEL_DOES_NOT_EXIST
):
level_not_found = True
break
if not level_not_found:
raise e
target_and_devices = [target_value]
if devices:
target_and_devices.extend(devices)
try:
lib.fencing_topology.remove_levels_by_params(
level,
None,
None,
target_and_devices
)
except LibraryError as e_second:
raise LibraryError(*(e.args + e_second.args))
def stonith_level_verify_cmd(lib, argv, modifiers):
"""
Options:
* -f - CIB file
"""
modifiers.ensure_only_supported("-f")
if argv:
raise CmdLineInputError()
# raises LibraryError in case of problems, else we don't want to do anything
lib.fencing_topology.verify()
def stonith_fence(lib, argv, modifiers):
"""
Options:
* --off - use off action of fence agent
"""
del lib
modifiers.ensure_only_supported("--off")
if len(argv) != 1:
utils.err("must specify one (and only one) node to fence")
node = argv.pop(0)
if modifiers.get("--off"):
args = ["stonith_admin", "-F", node]
else:
args = ["stonith_admin", "-B", node]
output, retval = utils.run(args)
if retval != 0:
utils.err("unable to fence '%s'\n" % node + output)
else:
print("Node: %s fenced" % node)
def stonith_confirm(lib, argv, modifiers):
"""
Options:
* --force - do not warn user
"""
del lib
modifiers.ensure_only_supported("--force")
if len(argv) != 1:
utils.err("must specify one (and only one) node to confirm fenced")
node = argv.pop(0)
if not modifiers.get("--force"):
answer = utils.get_terminal_input(
(
"WARNING: If node {node} is not powered off or it does"
+ " have access to shared resources, data corruption and/or"
+ " cluster failure may occur. Are you sure you want to"
+ " continue? [y/N] "
).format(node=node)
)
if answer.lower() not in ["y", "yes"]:
print("Canceled")
return
args = ["stonith_admin", "-C", node]
output, retval = utils.run(args)
if retval != 0:
utils.err("unable to confirm fencing of node '%s'\n" % node + output)
else:
print("Node: %s confirmed fenced" % node)
# This is used only by pcsd, will be removed in new architecture
def get_fence_agent_info(lib, argv, modifiers):
"""
Options: no options
"""
del lib
modifiers.ensure_only_supported()
if len(argv) != 1:
utils.err("One parameter expected")
agent = argv[0]
if not agent.startswith("stonith:"):
utils.err("Invalid fence agent name")
runner = utils.cmd_runner()
try:
metadata = lib_ra.StonithAgent(runner, agent[len("stonith:"):])
info = metadata.get_full_info()
info["name"] = "stonith:{0}".format(info["name"])
print(json.dumps(info))
except lib_ra.ResourceAgentError as e:
utils.process_library_reports(
[lib_ra.resource_agent_error_to_report_item(e)]
)
except LibraryError as e:
utils.process_library_reports(e.args)
def sbd_watchdog_list(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if argv:
raise CmdLineInputError()
available_watchdogs = lib.sbd.get_local_available_watchdogs()
if available_watchdogs:
print("Available watchdog(s):")
for watchdog in sorted(available_watchdogs.keys()):
print(" {}".format(watchdog))
else:
print("No available watchdog")
def sbd_watchdog_list_json(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if argv:
raise CmdLineInputError()
print(json.dumps(lib.sbd.get_local_available_watchdogs()))
def sbd_watchdog_test(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if len(argv) > 1:
raise CmdLineInputError()
print(
"Warning: This operation is expected to force-reboot this system "
"without following any shutdown procedures."
)
if utils.get_terminal_input("Proceed? [no/yes]: ") != "yes":
return
watchdog = None
if len(argv) == 1:
watchdog = argv[0]
lib.sbd.test_local_watchdog(watchdog)
def sbd_enable(lib, argv, modifiers):
"""
Options:
* --request-timeout - HTTP request timeout
* --force - allow unknown SBD options
* --skip-offline - skip offline cluster nodes
* --no-watchdog-validation - do not validate watchdog
"""
modifiers.ensure_only_supported(
"--request-timeout", "--force", "--skip-offline",
"--no-watchdog-validation",
)
options = parse_args.prepare_options(
argv,
allowed_repeatable_options=("device", "watchdog")
)
default_watchdog, watchdog_dict = _sbd_parse_watchdogs(
options.get("watchdog", [])
)
default_device_list, node_device_dict = _sbd_parse_node_specific_options(
options.get("device", [])
)
lib.sbd.enable_sbd(
default_watchdog,
watchdog_dict,
{
name: value for name, value in options.items()
if name not in ("device", "watchdog")
},
default_device_list=(
default_device_list if default_device_list else None
),
node_device_dict=node_device_dict if node_device_dict else None,
allow_unknown_opts=modifiers.get("--force"),
ignore_offline_nodes=modifiers.get("--skip-offline"),
no_watchdog_validation=modifiers.get("--no-watchdog-validation"),
)
def _sbd_parse_node_specific_options(arg_list):
"""
Commandline options: no options
"""
default_option_list = []
node_specific_option_dict = {}
for arg in arg_list:
if "@" in arg:
option, node_name = arg.rsplit("@", 1)
if node_name in node_specific_option_dict:
node_specific_option_dict[node_name].append(option)
else:
node_specific_option_dict[node_name] = [option]
else:
default_option_list.append(arg)
return default_option_list, node_specific_option_dict
def _sbd_parse_watchdogs(watchdog_list):
"""
Commandline options: no options
"""
default_watchdog_list, node_specific_watchdog_dict =\
_sbd_parse_node_specific_options(watchdog_list)
if not default_watchdog_list:
default_watchdog = None
elif len(default_watchdog_list) == 1:
default_watchdog = default_watchdog_list[0]
else:
raise CmdLineInputError("Multiple watchdog definitions.")
watchdog_dict = {}
for node, node_watchdog_list in node_specific_watchdog_dict.items():
if len(node_watchdog_list) > 1:
raise CmdLineInputError(
"Multiple watchdog definitions for node '{node}'".format(
node=node
)
)
watchdog_dict[node] = node_watchdog_list[0]
return default_watchdog, watchdog_dict
def sbd_disable(lib, argv, modifiers):
"""
Options:
* --request-timeout - HTTP request timeout
* --skip-offline - skip offline cluster nodes
"""
modifiers.ensure_only_supported("--request-timeout", "--skip-offline")
if argv:
raise CmdLineInputError()
lib.sbd.disable_sbd(modifiers.get("--skip-offline"))
def sbd_status(lib, argv, modifiers):
"""
Options:
* --request-timeout - HTTP request timeout
* --full - display SBD device header
"""
modifiers.ensure_only_supported("--request-timeout", "--full")
def _bool_to_str(val):
if val is None:
return "N/A"
return "YES" if val else " NO"
if argv:
raise CmdLineInputError()
status_list = lib.sbd.get_cluster_sbd_status()
if not status_list:
utils.err("Unable to get SBD status from any node.")
print("SBD STATUS")
print("<node name>: <installed> | <enabled> | <running>")
for node_status in status_list:
status = node_status["status"]
print("{node}: {installed} | {enabled} | {running}".format(
node=node_status["node"],
installed=_bool_to_str(status.get("installed")),
enabled=_bool_to_str(status.get("enabled")),
running=_bool_to_str(status.get("running"))
))
device_list = lib.sbd.get_local_devices_info(modifiers.get("--full"))
for device in device_list:
print()
print("Messages list on device '{0}':".format(device["device"]))
print("<unknown>" if device["list"] is None else device["list"])
if modifiers.get("--full"):
print()
print("SBD header on device '{0}':".format(device["device"]))
print("<unknown>" if device["dump"] is None else device["dump"])
def _print_per_node_option(config_list, config_option):
"""
Commandline options: no options
"""
unknown_value = "<unknown>"
for config in config_list:
value = unknown_value
if config["config"] is not None:
value = config["config"].get(config_option, unknown_value)
print(" {node}: {value}".format(node=config["node"], value=value))
def sbd_config(lib, argv, modifiers):
"""
Options:
* --request-timeout - HTTP request timeout
"""
modifiers.ensure_only_supported("--request-timeout")
if argv:
raise CmdLineInputError()
config_list = lib.sbd.get_cluster_sbd_config()
if not config_list:
utils.err("No config obtained.")
config = config_list[0]["config"]
filtered_options = [
"SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER", "SBD_DEVICE"
]
with_device = False
for key, val in config.items():
if key == "SBD_DEVICE":
with_device = True
if key in filtered_options:
continue
print("{key}={val}".format(key=key, val=val))
print()
print("Watchdogs:")
_print_per_node_option(config_list, "SBD_WATCHDOG_DEV")
if with_device:
print()
print("Devices:")
_print_per_node_option(config_list, "SBD_DEVICE")
def local_sbd_config(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if argv:
raise CmdLineInputError()
print(json.dumps(lib.sbd.get_local_sbd_config()))
def sbd_setup_block_device(lib, argv, modifiers):
"""
Options:
* --force - do not show warning about wiping the devices
"""
modifiers.ensure_only_supported("--force")
options = parse_args.prepare_options(
argv,
allowed_repeatable_options=("device",)
)
device_list = options.get("device", [])
if not device_list:
raise CmdLineInputError("No device defined")
if not modifiers.get("--force"):
answer = utils.get_terminal_input(
(
"WARNING: All current content on device(s) '{device}' will be"
+ " overwritten. Are you sure you want to continue? [y/N] "
).format(device="', '".join(device_list))
)
if answer.lower() not in ["y", "yes"]:
print("Canceled")
return
lib.sbd.initialize_block_devices(
device_list,
{
name: value for name, value in options.items()
if name != "device"
}
)
def sbd_message(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if len(argv) != 3:
raise CmdLineInputError()
device, node, message = argv
lib.sbd.set_message(device, node, message)
def stonith_history_show_cmd(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if len(argv) > 1:
raise CmdLineInputError()
node = argv[0] if argv else None
print(lib.stonith.history_get_text(node))
def stonith_history_cleanup_cmd(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if len(argv) > 1:
raise CmdLineInputError()
node = argv[0] if argv else None
print(lib.stonith.history_cleanup(node))
def stonith_history_update_cmd(lib, argv, modifiers):
"""
Options: no options
"""
modifiers.ensure_only_supported()
if argv:
raise CmdLineInputError()
print(lib.stonith.history_update())
|
dimkal/mne-python
|
refs/heads/master
|
examples/preprocessing/plot_eog_artifact_histogram.py
|
11
|
"""
========================
Show EOG artifact timing
========================
Compute the distribution of timing for EOG artifacts.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
events = mne.find_events(raw, 'STI 014')
eog_event_id = 512
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
raw.add_events(eog_events, 'STI 014')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
tmin, tmax = -0.2, 0.5
event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
# Get the stim channel data
pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
data = epochs.get_data()[:, pick_ch, :].astype(int)
data = np.sum((data.astype(int) & 512) == 512, axis=0)
###############################################################################
# Plot EOG artifact distribution
plt.stem(1e3 * epochs.times, data)
plt.xlabel('Times (ms)')
plt.ylabel('Blink counts (from %s trials)' % len(epochs))
plt.show()
|
cga-harvard/cga-worldmap
|
refs/heads/legacy
|
geonode/maps/urls.py
|
1
|
from django.conf.urls.defaults import patterns, url
js_info_dict = {
'packages': ('geonode.maps','geonode.capabilities'),
}
urlpatterns = patterns('geonode.maps.views',
url(r'^$', 'maps', name='maps_home'),
url(r'^new/?$', 'newmap', name="maps_new"),
url(r'^new/data$', 'newmapJSON', name='maps_new_JSON'),
url(r'^add_endpoint?$', 'add_endpoint', name='add_endpoint'),
(r'^(?P<mapid>\d+)/share/?$', 'map_share'),
(r'^(?P<mapid>\d+)/info/?$', 'map_controller'),
(r'^(?P<mapid>\d+)/info/describe/?$', 'describemap'),
url(r'^(?P<mapid>\d+)/download/$', 'map_download', name='maps_download'),
url(r'^check/$', 'check_download', name='maps_download_check'),
(r'^checkurl/?$', 'ajax_url_lookup'),
(r'^history/(?P<mapid>\d+)/?$', 'ajax_snapshot_history'),
(r'^embed/$', 'embed'),
(r'^(?P<mapid>[^/]+)/embed/?$', 'embed'),
(r'^(?P<mapid>[^/]+)/mobile/?$', 'mobilemap'),
(r'^print/?$', 'printmap'),
url(r'^(?P<mapid>\d+)/data$', 'mapJSON', name='maps_JSON'),
(r'^addgeonodelayer/?$', 'addLayerJSON'),
(r'^snapshot/create/?$', 'snapshot_create'),
url(r'^search/?$', 'maps_search_page', name='maps_search'),
url(r'^search/api/?$', 'maps_search', name='maps_search_api'),
url(r'^search/detail/?$', 'maps_search_result_detail', name='map_search_detail'),
url(r'^(?P<mapid>\d+)/ajax-permissions$', 'ajax_map_permissions', name='maps_ajax_perm'),
url(r'^change-poc/(?P<ids>\w+)$', 'change_poc', name='maps_change_poc'),
url(r'^(?P<mapid>\d+)/ajax-permissions-email/?$', 'ajax_map_permissions_by_email',
name='ajax_map_permissions_by_email'),
(r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/?$', 'view'),
(r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/embed/?$', 'embed'),
(r'^(?P<mapid>[^/]+)/(?P<snapshot>[A-Za-z0-9_\-]+)/mobile/?$', 'mobilemap'),
(r'^(?P<mapid>[^/]+)/?$', 'view'),
)
datapatterns = patterns('geonode.maps.views',
url(r'^resolve_user/?$', 'resolve_user', name='layer_resolve_user'),
url(r'^$', 'browse_data', name='data_home'),
url(r'^acls/?$', 'layer_acls', name='data_acls'),
url(r'^search/?$', 'search_page', name='data_search'),
url(r'^search/api/?$', 'metadata_search', name='data_search_api'),
url(r'^search/detail/?$', 'search_result_detail', name='data_search_detail'),
url(r'^api/batch_permissions/?$', 'batch_permissions', name='data_batch_perm'),
url(r'^api/batch_permissions_by_email/?$', 'batch_permissions_by_email'),
url(r'^api/batch_delete/?$', 'batch_delete', name='data_batch_del'),
url(r'^upload/?', 'upload_layer', name='data_upload'),
url(r'^download$', 'batch_layer_download', name='data_download'),
url(r'^create_pg_layer', 'create_pg_layer', name='create_pg_layer'),
url(r'^addlayers/?$', 'addlayers', name='addlayers'),
(r'^layerstats/?$', 'ajax_increment_layer_stats'),
url(r'^(?P<layername>[^/]*)$', 'layer_detail', name="data_detail"),
url(r'^(?P<layername>[^/]*)/metadata$', 'layer_metadata', name="data_metadata"),
url(r'^(?P<layername>[^/]*)/contacts$', 'layer_contacts', name="layer_contacts"),
url(r'^(?P<layername>[^/]*)/remove$', 'layer_remove', name="data_remove"),
url(r'^(?P<layername>[^/]*)/replace$', 'layer_replace', name="data_replace"),
url(r'^(?P<layername>[^/]*)/style$', 'layer_style', name="data_style"),
url(r'^(?P<layername>[^/]*)/ajax-permissions$', 'ajax_layer_permissions', name='data_ajax_perm'),
url(r'^(?P<layername>[^/]*)/ajax-permissions-email$', 'ajax_layer_permissions_by_email', name="data_ajax_perm_email"),
(r'^(?P<layername>[^/]*)/ajax-edit-check/?$', 'ajax_layer_edit_check'),
(r'^(?P<layername>[^/]*)/ajax_layer_update/?$', 'ajax_layer_update'),
)
|
camptocamp/ngo-addons-backport
|
refs/heads/master
|
addons/event/wizard/__init__.py
|
435
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
PokemonGoF/PokemonGo-Bot-Desktop
|
refs/heads/development
|
build/pywin/Lib/stringprep.py
|
278
|
# This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024,65040))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {
0xb5:u'\u03bc', 0xdf:u'ss', 0x130:u'i\u0307', 0x149:u'\u02bcn',
0x17f:u's', 0x1f0:u'j\u030c', 0x345:u'\u03b9', 0x37a:u' \u03b9',
0x390:u'\u03b9\u0308\u0301', 0x3b0:u'\u03c5\u0308\u0301', 0x3c2:u'\u03c3', 0x3d0:u'\u03b2',
0x3d1:u'\u03b8', 0x3d2:u'\u03c5', 0x3d3:u'\u03cd', 0x3d4:u'\u03cb',
0x3d5:u'\u03c6', 0x3d6:u'\u03c0', 0x3f0:u'\u03ba', 0x3f1:u'\u03c1',
0x3f2:u'\u03c3', 0x3f5:u'\u03b5', 0x587:u'\u0565\u0582', 0x1e96:u'h\u0331',
0x1e97:u't\u0308', 0x1e98:u'w\u030a', 0x1e99:u'y\u030a', 0x1e9a:u'a\u02be',
0x1e9b:u'\u1e61', 0x1f50:u'\u03c5\u0313', 0x1f52:u'\u03c5\u0313\u0300', 0x1f54:u'\u03c5\u0313\u0301',
0x1f56:u'\u03c5\u0313\u0342', 0x1f80:u'\u1f00\u03b9', 0x1f81:u'\u1f01\u03b9', 0x1f82:u'\u1f02\u03b9',
0x1f83:u'\u1f03\u03b9', 0x1f84:u'\u1f04\u03b9', 0x1f85:u'\u1f05\u03b9', 0x1f86:u'\u1f06\u03b9',
0x1f87:u'\u1f07\u03b9', 0x1f88:u'\u1f00\u03b9', 0x1f89:u'\u1f01\u03b9', 0x1f8a:u'\u1f02\u03b9',
0x1f8b:u'\u1f03\u03b9', 0x1f8c:u'\u1f04\u03b9', 0x1f8d:u'\u1f05\u03b9', 0x1f8e:u'\u1f06\u03b9',
0x1f8f:u'\u1f07\u03b9', 0x1f90:u'\u1f20\u03b9', 0x1f91:u'\u1f21\u03b9', 0x1f92:u'\u1f22\u03b9',
0x1f93:u'\u1f23\u03b9', 0x1f94:u'\u1f24\u03b9', 0x1f95:u'\u1f25\u03b9', 0x1f96:u'\u1f26\u03b9',
0x1f97:u'\u1f27\u03b9', 0x1f98:u'\u1f20\u03b9', 0x1f99:u'\u1f21\u03b9', 0x1f9a:u'\u1f22\u03b9',
0x1f9b:u'\u1f23\u03b9', 0x1f9c:u'\u1f24\u03b9', 0x1f9d:u'\u1f25\u03b9', 0x1f9e:u'\u1f26\u03b9',
0x1f9f:u'\u1f27\u03b9', 0x1fa0:u'\u1f60\u03b9', 0x1fa1:u'\u1f61\u03b9', 0x1fa2:u'\u1f62\u03b9',
0x1fa3:u'\u1f63\u03b9', 0x1fa4:u'\u1f64\u03b9', 0x1fa5:u'\u1f65\u03b9', 0x1fa6:u'\u1f66\u03b9',
0x1fa7:u'\u1f67\u03b9', 0x1fa8:u'\u1f60\u03b9', 0x1fa9:u'\u1f61\u03b9', 0x1faa:u'\u1f62\u03b9',
0x1fab:u'\u1f63\u03b9', 0x1fac:u'\u1f64\u03b9', 0x1fad:u'\u1f65\u03b9', 0x1fae:u'\u1f66\u03b9',
0x1faf:u'\u1f67\u03b9', 0x1fb2:u'\u1f70\u03b9', 0x1fb3:u'\u03b1\u03b9', 0x1fb4:u'\u03ac\u03b9',
0x1fb6:u'\u03b1\u0342', 0x1fb7:u'\u03b1\u0342\u03b9', 0x1fbc:u'\u03b1\u03b9', 0x1fbe:u'\u03b9',
0x1fc2:u'\u1f74\u03b9', 0x1fc3:u'\u03b7\u03b9', 0x1fc4:u'\u03ae\u03b9', 0x1fc6:u'\u03b7\u0342',
0x1fc7:u'\u03b7\u0342\u03b9', 0x1fcc:u'\u03b7\u03b9', 0x1fd2:u'\u03b9\u0308\u0300', 0x1fd3:u'\u03b9\u0308\u0301',
0x1fd6:u'\u03b9\u0342', 0x1fd7:u'\u03b9\u0308\u0342', 0x1fe2:u'\u03c5\u0308\u0300', 0x1fe3:u'\u03c5\u0308\u0301',
0x1fe4:u'\u03c1\u0313', 0x1fe6:u'\u03c5\u0342', 0x1fe7:u'\u03c5\u0308\u0342', 0x1ff2:u'\u1f7c\u03b9',
0x1ff3:u'\u03c9\u03b9', 0x1ff4:u'\u03ce\u03b9', 0x1ff6:u'\u03c9\u0342', 0x1ff7:u'\u03c9\u0342\u03b9',
0x1ffc:u'\u03c9\u03b9', 0x20a8:u'rs', 0x2102:u'c', 0x2103:u'\xb0c',
0x2107:u'\u025b', 0x2109:u'\xb0f', 0x210b:u'h', 0x210c:u'h',
0x210d:u'h', 0x2110:u'i', 0x2111:u'i', 0x2112:u'l',
0x2115:u'n', 0x2116:u'no', 0x2119:u'p', 0x211a:u'q',
0x211b:u'r', 0x211c:u'r', 0x211d:u'r', 0x2120:u'sm',
0x2121:u'tel', 0x2122:u'tm', 0x2124:u'z', 0x2128:u'z',
0x212c:u'b', 0x212d:u'c', 0x2130:u'e', 0x2131:u'f',
0x2133:u'm', 0x213e:u'\u03b3', 0x213f:u'\u03c0', 0x2145:u'd',
0x3371:u'hpa', 0x3373:u'au', 0x3375:u'ov', 0x3380:u'pa',
0x3381:u'na', 0x3382:u'\u03bca', 0x3383:u'ma', 0x3384:u'ka',
0x3385:u'kb', 0x3386:u'mb', 0x3387:u'gb', 0x338a:u'pf',
0x338b:u'nf', 0x338c:u'\u03bcf', 0x3390:u'hz', 0x3391:u'khz',
0x3392:u'mhz', 0x3393:u'ghz', 0x3394:u'thz', 0x33a9:u'pa',
0x33aa:u'kpa', 0x33ab:u'mpa', 0x33ac:u'gpa', 0x33b4:u'pv',
0x33b5:u'nv', 0x33b6:u'\u03bcv', 0x33b7:u'mv', 0x33b8:u'kv',
0x33b9:u'mv', 0x33ba:u'pw', 0x33bb:u'nw', 0x33bc:u'\u03bcw',
0x33bd:u'mw', 0x33be:u'kw', 0x33bf:u'mw', 0x33c0:u'k\u03c9',
0x33c1:u'm\u03c9', 0x33c3:u'bq', 0x33c6:u'c\u2215kg', 0x33c7:u'co.',
0x33c8:u'db', 0x33c9:u'gy', 0x33cb:u'hp', 0x33cd:u'kk',
0x33ce:u'km', 0x33d7:u'ph', 0x33d9:u'ppm', 0x33da:u'pr',
0x33dc:u'sv', 0x33dd:u'wb', 0xfb00:u'ff', 0xfb01:u'fi',
0xfb02:u'fl', 0xfb03:u'ffi', 0xfb04:u'ffl', 0xfb05:u'st',
0xfb06:u'st', 0xfb13:u'\u0574\u0576', 0xfb14:u'\u0574\u0565', 0xfb15:u'\u0574\u056b',
0xfb16:u'\u057e\u0576', 0xfb17:u'\u0574\u056d', 0x1d400:u'a', 0x1d401:u'b',
0x1d402:u'c', 0x1d403:u'd', 0x1d404:u'e', 0x1d405:u'f',
0x1d406:u'g', 0x1d407:u'h', 0x1d408:u'i', 0x1d409:u'j',
0x1d40a:u'k', 0x1d40b:u'l', 0x1d40c:u'm', 0x1d40d:u'n',
0x1d40e:u'o', 0x1d40f:u'p', 0x1d410:u'q', 0x1d411:u'r',
0x1d412:u's', 0x1d413:u't', 0x1d414:u'u', 0x1d415:u'v',
0x1d416:u'w', 0x1d417:u'x', 0x1d418:u'y', 0x1d419:u'z',
0x1d434:u'a', 0x1d435:u'b', 0x1d436:u'c', 0x1d437:u'd',
0x1d438:u'e', 0x1d439:u'f', 0x1d43a:u'g', 0x1d43b:u'h',
0x1d43c:u'i', 0x1d43d:u'j', 0x1d43e:u'k', 0x1d43f:u'l',
0x1d440:u'm', 0x1d441:u'n', 0x1d442:u'o', 0x1d443:u'p',
0x1d444:u'q', 0x1d445:u'r', 0x1d446:u's', 0x1d447:u't',
0x1d448:u'u', 0x1d449:u'v', 0x1d44a:u'w', 0x1d44b:u'x',
0x1d44c:u'y', 0x1d44d:u'z', 0x1d468:u'a', 0x1d469:u'b',
0x1d46a:u'c', 0x1d46b:u'd', 0x1d46c:u'e', 0x1d46d:u'f',
0x1d46e:u'g', 0x1d46f:u'h', 0x1d470:u'i', 0x1d471:u'j',
0x1d472:u'k', 0x1d473:u'l', 0x1d474:u'm', 0x1d475:u'n',
0x1d476:u'o', 0x1d477:u'p', 0x1d478:u'q', 0x1d479:u'r',
0x1d47a:u's', 0x1d47b:u't', 0x1d47c:u'u', 0x1d47d:u'v',
0x1d47e:u'w', 0x1d47f:u'x', 0x1d480:u'y', 0x1d481:u'z',
0x1d49c:u'a', 0x1d49e:u'c', 0x1d49f:u'd', 0x1d4a2:u'g',
0x1d4a5:u'j', 0x1d4a6:u'k', 0x1d4a9:u'n', 0x1d4aa:u'o',
0x1d4ab:u'p', 0x1d4ac:u'q', 0x1d4ae:u's', 0x1d4af:u't',
0x1d4b0:u'u', 0x1d4b1:u'v', 0x1d4b2:u'w', 0x1d4b3:u'x',
0x1d4b4:u'y', 0x1d4b5:u'z', 0x1d4d0:u'a', 0x1d4d1:u'b',
0x1d4d2:u'c', 0x1d4d3:u'd', 0x1d4d4:u'e', 0x1d4d5:u'f',
0x1d4d6:u'g', 0x1d4d7:u'h', 0x1d4d8:u'i', 0x1d4d9:u'j',
0x1d4da:u'k', 0x1d4db:u'l', 0x1d4dc:u'm', 0x1d4dd:u'n',
0x1d4de:u'o', 0x1d4df:u'p', 0x1d4e0:u'q', 0x1d4e1:u'r',
0x1d4e2:u's', 0x1d4e3:u't', 0x1d4e4:u'u', 0x1d4e5:u'v',
0x1d4e6:u'w', 0x1d4e7:u'x', 0x1d4e8:u'y', 0x1d4e9:u'z',
0x1d504:u'a', 0x1d505:u'b', 0x1d507:u'd', 0x1d508:u'e',
0x1d509:u'f', 0x1d50a:u'g', 0x1d50d:u'j', 0x1d50e:u'k',
0x1d50f:u'l', 0x1d510:u'm', 0x1d511:u'n', 0x1d512:u'o',
0x1d513:u'p', 0x1d514:u'q', 0x1d516:u's', 0x1d517:u't',
0x1d518:u'u', 0x1d519:u'v', 0x1d51a:u'w', 0x1d51b:u'x',
0x1d51c:u'y', 0x1d538:u'a', 0x1d539:u'b', 0x1d53b:u'd',
0x1d53c:u'e', 0x1d53d:u'f', 0x1d53e:u'g', 0x1d540:u'i',
0x1d541:u'j', 0x1d542:u'k', 0x1d543:u'l', 0x1d544:u'm',
0x1d546:u'o', 0x1d54a:u's', 0x1d54b:u't', 0x1d54c:u'u',
0x1d54d:u'v', 0x1d54e:u'w', 0x1d54f:u'x', 0x1d550:u'y',
0x1d56c:u'a', 0x1d56d:u'b', 0x1d56e:u'c', 0x1d56f:u'd',
0x1d570:u'e', 0x1d571:u'f', 0x1d572:u'g', 0x1d573:u'h',
0x1d574:u'i', 0x1d575:u'j', 0x1d576:u'k', 0x1d577:u'l',
0x1d578:u'm', 0x1d579:u'n', 0x1d57a:u'o', 0x1d57b:u'p',
0x1d57c:u'q', 0x1d57d:u'r', 0x1d57e:u's', 0x1d57f:u't',
0x1d580:u'u', 0x1d581:u'v', 0x1d582:u'w', 0x1d583:u'x',
0x1d584:u'y', 0x1d585:u'z', 0x1d5a0:u'a', 0x1d5a1:u'b',
0x1d5a2:u'c', 0x1d5a3:u'd', 0x1d5a4:u'e', 0x1d5a5:u'f',
0x1d5a6:u'g', 0x1d5a7:u'h', 0x1d5a8:u'i', 0x1d5a9:u'j',
0x1d5aa:u'k', 0x1d5ab:u'l', 0x1d5ac:u'm', 0x1d5ad:u'n',
0x1d5ae:u'o', 0x1d5af:u'p', 0x1d5b0:u'q', 0x1d5b1:u'r',
0x1d5b2:u's', 0x1d5b3:u't', 0x1d5b4:u'u', 0x1d5b5:u'v',
0x1d5b6:u'w', 0x1d5b7:u'x', 0x1d5b8:u'y', 0x1d5b9:u'z',
0x1d5d4:u'a', 0x1d5d5:u'b', 0x1d5d6:u'c', 0x1d5d7:u'd',
0x1d5d8:u'e', 0x1d5d9:u'f', 0x1d5da:u'g', 0x1d5db:u'h',
0x1d5dc:u'i', 0x1d5dd:u'j', 0x1d5de:u'k', 0x1d5df:u'l',
0x1d5e0:u'm', 0x1d5e1:u'n', 0x1d5e2:u'o', 0x1d5e3:u'p',
0x1d5e4:u'q', 0x1d5e5:u'r', 0x1d5e6:u's', 0x1d5e7:u't',
0x1d5e8:u'u', 0x1d5e9:u'v', 0x1d5ea:u'w', 0x1d5eb:u'x',
0x1d5ec:u'y', 0x1d5ed:u'z', 0x1d608:u'a', 0x1d609:u'b',
0x1d60a:u'c', 0x1d60b:u'd', 0x1d60c:u'e', 0x1d60d:u'f',
0x1d60e:u'g', 0x1d60f:u'h', 0x1d610:u'i', 0x1d611:u'j',
0x1d612:u'k', 0x1d613:u'l', 0x1d614:u'm', 0x1d615:u'n',
0x1d616:u'o', 0x1d617:u'p', 0x1d618:u'q', 0x1d619:u'r',
0x1d61a:u's', 0x1d61b:u't', 0x1d61c:u'u', 0x1d61d:u'v',
0x1d61e:u'w', 0x1d61f:u'x', 0x1d620:u'y', 0x1d621:u'z',
0x1d63c:u'a', 0x1d63d:u'b', 0x1d63e:u'c', 0x1d63f:u'd',
0x1d640:u'e', 0x1d641:u'f', 0x1d642:u'g', 0x1d643:u'h',
0x1d644:u'i', 0x1d645:u'j', 0x1d646:u'k', 0x1d647:u'l',
0x1d648:u'm', 0x1d649:u'n', 0x1d64a:u'o', 0x1d64b:u'p',
0x1d64c:u'q', 0x1d64d:u'r', 0x1d64e:u's', 0x1d64f:u't',
0x1d650:u'u', 0x1d651:u'v', 0x1d652:u'w', 0x1d653:u'x',
0x1d654:u'y', 0x1d655:u'z', 0x1d670:u'a', 0x1d671:u'b',
0x1d672:u'c', 0x1d673:u'd', 0x1d674:u'e', 0x1d675:u'f',
0x1d676:u'g', 0x1d677:u'h', 0x1d678:u'i', 0x1d679:u'j',
0x1d67a:u'k', 0x1d67b:u'l', 0x1d67c:u'm', 0x1d67d:u'n',
0x1d67e:u'o', 0x1d67f:u'p', 0x1d680:u'q', 0x1d681:u'r',
0x1d682:u's', 0x1d683:u't', 0x1d684:u'u', 0x1d685:u'v',
0x1d686:u'w', 0x1d687:u'x', 0x1d688:u'y', 0x1d689:u'z',
0x1d6a8:u'\u03b1', 0x1d6a9:u'\u03b2', 0x1d6aa:u'\u03b3', 0x1d6ab:u'\u03b4',
0x1d6ac:u'\u03b5', 0x1d6ad:u'\u03b6', 0x1d6ae:u'\u03b7', 0x1d6af:u'\u03b8',
0x1d6b0:u'\u03b9', 0x1d6b1:u'\u03ba', 0x1d6b2:u'\u03bb', 0x1d6b3:u'\u03bc',
0x1d6b4:u'\u03bd', 0x1d6b5:u'\u03be', 0x1d6b6:u'\u03bf', 0x1d6b7:u'\u03c0',
0x1d6b8:u'\u03c1', 0x1d6b9:u'\u03b8', 0x1d6ba:u'\u03c3', 0x1d6bb:u'\u03c4',
0x1d6bc:u'\u03c5', 0x1d6bd:u'\u03c6', 0x1d6be:u'\u03c7', 0x1d6bf:u'\u03c8',
0x1d6c0:u'\u03c9', 0x1d6d3:u'\u03c3', 0x1d6e2:u'\u03b1', 0x1d6e3:u'\u03b2',
0x1d6e4:u'\u03b3', 0x1d6e5:u'\u03b4', 0x1d6e6:u'\u03b5', 0x1d6e7:u'\u03b6',
0x1d6e8:u'\u03b7', 0x1d6e9:u'\u03b8', 0x1d6ea:u'\u03b9', 0x1d6eb:u'\u03ba',
0x1d6ec:u'\u03bb', 0x1d6ed:u'\u03bc', 0x1d6ee:u'\u03bd', 0x1d6ef:u'\u03be',
0x1d6f0:u'\u03bf', 0x1d6f1:u'\u03c0', 0x1d6f2:u'\u03c1', 0x1d6f3:u'\u03b8',
0x1d6f4:u'\u03c3', 0x1d6f5:u'\u03c4', 0x1d6f6:u'\u03c5', 0x1d6f7:u'\u03c6',
0x1d6f8:u'\u03c7', 0x1d6f9:u'\u03c8', 0x1d6fa:u'\u03c9', 0x1d70d:u'\u03c3',
0x1d71c:u'\u03b1', 0x1d71d:u'\u03b2', 0x1d71e:u'\u03b3', 0x1d71f:u'\u03b4',
0x1d720:u'\u03b5', 0x1d721:u'\u03b6', 0x1d722:u'\u03b7', 0x1d723:u'\u03b8',
0x1d724:u'\u03b9', 0x1d725:u'\u03ba', 0x1d726:u'\u03bb', 0x1d727:u'\u03bc',
0x1d728:u'\u03bd', 0x1d729:u'\u03be', 0x1d72a:u'\u03bf', 0x1d72b:u'\u03c0',
0x1d72c:u'\u03c1', 0x1d72d:u'\u03b8', 0x1d72e:u'\u03c3', 0x1d72f:u'\u03c4',
0x1d730:u'\u03c5', 0x1d731:u'\u03c6', 0x1d732:u'\u03c7', 0x1d733:u'\u03c8',
0x1d734:u'\u03c9', 0x1d747:u'\u03c3', 0x1d756:u'\u03b1', 0x1d757:u'\u03b2',
0x1d758:u'\u03b3', 0x1d759:u'\u03b4', 0x1d75a:u'\u03b5', 0x1d75b:u'\u03b6',
0x1d75c:u'\u03b7', 0x1d75d:u'\u03b8', 0x1d75e:u'\u03b9', 0x1d75f:u'\u03ba',
0x1d760:u'\u03bb', 0x1d761:u'\u03bc', 0x1d762:u'\u03bd', 0x1d763:u'\u03be',
0x1d764:u'\u03bf', 0x1d765:u'\u03c0', 0x1d766:u'\u03c1', 0x1d767:u'\u03b8',
0x1d768:u'\u03c3', 0x1d769:u'\u03c4', 0x1d76a:u'\u03c5', 0x1d76b:u'\u03c6',
0x1d76c:u'\u03c7', 0x1d76d:u'\u03c8', 0x1d76e:u'\u03c9', 0x1d781:u'\u03c3',
0x1d790:u'\u03b1', 0x1d791:u'\u03b2', 0x1d792:u'\u03b3', 0x1d793:u'\u03b4',
0x1d794:u'\u03b5', 0x1d795:u'\u03b6', 0x1d796:u'\u03b7', 0x1d797:u'\u03b8',
0x1d798:u'\u03b9', 0x1d799:u'\u03ba', 0x1d79a:u'\u03bb', 0x1d79b:u'\u03bc',
0x1d79c:u'\u03bd', 0x1d79d:u'\u03be', 0x1d79e:u'\u03bf', 0x1d79f:u'\u03c0',
0x1d7a0:u'\u03c1', 0x1d7a1:u'\u03b8', 0x1d7a2:u'\u03c3', 0x1d7a3:u'\u03c4',
0x1d7a4:u'\u03c5', 0x1d7a5:u'\u03c6', 0x1d7a6:u'\u03c7', 0x1d7a7:u'\u03c8',
0x1d7a8:u'\u03c9', 0x1d7bb:u'\u03c3', }
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = u"".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
def in_table_c11(code):
return code == u" "
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != u" "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + range(8288,8292) + range(8298,8304) + range(65529,65533) + range(119155,119163))
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \
ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == "Co"
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
c6_set = set(range(65529,65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272,12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + range(8234,8239) + range(8298,8304))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + range(917536,917632))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
|
tylernm14/donut-dagger
|
refs/heads/master
|
wrapper/python/streamed_job/setup.py
|
2
|
from distutils.core import setup
setup(
name='streamed_job',
version='0.1.0',
author='Tyler Martin',
author_email='tylernm@gmail.com',
packages=['streamed_job'],
scripts=['bin/run_streamed_job.py'],
url='http://www.fakeurl.nowhere',
license='LICENSE.txt',
description='Run a process with a timeout and stream output to a db',
long_description=open('README.txt').read(),
install_requires=[
"backports.tempfile",
"backoff",
"requests",
],
)
|
citrix-openstack/build-ryu
|
refs/heads/ctx-nova-network-smoke-latest
|
ryu/controller/handler.py
|
7
|
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
from ryu.controller import ofp_event
LOG = logging.getLogger('ryu.controller.handler')
# just represent OF datapath state. datapath specific so should be moved.
HANDSHAKE_DISPATCHER = "handshake"
CONFIG_DISPATCHER = "config"
MAIN_DISPATCHER = "main"
DEAD_DISPATCHER = "dead"
# should be named something like 'observe_event'
def set_ev_cls(ev_cls, dispatchers=None):
def _set_ev_cls_dec(handler):
handler.ev_cls = ev_cls
handler.dispatchers = _listify(dispatchers)
handler.observer = ev_cls.__module__
return handler
return _set_ev_cls_dec
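# Illustrative usage sketch (not part of this module): a Ryu application method
# decorated with set_ev_cls is registered as an observer of the given event class
# while the datapath is in one of the listed dispatcher states, e.g.
#   @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
#   def _packet_in_handler(self, ev):
#       msg = ev.msg  # the OpenFlow message carried by the event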
def set_ev_handler(ev_cls, dispatchers=None):
def _set_ev_cls_dec(handler):
handler.ev_cls = ev_cls
handler.dispatchers = _listify(dispatchers)
return handler
return _set_ev_cls_dec
def _is_ev_cls(meth):
return hasattr(meth, 'ev_cls')
def _listify(may_list):
if may_list is None:
may_list = []
if not isinstance(may_list, list):
may_list = [may_list]
return may_list
def register_instance(i):
for _k, m in inspect.getmembers(i, inspect.ismethod):
# LOG.debug('instance %s k %s m %s', i, _k, m)
if _is_ev_cls(m):
i.register_handler(m.ev_cls, m)
|
octopus-platform/octopus-tools
|
refs/heads/master
|
tests/orientdb_shell_manager.py
|
5
|
import unittest
from octopus.server.shell_mananger import ShellManager
class TestShellManager(unittest.TestCase):
def testUnreachableServer(self):
self.hostname = 'localhost'
self.port = '1337'
shell_manager = ShellManager(self.hostname, self.port)
shells = shell_manager.list()
self.assertRaises(ConnectionRefusedError, list, shells)
|
boooka/GeoPowerOff
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/trie/py.py
|
1323
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
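# Illustrative sketch (not part of the original module): keys() returns every stored
# key sharing the given prefix, while has_keys_with_prefix() only tests existence, e.g.
#   t = Trie({u"abc": 0, u"abcd": 1, u"bcd": 2})
#   t.keys(u"abc")                 # -> {u"abc", u"abcd"}
#   t.has_keys_with_prefix(u"bc")  # -> True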
|
blckshrk/Weboob
|
refs/heads/master
|
modules/gdfsuez/pages/history.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Mathieu Jourdan
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import subprocess
import tempfile
import shutil
from datetime import date
from decimal import Decimal
from weboob.tools.browser import BasePage
from weboob.capabilities.base import NotAvailable
from weboob.capabilities.bill import Detail, Bill
__all__ = ['HistoryPage', 'PdfPage']
class HistoryPage(BasePage):
def on_loaded(self):
self.details = []
self.bills = []
# Latest bill
div = self.document.xpath('//div[@class="consulter_dernierefacture"]')[0]
bdate = div.xpath('p[@class="date"]/span[@class="textetertiaire"]')[0].text
bprice = div.xpath('p[@class="montant"]/span[@class="textetertiaire"]')[0].text
link = div.xpath('a[@id="display_popin"]')[0].attrib['href']
mydate = date(*reversed([int(x) for x in bdate.split("/")]))
price = Decimal(bprice.strip(u' € TTC').replace(',', '.'))
self.bills.append(self._create_bill(mydate, price, link))
# Previous bills
table = self.document.xpath('//table[@class="afficher_factures"]')[0]
for tr in table[0].xpath('//tbody/tr'):
cells = tr.xpath('td')
bdate = unicode(cells[0].text.strip())
mydate = date(*reversed([int(x) for x in bdate.split("/")]))
bprice = unicode(cells[1].text)
price = Decimal(bprice.strip(u' €').replace(',', '.'))
link = cells[3].xpath('a')[0].attrib['href']
self.bills.append(self._create_bill(mydate, price, link))
def _create_bill(self, date, price, link):
bill = Bill()
bill.id = date.__str__().replace('-', '')
bill.date = date
bill._price = price
bill._url = link
bill.format = u'pdf'
bill.label = unicode(price)
return bill
def get_details(self):
return self.details
def get_bills(self):
return self.bills
class PdfPage():
def __init__(self, file):
self.pdf = file
def _parse_pdf(self):
pdffile = tempfile.NamedTemporaryFile(bufsize=100000, mode='w', suffix='.pdf')
temptxt = pdffile.name.replace('.pdf', '.txt')
cmd = "ebook-convert"
stdout = open("/dev/null", "w")
shutil.copyfileobj(self.pdf, pdffile)
pdffile.flush()
subprocess.call([cmd, pdffile.name, temptxt], stdout=stdout)
pdffile.close()
txtfile = open(temptxt, 'r')
txt = txtfile.read()
txtfile.close()
os.remove(temptxt)
return txt
def _parse_page(self, page):
# Regexp
footnote = re.compile(r'\([0-9]\) ') # (f)
ht = re.compile('HT par mois')
base = re.compile('la base de')
enddate = re.compile('\d\d\/\d\d\/\d\d') # YY/MM/DD
endwithdigit = re.compile('\d+$') # blah blah 42
textwithcoma = re.compile('([a-z]|\d{4})\,') # blah 2012, blah blah
# Parsing
details = []
for title in ['Abonnement',
'Consommation',
'Contributions et taxes liées à l\'énergie']:
section = page.split(title, 1)[1].split('Total ')[0]
# When a line holds '(0)', a newline is missing.
section = re.sub(footnote, '\n', section)
lines = section.split('\n')
lines = [x for x in lines if len(x) > 0] # Remove empty lines
detail = None
for line in lines:
if re.match('[A-Za-z]', line[0]):
# Things we want to merge with the one just before
if 'facturées' in line:
# Long lines are sometimes split, so we try to join them
# That is the case for:
# 'Déduction du montant des consommations
# estimées facturées du 00/00/00 au 00/00/00'
detail.label = detail.label + u' ' + unicode(line, encoding='utf-8')
# Things for which we want a new detail
else:
# Entering here, we will instantiate a new detail.
# We hadn't so before because of fragmented lines.
if detail is not None and detail.label is not NotAvailable:
# We have a new element, return the other one
details.append(detail)
detail = Detail()
detail.price = Decimal(0)
# If the comma is not a decimal separator, then
# this is probably a loooong sentence.
# When it comes to jokes, keep it short and sweet.
line = re.split(textwithcoma, line)[0]
# Things we want for sure
if re.findall(enddate, line):
# When a line has been badly split after a date,
# We want the label to end after the date, and maybe
# the second part to be the info
mydate = re.search(enddate, line).group(0)
mylist = line.rpartition(mydate)
label = mylist[0] + mylist[1]
detail.label = unicode(label, encoding='utf-8')
elif re.findall(endwithdigit, line):
# What is this stupid number at the end of the line?
# Line should have been split before the number
detail.label = unicode(re.split(endwithdigit, line)[0], encoding='utf-8')
# Things we don't want for sure
elif ')' in line and '(' not in line:
# First part of the parenthesis should have been drop before
# Avoid to create a new empty detail
detail.label = NotAvailable
elif re.match(base, line):
# This string should come always after a date,
# usually, it will match one of the cases above.
# Sometimes, it appears on a new line we don't need.
detail.label = NotAvailable
elif re.match(ht, line):
# '00,00 € HT par mois' may have been split after HT
# We don't need the second line
detail.label = NotAvailable
# Things we probably want to keep
else:
# Well, maybe our line is correct, after all.
# Not much to do.
detail.label = unicode(line, encoding='utf-8')
detail.infos = NotAvailable
elif ' %' in line:
if isinstance(detail, Detail):
# Sometimes the vat is not on a new line:
# '00,00 00,0 %' instead of '00,0 %'
vat = line.split()[line.count(' ')-1].replace(',', '.')
detail.infos = unicode('TVA: ' + vat)
elif ' €' in line:
price = line.replace(',', '.')
if isinstance(detail, Detail):
detail.price = Decimal(price.strip(' €'))
elif re.match(enddate, line):
# Line holding dates may have been mixed up
label = detail.label.split(' au ')[0] + u' au ' + unicode(line, encoding='utf-8')
detail.label = label
if detail.label is not NotAvailable:
# Do not append empty details to the list
# It seemed easier to create details anyway than dealing
# with None objects
details.append(detail)
return details
def get_details(self, label):
txt = self._parse_pdf()
page = None
if label == u'Gaz naturel':
page = txt.split('GAZ NATUREL')[1].split('TOTAL GAZ NATUREL TTC')[0]
elif label == u'Electricité':
page = txt.split('ELECTRICITE')[1].split('TOTAL ELECTRICITE TTC')[0]
else:
pass
return self._parse_page(page)
|
AtScaleInc/Impala
|
refs/heads/master
|
thirdparty/thrift-0.9.0/tutorial/py/PythonServer.py
|
28
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
sys.path.append('../gen-py')
from tutorial import Calculator
from tutorial.ttypes import *
from shared.ttypes import SharedStruct
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
class CalculatorHandler:
def __init__(self):
self.log = {}
def ping(self):
print 'ping()'
def add(self, n1, n2):
print 'add(%d,%d)' % (n1, n2)
return n1+n2
def calculate(self, logid, work):
print 'calculate(%d, %r)' % (logid, work)
if work.op == Operation.ADD:
val = work.num1 + work.num2
elif work.op == Operation.SUBTRACT:
val = work.num1 - work.num2
elif work.op == Operation.MULTIPLY:
val = work.num1 * work.num2
elif work.op == Operation.DIVIDE:
if work.num2 == 0:
x = InvalidOperation()
x.what = work.op
x.why = 'Cannot divide by 0'
raise x
val = work.num1 / work.num2
else:
x = InvalidOperation()
x.what = work.op
x.why = 'Invalid operation'
raise x
log = SharedStruct()
log.key = logid
log.value = '%d' % (val)
self.log[logid] = log
return val
def getStruct(self, key):
print 'getStruct(%d)' % (key)
return self.log[key]
def zip(self):
print 'zip()'
handler = CalculatorHandler()
processor = Calculator.Processor(handler)
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
# You could do one of these for a multithreaded server
#server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
#server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory)
print 'Starting the server...'
server.serve()
print 'done.'
|
oihane/odoo
|
refs/heads/8.0
|
addons/stock_account/stock_account.py
|
89
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID, api
import logging
_logger = logging.getLogger(__name__)
class stock_inventory(osv.osv):
_inherit = "stock.inventory"
_columns = {
'period_id': fields.many2one('account.period', 'Force Valuation Period', help="Choose the accounting period where you want to value the stock moves created by the inventory instead of the default one (chosen by the inventory end date)"),
}
def post_inventory(self, cr, uid, inv, context=None):
if context is None:
context = {}
ctx = context.copy()
if inv.period_id:
ctx['force_period'] = inv.period_id.id
return super(stock_inventory, self).post_inventory(cr, uid, inv, context=ctx)
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_inherit = "stock.location"
_columns = {
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain=[('type', '=', 'other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain=[('type', '=', 'other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
_inherit = "stock.quant"
def _get_inventory_value(self, cr, uid, quant, context=None):
if quant.product_id.cost_method in ('real',):
return quant.cost * quant.qty
return super(stock_quant, self)._get_inventory_value(cr, uid, quant, context=context)
@api.cr_uid_ids_context
def _price_update(self, cr, uid, quant_ids, newprice, context=None):
''' This function is called at the end of negative quant reconciliation and does the accounting entry adjustments and the update of the product cost price if needed
'''
if context is None:
context = {}
account_period = self.pool['account.period']
super(stock_quant, self)._price_update(cr, uid, quant_ids, newprice, context=context)
for quant in self.browse(cr, uid, quant_ids, context=context):
move = self._get_latest_move(cr, uid, quant, context=context)
valuation_update = newprice - quant.cost
# this is where we post accounting entries for adjustment, if needed
if not quant.company_id.currency_id.is_zero(valuation_update):
# adjustment journal entry needed, cost has been updated
period_id = (context.get('force_period') or
account_period.find(cr, uid, move.date, context=context)[0])
period = account_period.browse(cr, uid, period_id, context=context)
# If neg quant period already closed (likely with manual valuation), skip update
if period.state != 'done':
ctx = dict(context, force_valuation_amount=valuation_update)
self._account_entry_move(cr, uid, [quant], move, context=ctx)
#update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
#1) the product cost's method is 'real'
#2) we just fixed a negative quant caused by an outgoing shipment
if quant.product_id.cost_method == 'real' and quant.location_id.usage != 'internal':
self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
def _account_entry_move(self, cr, uid, quants, move, context=None):
"""
Accounting Valuation Entries
quants: browse record list of Quants to create accounting valuation entries for. Must be non-empty, and all quants are supposed to have the same location id (they have already moved in)
move: Move to use. browse record
"""
if context is None:
context = {}
location_obj = self.pool.get('stock.location')
location_from = move.location_id
location_to = quants[0].location_id
company_from = location_obj._location_owner(cr, uid, location_from, context=context)
company_to = location_obj._location_owner(cr, uid, location_to, context=context)
if move.product_id.valuation != 'real_time':
return False
for q in quants:
if q.owner_id:
#if the quant isn't owned by the company, we don't make any valuation entry
return False
if q.qty <= 0:
#we don't make any stock valuation for negative quants because the valuation is already made for the counterpart.
#At that time the valuation will be made at the product cost price and afterward there will be new accounting entries
#to make the adjustments when we know the real cost price.
return False
#in case of routes making the link between several warehouses of the same company, the transit location belongs to this company, so we don't need to create accounting entries
# Create Journal Entry for products arriving in the company
if company_to and (move.location_id.usage not in ('internal', 'transit') and move.location_dest_id.usage == 'internal' or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_to.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_from and location_from.usage == 'customer':
#goods returned from customer
self._create_account_move_line(cr, uid, quants, move, acc_dest, acc_valuation, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_src, acc_valuation, journal_id, context=ctx)
# Create Journal Entry for products leaving the company
if company_from and (move.location_id.usage == 'internal' and move.location_dest_id.usage not in ('internal', 'transit') or company_from != company_to):
ctx = context.copy()
ctx['force_company'] = company_from.id
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, context=ctx)
if location_to and location_to.usage == 'supplier':
#goods returned to supplier
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_src, journal_id, context=ctx)
else:
self._create_account_move_line(cr, uid, quants, move, acc_valuation, acc_dest, journal_id, context=ctx)
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None):
quant = super(stock_quant, self)._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=force_location_from, force_location_to=force_location_to, context=context)
if move.product_id.valuation == 'real_time':
self._account_entry_move(cr, uid, [quant], move, context)
return quant
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
res = super(stock_quant, self).move_quants_write(cr, uid, quants, move, location_dest_id, dest_package_id, context=context)
if move.product_id.valuation == 'real_time':
self._account_entry_move(cr, uid, quants, move, context=context)
return res
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the quant.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
:returns: journal_id, source account, destination account, valuation account
:raise: osv.except_osv() if any mandatory account or journal is not defined.
"""
product_obj = self.pool.get('product.template')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.product_tmpl_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
return journal_id, acc_src, acc_dest, acc_valuation
def _prepare_account_move_line(self, cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=None):
"""
Generate the account.move.line values to post to track the stock valuation difference due to the
processing of the given quant.
"""
if context is None:
context = {}
currency_obj = self.pool.get('res.currency')
if context.get('force_valuation_amount'):
valuation_amount = context.get('force_valuation_amount')
else:
if move.product_id.cost_method == 'average':
valuation_amount = cost if move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal' else move.product_id.standard_price
else:
valuation_amount = cost if move.product_id.cost_method == 'real' else move.product_id.standard_price
#the standard_price of the product may be in another decimal precision, or not compatible with the coinage of
#the company currency... so we need to use round() before creating the accounting entries.
valuation_amount = currency_obj.round(cr, uid, move.company_id.currency_id, valuation_amount * qty)
partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
debit_line_vals = {
'name': move.name,
'product_id': move.product_id.id,
'quantity': qty,
'product_uom_id': move.product_id.uom_id.id,
'ref': move.picking_id and move.picking_id.name or False,
'date': move.date,
'partner_id': partner_id,
'debit': valuation_amount > 0 and valuation_amount or 0,
'credit': valuation_amount < 0 and -valuation_amount or 0,
'account_id': debit_account_id,
}
credit_line_vals = {
'name': move.name,
'product_id': move.product_id.id,
'quantity': qty,
'product_uom_id': move.product_id.uom_id.id,
'ref': move.picking_id and move.picking_id.name or False,
'date': move.date,
'partner_id': partner_id,
'credit': valuation_amount > 0 and valuation_amount or 0,
'debit': valuation_amount < 0 and -valuation_amount or 0,
'account_id': credit_account_id,
}
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def _create_account_move_line(self, cr, uid, quants, move, credit_account_id, debit_account_id, journal_id, context=None):
#group quants by cost
quant_cost_qty = {}
for quant in quants:
if quant_cost_qty.get(quant.cost):
quant_cost_qty[quant.cost] += quant.qty
else:
quant_cost_qty[quant.cost] = quant.qty
move_obj = self.pool.get('account.move')
for cost, qty in quant_cost_qty.items():
move_lines = self._prepare_account_move_line(cr, uid, move, qty, cost, credit_account_id, debit_account_id, context=context)
period_id = context.get('force_period', self.pool.get('account.period').find(cr, uid, context=context)[0])
move_obj.create(cr, uid, {'journal_id': journal_id,
'line_id': move_lines,
'period_id': period_id,
'date': fields.date.context_today(self, cr, uid, context=context),
'ref': move.picking_id.name}, context=context)
#def _reconcile_single_negative_quant(self, cr, uid, to_solve_quant, quant, quant_neg, qty, context=None):
# move = self._get_latest_move(cr, uid, to_solve_quant, context=context)
# quant_neg_position = quant_neg.negative_dest_location_id.usage
# remaining_solving_quant, remaining_to_solve_quant = super(stock_quant, self)._reconcile_single_negative_quant(cr, uid, to_solve_quant, quant, quant_neg, qty, context=context)
# #update the standard price of the product, only if we would have done it if we'd have had enough stock at first, which means
# #1) there isn't any negative quant anymore
# #2) the product cost's method is 'real'
# #3) we just fixed a negative quant caused by an outgoing shipment
# if not remaining_to_solve_quant and move.product_id.cost_method == 'real' and quant_neg_position != 'internal':
# self.pool.get('stock.move')._store_average_cost_price(cr, uid, move, context=context)
# return remaining_solving_quant, remaining_to_solve_quant
class stock_move(osv.osv):
_inherit = "stock.move"
def action_done(self, cr, uid, ids, context=None):
self.product_price_update_before_done(cr, uid, ids, context=context)
res = super(stock_move, self).action_done(cr, uid, ids, context=context)
self.product_price_update_after_done(cr, uid, ids, context=context)
return res
def _store_average_cost_price(self, cr, uid, move, context=None):
''' move is a browse record '''
product_obj = self.pool.get('product.product')
if any([q.qty <= 0 for q in move.quant_ids]):
#if there is a negative quant, the standard price shouldn't be updated
return
#Note: here we can't store a quant.cost directly as we may have moved out 2 units (1 unit at 5€ and 1 unit at 7€) and in case of a product return of 1 unit, we can't know which of the 2 costs has to be used (5€ or 7€?). So at that time, thanks to the average valuation price we are storing, we will value it at 6€
average_valuation_price = 0.0
for q in move.quant_ids:
average_valuation_price += q.qty * q.cost
average_valuation_price = average_valuation_price / move.product_qty
# Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
ctx = dict(context, force_company=move.company_id.id)
product_obj.write(cr, SUPERUSER_ID, [move.product_id.id], {'standard_price': average_valuation_price}, context=ctx)
self.write(cr, uid, [move.id], {'price_unit': average_valuation_price}, context=context)
def product_price_update_before_done(self, cr, uid, ids, context=None):
product_obj = self.pool.get('product.product')
tmpl_dict = {}
for move in self.browse(cr, uid, ids, context=context):
#adapt standard price on incoming moves if the product cost_method is 'average'
if (move.location_id.usage == 'supplier') and (move.product_id.cost_method == 'average'):
product = move.product_id
prod_tmpl_id = move.product_id.product_tmpl_id.id
qty_available = move.product_id.product_tmpl_id.qty_available
if tmpl_dict.get(prod_tmpl_id):
product_avail = qty_available + tmpl_dict[prod_tmpl_id]
else:
tmpl_dict[prod_tmpl_id] = 0
product_avail = qty_available
if product_avail <= 0:
new_std_price = move.price_unit
else:
# Get the standard price
amount_unit = product.standard_price
new_std_price = ((amount_unit * product_avail) + (move.price_unit * move.product_qty)) / (product_avail + move.product_qty)
tmpl_dict[prod_tmpl_id] += move.product_qty
# Write the standard price, as SUPERUSER_ID because a warehouse manager may not have the right to write on products
ctx = dict(context or {}, force_company=move.company_id.id)
product_obj.write(cr, SUPERUSER_ID, [product.id], {'standard_price': new_std_price}, context=ctx)
def product_price_update_after_done(self, cr, uid, ids, context=None):
'''
This method adapts the price on the product when necessary
'''
for move in self.browse(cr, uid, ids, context=context):
#adapt standard price on outgoing moves if the product cost_method is 'real', so that a return
#or an inventory loss is made using the last value used for an outgoing valuation.
if move.product_id.cost_method == 'real' and move.location_dest_id.usage != 'internal':
#store the average price of the move on the move and product form
self._store_average_cost_price(cr, uid, move, context=context)
|
zioproto/radiomate
|
refs/heads/master
|
radiomate/jukeslots/main.py
|
2
|
# vim:fileencoding=utf-8:nomodified
# $Id$
#
# Copyright 2010 Claudio Pisa (clauz at ninux dot org)
#
# This file is part of RadioMate
#
# RadioMate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RadioMate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RadioMate. If not, see <http://www.gnu.org/licenses/>.
#
import random
import time
from base import *
from .. import config
__all__ = ["MainJukeSlot"]
class MainJukeSlot(JukeSlot):
"The main jukeslot, to which others connect"
def __init__(self):
JukeSlot.__init__(self, timeslot=mate.TimeSlot(), mainpassword="")
# set the global fallback playlist
self.fallbackplaylist = config.GLOBALFALLBACKPLAYLIST
self.password = None
def __generatePassword(self):
"generate a random password"
PASSLEN = 12 # password length in bytes
numbers = range(48, 58)
letters = range(65, 91) + range(97, 123)
random.seed(time.time())
passw = chr(random.choice(letters)) # must begin with a letter
for i in range(PASSLEN-1):
passw += chr(random.choice(letters + numbers))
self.password = passw
def getPassword(self):
"get a generated password"
if not self.password:
self.__generatePassword()
assert self.password
return self.password
def liquidsoapcode(self):
# main liquidsoap instance, which should always be alive
liq = """
# main liquidsoap instance
set("log.file.path",'/tmp/liq.log')
set("server.telnet", false)
set("harbor.bind_addr", "127.0.0.1")
set("harbor.port", %d)
transfunction = fun(a,b) -> sequence([fade.final(a, type="sin"), blank(duration=2.), fade.initial(b, type="sin")])
fallbackplaylist = %s
radiomate = input.harbor(password="%s", "radiomate.mp3")
takeover = input.http("%s")
radio = fallback(track_sensitive=false,
[takeover, radiomate, fallbackplaylist, blank()],
transitions = [transfunction, transfunction, transfunction, transfunction]
)
""" % (config.INTERNALJUKEPORT, self.getFallBackPlayListLiquidCode(),
self.getPassword(), config.TAKEOVERMOUNTURL)
return liq
|
antoinecarme/pyaf
|
refs/heads/master
|
tests/periodicities/Second/Cycle_Second_200_S_360.py
|
1
|
import tests.periodicities.period_test as per
per.buildModel((360 , 'S' , 200));
|
xelphene/nmine
|
refs/heads/master
|
nmine/streamfind.py
|
1
|
import os
import re
from ianatlds import IANA_TLD_LIST
class StreamFinder:
endings = ['.'+tld for tld in IANA_TLD_LIST]
re_dns = re.compile('([-.a-zA-Z0-9]+)')
re_windowend = re.compile('.*?([-.a-zA-Z0-9]+)$')
blacklist = ('document.do','asp.net')
def __init__(self):
self.pos=0
self._buf = ''
self.extraTLDprovider = lambda: set()
def updateTLDs(self):
print 'NEW TLDs'
pass
@classmethod
def tokenizeString(cls, s):
"""exhaustively scan a string and return a list of dicts describing the DNS names found in it."""
sf = cls()
rv = []
for name in sf.feed(s):
rv.append({'type': 'dns', 'value': name})
return rv
@classmethod
def searchWindow(cls, s):
"""search for a DNS name in the buffer. If one is found, return the
DNS name and the REST of the input string. If one is not found,
return any ending of the input string the could possibly be the
start of a DNS name if more data were appended."""
#for i in range(0,len(self._buf)):
mg = cls.re_dns.search(s)
if mg:
start = mg.start()
end = mg.end()
#word = self._buf[start:end]
word = mg.group(1)
#print 'possibility:',repr(word)
# note: extraTLDprovider is an instance attribute and cannot be consulted from this classmethod, so only the static TLD list is checked here
for ending in cls.endings:
if word.lower().endswith(ending) and word.lower()!=ending and word.lower() not in cls.blacklist:
#print 'NAME:',word
return (s[0:start], word, s[end:])
# if we had a possibility but it isn't real, AND it goes
# all the way to the end of the string, then preserve the ending
if end==len(s):
return ('',None, s)
else:
return (s[0:start], None, s[end:])
else:
mg = cls.re_windowend.match(s)
if mg:
# keep the tail that could still grow into a DNS name once more data arrives
return (s[0:len(s) - len(mg.group(1))], None, mg.group(1))
else:
#print 'window end match failure:',repr(s)
return (s, None, '')
def checkBuffer(self):
"""exhaustively check self._buf until there's no hope for finding
any DNS names in it."""
while True:
###Bprint 'buffer check round:',repr(self._buf)
(start, name, newbuf) = self.__class__.searchWindow(self._buf)
###print 'got name=%s newbuf=%s' % (repr(name),repr(newbuf))
if name:
yield name
if newbuf==self._buf:
return
self._buf = newbuf
def feed(self, chunk):
self._buf += chunk
for name in self.checkBuffer():
yield name
def searchPath(self, path):
f=file(path)
@classmethod
def searchFile(cls, f):
sf = cls()
chunk = f.read(1024)
while chunk:
for name in sf.feed(chunk):
yield name
chunk = f.read(1024)
@classmethod
def searchPath(cls, path):
f=file(path)
return cls.searchFile(f)
@classmethod
def searchDir(cls, path):
for (dirpath, drnames, filenames) in os.walk(path):
for filename in filenames:
path = os.path.join(dirpath,filename)
#print 'checking',path
for name in cls.searchPath(path):
yield name
|
nkfly/vm-hw1
|
refs/heads/master
|
roms/seabios/tools/checksum.py
|
131
|
#!/usr/bin/env python
# Script to report the checksum of a file.
#
# Copyright (C) 2009 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys
def main():
data = sys.stdin.read()
ords = map(ord, data)
print "sum=%x\n" % sum(ords)
if __name__ == '__main__':
main()
|
laulysta/nmt_transformer
|
refs/heads/master
|
transformer/Layers.py
|
1
|
''' Define the Layers '''
import torch.nn as nn
from transformer.SubLayers import MultiHeadAttention, PositionwiseFeedForward
__author__ = "Yu-Hsiang Huang"
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner_hid, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner_hid, dropout=dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, attn_mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
class DecoderLayer(nn.Module):
''' Compose with three layers '''
def __init__(self, d_model, d_inner_hid, n_head, d_k, d_v, dropout=0.1, use_ctx=False):
super(DecoderLayer, self).__init__()
self.use_ctx=use_ctx
if use_ctx:
self.ctx_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner_hid, dropout=dropout)
def forward(self, dec_input, enc_output, ctx_output=None, slf_attn_mask=None, dec_enc_attn_mask=None, dec_ctx_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(
dec_input, dec_input, dec_input, attn_mask=slf_attn_mask)
dec_output, dec_enc_attn = self.enc_attn(
dec_output, enc_output, enc_output, attn_mask=dec_enc_attn_mask)
if self.use_ctx:
dec_output, dec_ctx_attn = self.ctx_attn(
dec_output, ctx_output, ctx_output, attn_mask=dec_ctx_attn_mask)
dec_output = self.pos_ffn(dec_output)
if self.use_ctx:
return dec_output, dec_slf_attn, dec_enc_attn, dec_ctx_attn
else:
return dec_output, dec_slf_attn, dec_enc_attn
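# --- Illustrative usage sketch (added; not part of the original module) ---
# A single forward pass through one EncoderLayer with illustrative dimensions;
# assumes the SubLayers implementations accept attn_mask=None (the default above).
if __name__ == '__main__':
    import torch
    layer = EncoderLayer(d_model=512, d_inner_hid=1024, n_head=8, d_k=64, d_v=64)
    dummy = torch.randn(2, 10, 512)  # (batch, sequence length, d_model)
    out, attn = layer(dummy)
    print(out.size())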
|
Kiiv/Sick-Beard
|
refs/heads/development
|
lib/requests/packages/chardet2/test.py
|
52
|
from __future__ import print_function
import sys, glob
sys.path.insert(0, '..')
from chardet.universaldetector import UniversalDetector
count = 0
u = UniversalDetector()
for f in glob.glob(sys.argv[1]):
print(f.ljust(60), end=' ')
u.reset()
for line in open(f, 'rb'):
u.feed(line)
if u.done: break
u.close()
result = u.result
if result['encoding']:
print(result['encoding'], 'with confidence', result['confidence'])
else:
print('******** no result')
count += 1
print(count, 'tests')
|
saghul/aiohttp
|
refs/heads/master
|
tests/test_streams.py
|
2
|
"""Tests for streams.py"""
import asyncio
import unittest
from unittest import mock
from aiohttp import streams
from aiohttp import test_utils
class StreamReaderTests(unittest.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def _make_one(self, *args, **kwargs):
return streams.StreamReader(loop=self.loop, *args, **kwargs)
def test_create_waiter(self):
stream = self._make_one()
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, stream._create_waiter, 'test')
@mock.patch('aiohttp.streams.asyncio')
def test_ctor_global_loop(self, m_asyncio):
stream = streams.StreamReader()
self.assertIs(stream._loop, m_asyncio.get_event_loop.return_value)
def test_at_eof(self):
stream = self._make_one()
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_wait_eof(self):
stream = self._make_one()
wait_task = asyncio.Task(stream.wait_eof(), loop=self.loop)
def cb():
yield from asyncio.sleep(0.1, loop=self.loop)
stream.feed_eof()
asyncio.Task(cb(), loop=self.loop)
self.loop.run_until_complete(wait_task)
self.assertTrue(stream.is_eof())
self.assertIsNone(stream._eof_waiter)
def test_wait_eof_eof(self):
stream = self._make_one()
stream.feed_eof()
wait_task = asyncio.Task(stream.wait_eof(), loop=self.loop)
self.loop.run_until_complete(wait_task)
self.assertTrue(stream.is_eof())
def test_feed_empty_data(self):
stream = self._make_one()
stream.feed_data(b'')
self.assertEqual(b'', stream._buffer)
def test_feed_nonempty_data(self):
stream = self._make_one()
stream.feed_data(self.DATA)
self.assertEqual(self.DATA, stream._buffer)
def test_read_zero(self):
# Read zero bytes.
stream = self._make_one()
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_read(self):
# Read bytes.
stream = self._make_one()
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = self._make_one()
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
self.assertEqual(b'line2', stream._buffer)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = self._make_one()
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertEqual(b'', stream._buffer)
self.assertIs(data, streams.EOF_MARKER)
@mock.patch('aiohttp.streams.internal_logger')
def test_read_eof_infinit(self, internal_logger):
# Read bytes.
stream = self._make_one()
stream.feed_eof()
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.assertTrue(internal_logger.warning.called)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = self._make_one()
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
self.assertEqual(b'', stream._buffer)
def test_read_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = self._make_one()
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = self._make_one(limit=3)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
self.assertEqual(b'line2\n', stream._buffer)
stream = streams.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1')
stream.feed_data(b'li')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# No b'\n' at the end. The 'limit' is set to 3. So before
# waiting for the new data in buffer, 'readline' will consume
# the entire buffer, and since the length of the consumed data
# is more than 3, it will raise a ValueError. The buffer is
# expected to be empty now.
self.assertEqual(b'', stream._buffer)
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = self._make_one(limit=7)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer had just one line of data, and after raising
# a ValueError it should be empty.
self.assertEqual(b'', stream._buffer)
stream = self._make_one(limit=7)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'chunk3\n', stream._buffer)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = self._make_one()
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
self.assertEqual(b'line2\nline3\n', stream._buffer)
def test_readline_eof(self):
stream = self._make_one()
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = self._make_one()
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
self.assertIs(line, streams.EOF_MARKER)
def test_readline_read_byte_count(self):
stream = self._make_one()
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
self.assertEqual(b'ine3\n', stream._buffer)
def test_readline_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = self._make_one()
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
data = self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly(self):
# Read exact number of bytes.
stream = self._make_one()
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = self._make_one()
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
self.assertEqual(b'', stream._buffer)
def test_readexactly_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = self._make_one()
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = self._make_one()
@asyncio.coroutine
def set_err():
stream.set_exception(ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = self._make_one()
@asyncio.coroutine
def read_a_line():
yield from stream.readline()
t = asyncio.Task(read_a_line(), loop=self.loop)
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_readany_eof(self):
stream = self._make_one()
read_task = asyncio.Task(stream.readany(), loop=self.loop)
self.loop.call_soon(stream.feed_data, b'chunk1\n')
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\n', data)
self.assertEqual(b'', stream._buffer)
def test_readany_empty_eof(self):
stream = self._make_one()
stream.feed_eof()
read_task = asyncio.Task(stream.readany(), loop=self.loop)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertIs(data, streams.EOF_MARKER)
def test_readany_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readany())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readany())
def test_read_nowait(self):
stream = self._make_one()
stream.feed_data(b'line1\n')
stream.feed_data(b'line2\n')
self.assertEqual(
stream.read_nowait(), b'line1\nline2\n')
self.assertIs(
stream.read_nowait(), streams.EOF_MARKER)
self.assertEqual(
bytes(stream._buffer), b'')
def test_read_nowait_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
stream.set_exception(ValueError())
self.assertRaises(ValueError, stream.read_nowait)
def test_read_nowait_waiter(self):
stream = self._make_one()
stream.feed_data(b'line\n')
stream._waiter = stream._create_waiter('readany')
self.assertRaises(RuntimeError, stream.read_nowait)
class FlowControlStreamReaderTests(unittest.TestCase):
def setUp(self):
self.stream = unittest.mock.Mock()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def _make_one(self, *args, **kwargs):
return streams.FlowControlStreamReader(
self.stream, loop=self.loop, *args, **kwargs)
def test_read(self):
r = self._make_one()
r.feed_data(b'data')
res = self.loop.run_until_complete(r.read(1))
self.assertEqual(res, b'd')
self.assertTrue(self.stream.resume_stream.called)
self.assertTrue(self.stream.pause_stream.called)
def test_readline(self):
r = self._make_one()
r.feed_data(b'data\n')
res = self.loop.run_until_complete(r.readline())
self.assertEqual(res, b'data\n')
self.assertTrue(self.stream.resume_stream.called)
self.assertTrue(self.stream.pause_stream.called)
def test_readany(self):
r = self._make_one()
r.feed_data(b'data')
res = self.loop.run_until_complete(r.readany())
self.assertEqual(res, b'data')
self.assertTrue(self.stream.resume_stream.called)
self.assertTrue(self.stream.pause_stream.called)
def test_readexactly(self):
r = self._make_one()
r.feed_data(b'data')
res = self.loop.run_until_complete(r.readexactly(2))
self.assertEqual(res, b'da')
self.assertTrue(self.stream.resume_stream.called)
self.assertTrue(self.stream.pause_stream.called)
class DataQueueTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.buffer = streams.DataQueue(loop=self.loop)
def tearDown(self):
self.loop.close()
def test_is_eof(self):
self.assertFalse(self.buffer.is_eof())
self.buffer.feed_eof()
self.assertTrue(self.buffer.is_eof())
def test_at_eof(self):
self.assertFalse(self.buffer.at_eof())
self.buffer.feed_eof()
self.assertTrue(self.buffer.at_eof())
self.buffer._buffer.append(object())
self.assertFalse(self.buffer.at_eof())
def test_feed_data(self):
item = object()
self.buffer.feed_data(item)
self.assertEqual([item], list(self.buffer._buffer))
def test_feed_eof(self):
self.buffer.feed_eof()
self.assertTrue(self.buffer._eof)
def test_read(self):
item = object()
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_data(item)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertIs(item, data)
def test_read_eof(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
streams.EofStream, self.loop.run_until_complete, read_task)
def test_read_cancelled(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIsInstance(self.buffer._waiter, asyncio.Future)
read_task.cancel()
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, read_task)
self.assertTrue(self.buffer._waiter.cancelled())
self.buffer.feed_data(b'test')
self.assertIsNone(self.buffer._waiter)
def test_read_until_eof(self):
item = object()
self.buffer.feed_data(item)
self.buffer.feed_eof()
data = self.loop.run_until_complete(self.buffer.read())
self.assertIs(data, item)
self.assertRaises(
streams.EofStream,
self.loop.run_until_complete, self.buffer.read())
def test_read_exception(self):
self.buffer.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, self.buffer.read())
def test_read_exception_with_data(self):
val = object()
self.buffer.feed_data(val)
self.buffer.set_exception(ValueError())
self.assertIs(val, self.loop.run_until_complete(self.buffer.read()))
self.assertRaises(
ValueError, self.loop.run_until_complete, self.buffer.read())
def test_read_exception_on_wait(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIsInstance(self.buffer._waiter, asyncio.Future)
self.buffer.feed_eof()
self.buffer.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, read_task)
def test_exception(self):
self.assertIsNone(self.buffer.exception())
exc = ValueError()
self.buffer.set_exception(exc)
self.assertIs(self.buffer.exception(), exc)
def test_exception_waiter(self):
@asyncio.coroutine
def set_err():
self.buffer.set_exception(ValueError())
t1 = asyncio.Task(self.buffer.read(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))
self.assertRaises(ValueError, t1.result)
class FlowControlDataQueueTests(unittest.TestCase):
def setUp(self):
self.stream = unittest.mock.Mock()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.buffer = streams.FlowControlDataQueue(self.stream, loop=self.loop)
def tearDown(self):
self.loop.close()
def test_stream(self):
item = object()
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_data(item)
self.loop.call_soon(cb)
self.loop.run_until_complete(read_task)
self.assertTrue(self.stream.resume_stream.called)
self.assertTrue(self.stream.pause_stream.called)
class ChunksQueueTests(DataQueueTests):
def setUp(self):
super().setUp()
self.buffer = streams.ChunksQueue(loop=self.loop)
def test_read_eof(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_eof()
self.loop.call_soon(cb)
self.loop.run_until_complete(read_task)
self.assertTrue(self.buffer.at_eof())
def test_read_until_eof(self):
item = object()
self.buffer.feed_data(item)
self.buffer.feed_eof()
data = self.loop.run_until_complete(self.buffer.read())
self.assertIs(data, item)
thing = self.loop.run_until_complete(self.buffer.read())
self.assertEqual(thing, b'')
self.assertTrue(self.buffer.at_eof())
def test_readany(self):
self.assertIs(self.buffer.read.__func__, self.buffer.readany.__func__)
class FlowControlChunksQueueTests(FlowControlDataQueueTests):
def setUp(self):
super().setUp()
self.buffer = streams.FlowControlChunksQueue(self.stream,
loop=self.loop)
def test_readany(self):
self.assertIs(self.buffer.read.__func__, self.buffer.readany.__func__)
|
dchaplinsky/pep.org.ua
|
refs/heads/master
|
pepdb/tasks/management/commands/load_companies.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from random import randrange
import requests
import os.path
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
import logging
from io import TextIOWrapper, open
from unicodecsv import DictReader
from itertools import islice
from zipfile import ZipFile
from cStringIO import StringIO
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
from elasticsearch_dsl import Index
from elasticsearch_dsl.connections import connections
from elasticsearch.helpers import bulk
from dateutil.parser import parse
from tasks.elastic_models import EDRPOU
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("reader")
class EDRImportException(Exception):
pass
class EDR_Reader(object):
"""
Simple reader class which allows iterating over zipped or plain XML/CSV files
"""
def __init__(self, in_file, timestamp, revision, file_type="zip"):
"""
Initializes EDR_Reader class
:param in_file: file object (zipped or not)
:type in_file: StringIO or file handler
:param timestamp: date of export of the file
:type timestamp: datetime
:param revision: revision of the dump
:type revision: string
:param file_type: type of the file (usually extension)
:type file_type: string
"""
self.file = in_file
self.file_type = file_type
self.timestamp = timestamp
self.revision = revision
def iter_docs(self):
"""
Reads input file record by record.
:returns: iterator over company records from registry
:rtype: collections.Iterable[dict]
"""
if self.file_type == "zip":
with ZipFile(self.file) as zip_arch:
for fname in zip_arch.namelist():
try:
dec_fname = unicode(fname)
except UnicodeDecodeError:
dec_fname = fname.decode("cp866")
if "uo" in dec_fname.lower() or "юо" in dec_fname.lower():
logger.info("Reading {} file from archive {}".format(dec_fname, self.file))
if dec_fname.lower().endswith(".xml"):
with zip_arch.open(fname, 'r') as fp_raw:
for l in self._iter_xml(fp_raw):
yield EDRPOU(**l).to_dict(True)
if dec_fname.lower().endswith(".csv"):
with zip_arch.open(fname, 'r') as fp_raw:
for l in self._iter_csv(fp_raw):
yield EDRPOU(**l).to_dict(True)
elif self.file_type == "xml":
for l in self._iter_xml(self.file):
yield EDRPOU(**l).to_dict(True)
elif self.file_type == "csv":
for l in self._iter_csv(self.file):
yield EDRPOU(**l).to_dict(True)
def _iter_xml(self, fp_raw):
"""
Regex magic is required to cover records that were incorrectly exported
and are incomplete, which would otherwise make the whole XML file
invalid (happens sometimes).
"""
with TextIOWrapper(fp_raw, encoding="cp1251") as fp:
mapping = {
'NAME': 'name',
'SHORT_NAME': 'short_name',
'EDRPOU': 'edrpou',
'ADDRESS': 'location',
'BOSS': 'head',
'KVED': 'company_profile',
'STAN': 'status',
'FOUNDERS': 'founders',
"Найменування": 'name',
"Скорочена_назва": 'short_name',
"Код_ЄДРПОУ": 'edrpou',
"Місцезнаходження": 'location',
"ПІБ_керівника": 'head',
"Основний_вид_діяльності": 'company_profile',
"Стан": 'status',
"C0": ""
}
content = fp.read()
if "RECORD" in content[:1000]:
regex = '<RECORD>.*?</RECORD>'
else:
regex = '<ROW>.*?</ROW>'
for i, chunk in enumerate(re.finditer(regex, content, flags=re.S | re.U)):
company = {}
founders_list = []
try:
# Fucking ET!
etree = ET.fromstring(chunk.group(0).replace("Місцезнаходження", "ADDRESS").encode("utf-8"))
except ParseError:
logger.error('Cannot parse record #{}, {}'.format(i, chunk))
continue
for el in etree.getchildren():
if el.tag == 'EDRPOU' and el.text and el.text.lstrip('0'):
company[mapping[el.tag]] = int(el.text)
elif el.tag == 'FOUNDERS':
for founder in el.getchildren():
founders_list.append(founder.text)
else:
if el.tag in mapping:
company[mapping[el.tag]] = el.text
company[mapping['FOUNDERS']] = founders_list
company["last_update"] = self.timestamp
company["file_revision"] = self.revision
if i and i % 50000 == 0:
logger.warning('Read {} companies from XML feed'.format(i))
yield company
def _iter_csv(self, fp_raw):
r = DictReader(fp_raw, delimiter=str(";"), encoding="cp1251")
mapping = {
"Найменування": 'name',
"Скорочена назва": 'short_name',
"Код ЄДРПОУ": 'edrpou',
"Місцезнаходження": 'location',
"ПІБ керівника": 'head',
"Основний вид діяльності": 'company_profile',
"Стан": 'status',
}
for i, chunk in enumerate(r):
company = {}
for k, v in chunk.items():
if k.strip():
if mapping[k] == "edrpou" and v:
company[mapping[k]] = int(v)
else:
company[mapping[k]] = v
company['founders'] = []
company["last_update"] = self.timestamp
company["file_revision"] = self.revision
if i and i % 50000 == 0:
logger.warning('Read {} companies from CSV feed'.format(i))
yield company
class Command(BaseCommand):
help = ('Loads XML with data from registry of companies of Ukraine into '
'elastic index for further matching with companies in DB')
def add_arguments(self, parser):
parser.add_argument(
'--revision',
help='EDR dump revision to retrieve (leave empty to retrieve latest)',
)
parser.add_argument(
'--guid',
default="06bbccbd-e19c-40d5-9e18-447b110c0b4c",
help='Dataset to retrieve',
)
parser.add_argument(
'--filename',
help='Filename of the dump to load file manually',
)
parser.add_argument(
'--dump_date',
help='Date of dump, obtained manually, day first',
)
def handle(self, *args, **options):
self.proxies = {}
if hasattr(settings, "PROXY"):
self.proxies["http"] = settings.PROXY
self.proxies["https"] = settings.PROXY
GUID = options["guid"]
fp = None
if not options["revision"]:
latest = EDRPOU.search().aggs.metric("max_last_update", "max", field="last_update")[:1].execute()
if latest:
update_after = latest[0].last_update
self.stdout.write("Only loading dumps after {}".format(update_after))
else:
raise EDRImportException("Current index is empty, please run manual import. For fuck sake")
if not options["filename"]:
data_url = None
timestamp = None
revision = None
try:
response = requests.get(
"https://data.gov.ua/api/3/action/resource_show",
{"id": GUID, "nocache": randrange(100)}
).json()
if not response.get("success"):
self.stderr.write("Unsuccessful response from api.")
return
revisions = sorted(
response["result"]["resource_revisions"],
key=lambda x: parse(x["resource_created"])
)
for rev in revisions:
revision = rev["url"].strip("/").rsplit('/', 1)[-1]
if not options["revision"]:
timestamp = parse(rev["resource_created"])
if update_after is None or update_after < timestamp:
data_url = rev["url"]
break
if revision == options["revision"]:
timestamp = parse(rev["resource_created"])
data_url = rev["url"]
break
except (TypeError, IndexError, KeyError):
self.stderr.write("Cannot obtain information about dump file")
raise
if not data_url:
self.stderr.write("Can not get dataset url from api.")
return
self.stdout.write("Loading data of revision: {}, created at: {}".format(revision, timestamp))
r = requests.get(data_url, stream=True)
ext = r.headers["Content-Type"].split("/")[-1]
ext = ext.lower().lstrip(".")
if ext not in ["zip", "xml", "csv"]:
self.stderr.write("Unsupported dataset file type: {}".format(ext))
return
reader = EDR_Reader(StringIO(r.content), timestamp, revision, ext)
elif options["revision"] and options["dump_date"]:
dump_date = timezone.make_aware(parse(options["dump_date"], dayfirst=True))
_, ext = os.path.splitext(options["filename"])
fp = open(options["filename"], "rb")
reader = EDR_Reader(fp, dump_date, options["revision"], ext.lower().lstrip("."))
else:
self.stderr.write("You should provide (possibly fake) revision id and date of dump when loading files manually")
iterator = reader.iter_docs()
first_portion = list(islice(iterator, 1000))
if first_portion:
Index(EDRPOU._doc_type.index).delete(ignore=404)
EDRPOU.init()
es = connections.get_connection()
bulk(es, first_portion)
bulk(es, iterator, chunk_size=10000)
else:
raise EDRImportException("Less than 1000 valid records, for fuck sake")
if fp:
fp.close()
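# --- Illustrative usage sketch (added; not part of the original module) ---
# Building an EDR_Reader over a local dump by hand; the file name, timestamp and
# revision id below are made up, and a configured Django/Elasticsearch environment
# is assumed for the imports at the top of this module.
if __name__ == "__main__":
    from datetime import datetime
    with open("edr_dump_example.zip", "rb") as dump_fp:
        example_reader = EDR_Reader(dump_fp, datetime(2018, 1, 1), "manual-revision", "zip")
        for doc in islice(example_reader.iter_docs(), 5):
            print(doc)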
|
lancezlin/pylearn2
|
refs/heads/master
|
pylearn2/testing/__init__.py
|
49
|
""" Functionality for supporting unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import functools
from theano import config
def no_debug_mode(fn):
"""
A decorator used to say a test is too slow to run in debug
mode.
"""
# Use functools.wraps so that wrapped.func_name matches
# fn.func_name. Otherwise nosetests won't recognize the
# returned function as a test.
@functools.wraps(fn)
def wrapped(*args, **kwargs):
orig_mode = config.mode
if orig_mode in ["DebugMode", "DEBUG_MODE"]:
config.mode = "FAST_RUN"
try:
return fn(*args, **kwargs)
finally:
config.mode = orig_mode
return wrapped
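# --- Illustrative usage sketch (added; not part of the original module) ---
# A hypothetical slow test wrapped with no_debug_mode so it always runs in
# FAST_RUN even when theano's config.mode is DebugMode.
@no_debug_mode
def _example_slow_test():
    # an expensive training run would go here; kept as a stub for illustration
    pass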
|
JioEducation/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/features/__init__.py
|
12133432
| |
petteyg/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/en_GB/__init__.py
|
12133432
| |
chrisfranklin/badasschat
|
refs/heads/master
|
badasschat/__init__.py
|
12133432
| |
kirti3192/spoken-website
|
refs/heads/master
|
cron/spoken_search/whoosh/scoring.py
|
94
|
# Copyright 2008 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains classes for scoring (and sorting) search results.
"""
from __future__ import division
from math import log, pi
from whoosh.compat import iteritems
# Base classes
class WeightingModel(object):
"""Abstract base class for scoring models. A WeightingModel object provides
a method, ``scorer``, which returns an instance of
:class:`whoosh.scoring.Scorer`.
Basically, WeightingModel objects store the configuration information for
the model (for example, the values of B and K1 in the BM25F model), and
then creates a scorer instance based on additional run-time information
(the searcher, the fieldname, and term text) to do the actual scoring.
"""
use_final = False
def idf(self, searcher, fieldname, text):
"""Returns the inverse document frequency of the given term.
"""
parent = searcher.get_parent()
n = parent.doc_frequency(fieldname, text)
dc = parent.doc_count_all()
return log(dc / (n + 1)) + 1
def scorer(self, searcher, fieldname, text, qf=1):
"""Returns an instance of :class:`whoosh.scoring.Scorer` configured
for the given searcher, fieldname, and term text.
"""
raise NotImplementedError(self.__class__.__name__)
def final(self, searcher, docnum, score):
"""Returns a final score for each document. You can use this method
in subclasses to apply document-level adjustments to the score, for
example using the value of stored field to influence the score
(although that would be slow).
WeightingModel sub-classes that use ``final()`` should have the
attribute ``use_final`` set to ``True``.
:param searcher: :class:`whoosh.searching.Searcher` for the index.
:param docnum: the doc number of the document being scored.
:param score: the document's accumulated term score.
:rtype: float
"""
return score
class BaseScorer(object):
"""Base class for "scorer" implementations. A scorer provides a method for
scoring a document, and sometimes methods for rating the "quality" of a
document and a matcher's current "block", to implement quality-based
optimizations.
Scorer objects are created by WeightingModel objects. Basically,
WeightingModel objects store the configuration information for the model
(for example, the values of B and K1 in the BM25F model), and then creates
a scorer instance.
"""
def supports_block_quality(self):
"""Returns True if this class supports quality optimizations.
"""
return False
def score(self, matcher):
"""Returns a score for the current document of the matcher.
"""
raise NotImplementedError(self.__class__.__name__)
def max_quality(self):
"""Returns the *maximum limit* on the possible score the matcher can
give. This can be an estimate and not necessarily the actual maximum
score possible, but it must never be less than the actual maximum
score.
"""
raise NotImplementedError(self.__class__.__name__)
def block_quality(self, matcher):
"""Returns the *maximum limit* on the possible score the matcher can
give **in its current "block"** (whatever concept of "block" the
backend might use). This can be an estimate and not necessarily the
actual maximum score possible, but it must never be less than the
actual maximum score.
If this score is less than the minimum score
required to make the "top N" results, then we can tell the matcher to
skip ahead to another block with better "quality".
"""
raise NotImplementedError(self.__class__.__name__)
# Scorer that just returns term weight
class WeightScorer(BaseScorer):
"""A scorer that simply returns the weight as the score. This is useful
for more complex weighting models to return when they are asked for a
scorer for fields that aren't scorable (don't store field lengths).
"""
def __init__(self, maxweight):
self._maxweight = maxweight
def supports_block_quality(self):
return True
def score(self, matcher):
return matcher.weight()
def max_quality(self):
return self._maxweight
def block_quality(self, matcher):
return matcher.block_max_weight()
@classmethod
def for_(cls, searcher, fieldname, text):
ti = searcher.term_info(fieldname, text)
return cls(ti.max_weight())
# Base scorer for models that only use weight and field length
class WeightLengthScorer(BaseScorer):
"""Base class for scorers where the only per-document variables are term
weight and field length.
Subclasses should override the ``_score(weight, length)`` method to return
the score for a document with the given weight and length, and call the
``setup()`` method at the end of the initializer to set up common
attributes.
"""
def setup(self, searcher, fieldname, text):
"""Initializes the scorer and then does the busy work of
adding the ``dfl()`` function and maximum quality attribute.
This method assumes the initializers of WeightLengthScorer subclasses
always take ``searcher, fieldname, text`` as the first three
arguments. Any additional arguments given to this method are passed
through to the initializer.
Note: this method calls ``self._score()``, so you should only call it
in the initializer after setting up whatever attributes ``_score()``
depends on::
class MyScorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, parm=1.0):
self.parm = parm
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
return (weight / (length + 1)) * self.parm
"""
ti = searcher.term_info(fieldname, text)
if not searcher.schema[fieldname].scorable:
return WeightScorer(ti.max_weight())
self.dfl = lambda docid: searcher.doc_field_length(docid, fieldname, 1)
self._maxquality = self._score(ti.max_weight(), ti.min_length())
def supports_block_quality(self):
return True
def score(self, matcher):
return self._score(matcher.weight(), self.dfl(matcher.id()))
def max_quality(self):
return self._maxquality
def block_quality(self, matcher):
return self._score(matcher.block_max_weight(),
matcher.block_min_length())
def _score(self, weight, length):
# Override this method with the actual scoring function
raise NotImplementedError(self.__class__.__name__)
# WeightingModel implementations
# Debugging model
class DebugModel(WeightingModel):
def __init__(self):
self.log = []
def scorer(self, searcher, fieldname, text, qf=1):
return DebugScorer(searcher, fieldname, text, self.log)
class DebugScorer(BaseScorer):
def __init__(self, searcher, fieldname, text, log):
ti = searcher.term_info(fieldname, text)
self._maxweight = ti.max_weight()
self.searcher = searcher
self.fieldname = fieldname
self.text = text
self.log = log
def supports_block_quality(self):
return True
def score(self, matcher):
fieldname, text = self.fieldname, self.text
docid = matcher.id()
w = matcher.weight()
length = self.searcher.doc_field_length(docid, fieldname)
self.log.append((fieldname, text, docid, w, length))
return w
def max_quality(self):
return self._maxweight
def block_quality(self, matcher):
return matcher.block_max_weight()
# BM25F Model
def bm25(idf, tf, fl, avgfl, B, K1):
# idf - inverse document frequency
# tf - term frequency in the current document
# fl - field length in the current document
# avgfl - average field length across documents in collection
# B, K1 - free parameters
return idf * ((tf * (K1 + 1)) / (tf + K1 * ((1 - B) + B * fl / avgfl)))
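# --- Illustrative worked example (added; the numbers are made up) ---
# With idf=2.0, tf=3, fl=100, avgfl=120, B=0.75, K1=1.2:
#   bm25(2.0, 3, 100, 120, 0.75, 1.2)
#     = 2.0 * (3 * 2.2) / (3 + 1.2 * (0.25 + 0.75 * 100 / 120)) ~= 3.26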
class BM25F(WeightingModel):
"""Implements the BM25F scoring algorithm.
"""
def __init__(self, B=0.75, K1=1.2, **kwargs):
"""
>>> from whoosh import scoring
>>> # Set a custom B value for the "content" field
>>> w = scoring.BM25F(B=0.75, content_B=1.0, K1=1.5)
:param B: free parameter, see the BM25 literature. Keyword arguments of
the form ``fieldname_B`` (for example, ``body_B``) set field-
specific values for B.
:param K1: free parameter, see the BM25 literature.
"""
self.B = B
self.K1 = K1
self._field_B = {}
for k, v in iteritems(kwargs):
if k.endswith("_B"):
fieldname = k[:-2]
self._field_B[fieldname] = v
def supports_block_quality(self):
return True
def scorer(self, searcher, fieldname, text, qf=1):
if not searcher.schema[fieldname].scorable:
return WeightScorer.for_(searcher, fieldname, text)
if fieldname in self._field_B:
B = self._field_B[fieldname]
else:
B = self.B
return BM25FScorer(searcher, fieldname, text, B, self.K1, qf=qf)
class BM25FScorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, B, K1, qf=1):
# IDF and average field length are global statistics, so get them from
# the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
self.idf = parent.idf(fieldname, text)
self.avgfl = parent.avg_field_length(fieldname) or 1
self.B = B
self.K1 = K1
self.qf = qf
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
s = bm25(self.idf, weight, length, self.avgfl, self.B, self.K1)
return s
# DFree model
def dfree(tf, cf, qf, dl, fl):
# tf - term frequency in current document
# cf - term frequency in collection
# qf - term frequency in query
# dl - field length in current document
# fl - total field length across all documents in collection
prior = tf / dl
post = (tf + 1.0) / (dl + 1.0)
invpriorcol = fl / cf
norm = tf * log(post / prior)
return qf * norm * (tf * (log(prior * invpriorcol))
+ (tf + 1.0) * (log(post * invpriorcol))
+ 0.5 * log(post / prior))
class DFree(WeightingModel):
"""Implements the DFree scoring model from Terrier.
See http://terrier.org/
"""
def supports_block_quality(self):
return True
def scorer(self, searcher, fieldname, text, qf=1):
if not searcher.schema[fieldname].scorable:
return WeightScorer.for_(searcher, fieldname, text)
return DFreeScorer(searcher, fieldname, text, qf=qf)
class DFreeScorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, qf=1):
# Total term weight and total field length are global statistics, so
# get them from the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
self.cf = parent.weight(fieldname, text)
self.fl = parent.field_length(fieldname)
self.qf = qf
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
return dfree(weight, self.cf, self.qf, length, self.fl)
# PL2 model
rec_log2_of_e = 1.0 / log(2)
def pl2(tf, cf, qf, dc, fl, avgfl, c):
# tf - term frequency in the current document
# cf - term frequency in the collection
# qf - term frequency in the query
# dc - doc count
# fl - field length in the current document
# avgfl - average field length across all documents
# c - free parameter
TF = tf * log(1.0 + (c * avgfl) / fl)
norm = 1.0 / (TF + 1.0)
f = cf / dc
return norm * qf * (TF * log(1.0 / f)
+ f * rec_log2_of_e
+ 0.5 * log(2 * pi * TF)
+ TF * (log(TF) - rec_log2_of_e))
class PL2(WeightingModel):
"""Implements the PL2 scoring model from Terrier.
See http://terrier.org/
"""
def __init__(self, c=1.0):
self.c = c
def scorer(self, searcher, fieldname, text, qf=1):
if not searcher.schema[fieldname].scorable:
return WeightScorer.for_(searcher, fieldname, text)
return PL2Scorer(searcher, fieldname, text, self.c, qf=qf)
class PL2Scorer(WeightLengthScorer):
def __init__(self, searcher, fieldname, text, c, qf=1):
# Total term weight, document count, and average field length are
# global statistics, so get them from the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
self.cf = parent.frequency(fieldname, text)
self.dc = parent.doc_count_all()
self.avgfl = parent.avg_field_length(fieldname) or 1
self.c = c
self.qf = qf
self.setup(searcher, fieldname, text)
def _score(self, weight, length):
return pl2(weight, self.cf, self.qf, self.dc, length, self.avgfl,
self.c)
# Simple models
class Frequency(WeightingModel):
def scorer(self, searcher, fieldname, text, qf=1):
maxweight = searcher.term_info(fieldname, text).max_weight()
return WeightScorer(maxweight)
class TF_IDF(WeightingModel):
def scorer(self, searcher, fieldname, text, qf=1):
# IDF is a global statistic, so get it from the top-level searcher
parent = searcher.get_parent() # Returns self if no parent
idf = parent.idf(fieldname, text)
maxweight = searcher.term_info(fieldname, text).max_weight()
return TF_IDFScorer(maxweight, idf)
class TF_IDFScorer(BaseScorer):
def __init__(self, maxweight, idf):
self._maxquality = maxweight * idf
self.idf = idf
def supports_block_quality(self):
return True
def score(self, matcher):
return matcher.weight() * self.idf
def max_quality(self):
return self._maxquality
def block_quality(self, matcher):
return matcher.block_max_weight() * self.idf
# Utility models
class Weighting(WeightingModel):
"""This class provides backwards-compatibility with the old weighting
class architecture, so any existing custom scorers don't need to be
rewritten.
"""
def scorer(self, searcher, fieldname, text, qf=1):
return self.CompatibilityScorer(searcher, fieldname, text, self.score)
def score(self, searcher, fieldname, text, docnum, weight):
raise NotImplementedError
class CompatibilityScorer(BaseScorer):
def __init__(self, searcher, fieldname, text, scoremethod):
self.searcher = searcher
self.fieldname = fieldname
self.text = text
self.scoremethod = scoremethod
def score(self, matcher):
return self.scoremethod(self.searcher, self.fieldname, self.text,
matcher.id(), matcher.weight())
class FunctionWeighting(WeightingModel):
"""Uses a supplied function to do the scoring. For simple scoring functions
and experiments this may be simpler to use than writing a full weighting
model class and scorer class.
The function should accept the arguments
``searcher, fieldname, text, matcher``.
For example, the following function will score documents based on the
earliest position of the query term in the document::
def pos_score_fn(searcher, fieldname, text, matcher):
poses = matcher.value_as("positions")
return 1.0 / (poses[0] + 1)
pos_weighting = scoring.FunctionWeighting(pos_score_fn)
with myindex.searcher(weighting=pos_weighting) as s:
results = s.search(q)
Note that the searcher passed to the function may be a per-segment searcher
for performance reasons. If you want to get global statistics inside the
function, you should use ``searcher.get_parent()`` to get the top-level
searcher. (However, if you are using global statistics, you should probably
write a real model/scorer combo so you can cache them on the object.)
"""
def __init__(self, fn):
self.fn = fn
def scorer(self, searcher, fieldname, text, qf=1):
return self.FunctionScorer(self.fn, searcher, fieldname, text, qf=qf)
class FunctionScorer(BaseScorer):
def __init__(self, fn, searcher, fieldname, text, qf=1):
self.fn = fn
self.searcher = searcher
self.fieldname = fieldname
self.text = text
self.qf = qf
def score(self, matcher):
return self.fn(self.searcher, self.fieldname, self.text, matcher)
class MultiWeighting(WeightingModel):
"""Chooses from multiple scoring algorithms based on the field.
"""
def __init__(self, default, **weightings):
"""The only non-keyword argument specifies the default
:class:`Weighting` instance to use. Keyword arguments specify
Weighting instances for specific fields.
For example, to use ``BM25`` for most fields, but ``Frequency`` for
the ``id`` field and ``TF_IDF`` for the ``keys`` field::
mw = MultiWeighting(BM25(), id=Frequency(), keys=TF_IDF())
:param default: the Weighting instance to use for fields not
specified in the keyword arguments.
"""
self.default = default
# Store weighting functions by field name
self.weightings = weightings
def scorer(self, searcher, fieldname, text, qf=1):
w = self.weightings.get(fieldname, self.default)
return w.scorer(searcher, fieldname, text, qf=qf)
class ReverseWeighting(WeightingModel):
"""Wraps a weighting object and subtracts the wrapped model's scores from
0, essentially reversing the weighting model.
"""
def __init__(self, weighting):
self.weighting = weighting
def scorer(self, searcher, fieldname, text, qf=1):
subscorer = self.weighting.scorer(searcher, fieldname, text, qf=qf)
return ReverseWeighting.ReverseScorer(subscorer)
class ReverseScorer(BaseScorer):
def __init__(self, subscorer):
self.subscorer = subscorer
def supports_block_quality(self):
return self.subscorer.supports_block_quality()
def score(self, matcher):
return 0 - self.subscorer.score(matcher)
def max_quality(self):
return 0 - self.subscorer.max_quality()
def block_quality(self, matcher):
return 0 - self.subscorer.block_quality(matcher)
#class PositionWeighting(WeightingModel):
# def __init__(self, reversed=False):
# self.reversed = reversed
#
# def scorer(self, searcher, fieldname, text, qf=1):
# return PositionWeighting.PositionScorer()
#
# class PositionScorer(BaseScorer):
# def score(self, matcher):
# p = min(span.pos for span in matcher.spans())
# if self.reversed:
# return p
# else:
# return 0 - p
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/tools/gyp/test/mac/gyptest-clang-cxx-language-standard.py
|
264
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that CLANG_CXX_LANGUAGE_STANDARD works.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
test.run_gyp('clang-cxx-language-standard.gyp',
chdir='clang-cxx-language-standard')
test.build('clang-cxx-language-standard.gyp', test.ALL,
chdir='clang-cxx-language-standard')
test.pass_test()
|
evonove/mkm-sdk
|
refs/heads/master
|
mkmsdk/serializer.py
|
1
|
from xml.sax.saxutils import XMLGenerator
from io import StringIO
from .exceptions import SerializationException
class XMLSerializer:
"""Serializes data to XML"""
def __init__(self):
self.generator = None
def serialize(self, data):
"""
Serializes data to XML so that it can be
sent to backend, if data is not a dictionary
raises a SerializationException
Params:
`data`: A dictionary containing the data to serialize
Return:
`xml`: Returns a string containing data serialized to XML
"""
if not isinstance(data, dict):
raise SerializationException("Can't serialize data, must be a dictionary.")
stream = StringIO()
self.generator = XMLGenerator(stream, "utf-8")
self.generator.startDocument()
self.generator.startElement("request", {})
self._parse(data)
self.generator.endElement("request")
self.generator.endDocument()
return stream.getvalue()
def _parse(self, data, previous_element_tag=None):
"""
Parses data and creates the relative elements
Params:
`data`: Data to parse
`previous_element_tag`: When parsing a list we pass the previous element tag
"""
if isinstance(data, dict):
for key in data:
value = data[key]
self._parse(value, key)
elif isinstance(data, (list, tuple)):
for item in data:
self.generator.startElement(previous_element_tag, {})
self._parse(item, previous_element_tag)
self.generator.endElement(previous_element_tag)
else:
self.generator.startElement(previous_element_tag, {})
self.generator.characters("%s" % data)
self.generator.endElement(previous_element_tag)
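# --- Illustrative usage sketch (added; not part of the original module) ---
# Serializing a small nested payload; the key names below are hypothetical and
# only illustrate the dict/list -> XML mapping implemented above.
if __name__ == "__main__":
    serializer = XMLSerializer()
    print(serializer.serialize({"article": [{"idProduct": 100, "count": 1}]}))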
|
Laurawly/tvm-1
|
refs/heads/master
|
version.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the global script that sets the version information of TVM.
This script runs and updates all the locations related to versions.
List of affected files:
- tvm-root/python/tvm/_ffi/libinfo.py
- tvm-root/include/tvm/runtime/c_runtime_api.h
- tvm-root/conda/recipe/meta.yaml
- tvm-root/web/package.json
"""
import os
import re
import argparse
import logging
import subprocess
# Modify the following two settings during release
# ---------------------------------------------------
# Current version
# We use the version of the incoming release for code
# that is under development
__version__ = "0.8.dev0"
# Most recent tag, used for git describe validation
# set this value to be the most recent release tag
# before this development cycle.
__most_recent_tag__ = "v0.7.0"
# ---------------------------------------------------
PROJ_ROOT = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
def py_str(cstr):
return cstr.decode("utf-8")
def git_describe_version():
"""Get PEP-440 compatible public and local version using git describe.
Returns
-------
pub_ver: str
Public version.
local_ver: str
Local version (with additional label appended to pub_ver).
Note
----
We follow PEP 440's convention of public version
and local versions.
Here are some examples:
- pub_ver = '0.7.0', local_ver = '0.7.0':
We are at the 0.7.0 release.
- pub_ver = '0.8.dev94', local_ver = '0.8.dev94+g0d07a329e':
We are in the 0.8 development cycle.
The current source contains 94 additional commits
after the most recent tag (v0.7.0);
the git short hash of the current commit is 0d07a329e.
"""
cmd = ["git", "describe", "--tags"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=PROJ_ROOT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = py_str(out)
if msg.find("not a git repository") != -1:
return __version__, __version__
logging.warning("git describe: %s, use %s", msg, __version__)
return __version__, __version__
describe = py_str(out).strip()
arr_info = describe.split("-")
if not arr_info[0].endswith(__most_recent_tag__):
logging.warning(
"%s does not match most recent tag %s, fallback to %s",
describe,
__most_recent_tag__,
__version__,
)
return __version__, __version__
# Remove the v prefix, mainly to be robust
# to the case where v is not present as well.
if arr_info[0].startswith("v"):
arr_info[0] = arr_info[0][1:]
# hit the exact tag
if len(arr_info) == 1:
return arr_info[0], arr_info[0]
if len(arr_info) != 3:
logging.warning("Invalid output from git describe %s", describe)
return __version__, __version__
dev_pos = __version__.find(".dev")
pub_ver = "%s.dev%s" % (__version__[:dev_pos], arr_info[1])
local_ver = "%s+%s" % (pub_ver, arr_info[2])
return pub_ver, local_ver
# Implementations
def update(file_name, pattern, repl, dry_run=False):
update = []
hit_counter = 0
need_update = False
for l in open(file_name):
result = re.findall(pattern, l)
if result:
assert len(result) == 1
hit_counter += 1
if result[0] != repl:
l = re.sub(pattern, repl, l)
need_update = True
print("%s: %s -> %s" % (file_name, result[0], repl))
else:
print("%s: version is already %s" % (file_name, repl))
update.append(l)
if hit_counter != 1:
raise RuntimeError("Cannot find version in %s" % file_name)
if need_update and not dry_run:
with open(file_name, "w") as output_file:
for l in update:
output_file.write(l)
def sync_version(pub_ver, local_ver, dry_run):
"""Synchronize version."""
# python uses the PEP-440: local version
update(
os.path.join(PROJ_ROOT, "python", "tvm", "_ffi", "libinfo.py"),
r"(?<=__version__ = \")[.0-9a-z\+]+",
local_ver,
dry_run,
)
# Use public version for other parts for now
# Note that full git hash is already available in libtvm
# C++ header
update(
os.path.join(PROJ_ROOT, "include", "tvm", "runtime", "c_runtime_api.h"),
r'(?<=TVM_VERSION ")[.0-9a-z\+]+',
pub_ver,
dry_run,
)
# conda
update(
os.path.join(PROJ_ROOT, "conda", "recipe", "meta.yaml"),
r"(?<=version = ')[.0-9a-z\+]+",
pub_ver,
dry_run,
)
# web
# change to pre-release convention by npm
dev_pos = pub_ver.find(".dev")
npm_ver = pub_ver if dev_pos == -1 else "%s.0-%s" % (pub_ver[:dev_pos], pub_ver[dev_pos + 1 :])
update(
os.path.join(PROJ_ROOT, "web", "package.json"),
r'(?<="version": ")[.0-9a-z\-\+]+',
npm_ver,
dry_run,
)
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Detect and sychnronize version.")
parser.add_argument(
"--print-version",
action="store_true",
help="Print version to the command line. No changes is applied to files.",
)
parser.add_argument(
"--git-describe",
action="store_true",
help="Use git describe to generate development version.",
)
parser.add_argument("--dry-run", action="store_true")
opt = parser.parse_args()
pub_ver, local_ver = __version__, __version__
if opt.git_describe:
pub_ver, local_ver = git_describe_version()
if opt.print_version:
print(local_ver)
else:
sync_version(pub_ver, local_ver, opt.dry_run)
if __name__ == "__main__":
main()
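# --- Illustrative invocations (added; flags as defined in main() above) ---
#   python version.py --print-version --git-describe   # only print the detected version
#   python version.py --git-describe                   # rewrite the version in the files listed above
#   python version.py --git-describe --dry-run         # report changes without writing files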
|
Pivosgroup/google-git-repo
|
refs/heads/stable
|
subcmds/version.py
|
90
|
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from command import Command, MirrorSafeCommand
from git_command import git
from git_refs import HEAD
class Version(Command, MirrorSafeCommand):
wrapper_version = None
wrapper_path = None
common = False
helpSummary = "Display the version of repo"
helpUsage = """
%prog
"""
def Execute(self, opt, args):
rp = self.manifest.repoProject
rem = rp.GetRemote(rp.remote.name)
print('repo version %s' % rp.work_git.describe(HEAD))
print(' (from %s)' % rem.url)
if Version.wrapper_path is not None:
print('repo launcher version %s' % Version.wrapper_version)
print(' (from %s)' % Version.wrapper_path)
print(git.version().strip())
print('Python %s' % sys.version)
|
AbsentMoniker/ECE463Honors
|
refs/heads/master
|
web2py/applications/QuizMe/languages/default.py
|
180
|
# coding: utf8
{
'!langcode!': 'en-us',
'!langname!': 'English (US)',
'%s %%(shop)': '%s %%(shop)',
'%s %%(shop[0])': '%s %%(shop[0])',
'%s %%{quark[0]}': '%s %%{quark[0]}',
'%s %%{shop[0]}': '%s %%{shop[0]}',
'%s %%{shop}': '%s %%{shop}',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'@markmin\x01**Hello World**': '**Hello World**',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Ajax Recipes': 'Ajax Recipes',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Buy this book': 'Buy this book',
'Cannot be empty': 'Cannot be empty',
'Check to delete': 'Check to delete',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Created By',
'Created On': 'Created On',
'customize me!': 'customize me!',
'Database': 'Database',
'DB Model': 'DB Model',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'Download': 'Download',
'E-mail': 'E-mail',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'enter date and time as %(format)s': 'enter date and time as %(format)s',
'Errors': 'Errors',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'Group ID',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'Hello World': 'Hello World',
'Hello World ## comment': 'Hello World ',
'Hello World## comment': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Is Active': 'Is Active',
'Last name': 'Last name',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Login',
'Logout': 'Logout',
'Lost Password': 'Lost Password',
'Lost password?': 'Lost password?',
'Menu Model': 'Menu Model',
'Modified By': 'Modified By',
'Modified On': 'Modified On',
'My Sites': 'My Sites',
'Name': 'Name',
'Object or table name': 'Object or table name',
'Online examples': 'Online examples',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'please input your password again': 'please input your password again',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'Profile': 'Profile',
'Python': 'Python',
'Quick Examples': 'Quick Examples',
'Recipes': 'Recipes',
'Record ID': 'Record ID',
'Register': 'Register',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Semantic': 'Semantic',
'Services': 'Services',
'Stylesheet': 'Stylesheet',
'Support': 'Support',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'User ID',
'value already in database or empty': 'value already in database or empty',
'Verify Password': 'Verify Password',
'Videos': 'Videos',
'View': 'View',
'Welcome': 'Welcome',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
|
denys-duchier/django
|
refs/heads/master
|
tests/contenttypes_tests/operations_migrations/0002_rename_foo.py
|
133
|
from django.db import migrations
def assert_foo_contenttype_not_cached(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
try:
content_type = ContentType.objects.get_by_natural_key('contenttypes_tests', 'foo')
except ContentType.DoesNotExist:
pass
else:
if not ContentType.objects.filter(app_label='contenttypes_tests', model='foo').exists():
raise AssertionError('The contenttypes_tests.Foo ContentType should not be cached.')
elif content_type.model != 'foo':
raise AssertionError(
"The cached contenttypes_tests.Foo ContentType should have "
"its model set to 'foo'."
)
class Migration(migrations.Migration):
dependencies = [
('contenttypes_tests', '0001_initial'),
]
operations = [
migrations.RenameModel('Foo', 'RenamedFoo'),
migrations.RunPython(assert_foo_contenttype_not_cached, migrations.RunPython.noop)
]
|
Edu-Glez/Bank_sentiment_analysis
|
refs/heads/master
|
env/lib/python3.6/site-packages/urllib3/util/request.py
|
87
|
from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b, integer_types
from ..exceptions import UnrewindableBodyError
ACCEPT_ENCODING = 'gzip,deflate'
_FAILEDTELL = object()
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
def set_file_position(body, pos):
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
"""
if pos is not None:
rewind_body(body, pos)
elif getattr(body, 'tell', None) is not None:
try:
pos = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
def rewind_body(body, body_pos):
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
"""
body_seek = getattr(body, 'seek', None)
if body_seek is not None and isinstance(body_pos, integer_types):
try:
body_seek(body_pos)
except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect/retry.")
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError("Unable to record file position for rewinding "
"request body during a redirect/retry.")
else:
raise ValueError("body_pos must be of type integer, "
"instead it was %s." % type(body_pos))
|
gxx/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/modeltests/get_object_or_404/models.py
|
91
|
"""
35. DB-API Shortcuts
``get_object_or_404()`` is a shortcut function to be used in view functions for
performing a ``get()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``get()`` call.
``get_list_or_404()`` is a shortcut function to be used in view functions for
performing a ``filter()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``filter()`` call.
"""
from django.db import models
from django.http import Http404
from django.shortcuts import get_object_or_404, get_list_or_404
class Author(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class ArticleManager(models.Manager):
def get_query_set(self):
return super(ArticleManager, self).get_query_set().filter(authors__name__icontains='sir')
class Article(models.Model):
authors = models.ManyToManyField(Author)
title = models.CharField(max_length=50)
objects = models.Manager()
by_a_sir = ArticleManager()
def __unicode__(self):
return self.title
|
lamby/trydiffoscope
|
refs/heads/master
|
trydiffoscope/utils/test.py
|
2
|
from django.test import TestCase
from django.shortcuts import resolve_url
class TestCase(TestCase):
def assertStatusCode(self, status_code, fn, urlconf, *args, **kwargs):
response = fn(resolve_url(urlconf, *args, **kwargs))
self.assertEqual(
response.status_code,
status_code,
"Got HTTP %d but expected HTTP %d. Response:\n%s" % (
response.status_code,
status_code,
response,
)
)
return response
def assertGET(self, status_code, urlconf, *args, **kwargs):
return self.assertStatusCode(
status_code,
self.client.get,
urlconf,
*args,
**kwargs
)
def assertPOST(self, status_code, data, *args, **kwargs):
return self.assertStatusCode(
status_code, lambda x: self.client.post(x, data), *args, **kwargs
)
def assertRedirectsTo(self, response, urlconf, *args, **kwargs):
status_code = kwargs.pop('status_code', 302)
target_status_code = kwargs.pop('target_status_code', 200)
return self.assertRedirects(
response,
resolve_url(urlconf, *args, **kwargs),
status_code,
target_status_code,
)
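# Illustrative usage in a subclass (the URL name 'index' is hypothetical):
#
#   class IndexTests(TestCase):
#       def test_index(self):
#           self.assertGET(200, 'index')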
|
amartinez-cg/OpenShadingLanguage
|
refs/heads/master
|
testsuite/MaterialX/mx_divide/mx_divide_vector2/run.py
|
32
|
#!/usr/bin/env python
import os
loc = os.environ["OSLHOME"] + os.environ["MATERIALX_OSOS"]+ "/"
def buildCmd(shader, inputs):
cmd = "--print "
for pName, pValue in inputs.iteritems():
cmd += " -param " + pName + " " + pValue
cmd += " " + shader + " -o out mx.exr"
return cmd
inputs = {
"in1.x" : "0.1",
"in1.y" : "0.2",
"in2.x" : "0.3",
"in2.y" : "0.4"
}
shader = loc + os.path.basename(os.getcwd())
command = testshade(buildCmd(shader, inputs))
|
mscansian/awslogs-cmd
|
refs/heads/master
|
logstream.py
|
1
|
""" logstream.py
Small utility lib to stream to AWS Cloud Watch Logs.
"""
import os
import logging
import pytz
from datetime import datetime
import boto3
import botocore
CLOUDWATCH_LOGS_REGION = os.environ.get("CLOUDWATCH_LOGS_REGION", "us-east-1")
logger = logging.getLogger(__name__)
class BaseLogStream:
def log(self, message, timestamp=None):
raise NotImplementedError()
def push(self):
raise NotImplementedError()
def __init__(self, group_name, stream_name):
self.group_name = group_name
self.stream_name = stream_name
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.push()
class CloudWatchLogsStream(BaseLogStream):
LOG_EVENT_HEADER_SIZE = 26 # 26 bytes
MAX_BATCH_SIZE = 1048576 # 1 MB
MAX_BATCH_COUNT = 1000 # 1000 events
PUSH_SIZE_THRESHOLD = 0.8 # 80%
PUSH_COUNT_THRESHOLD = 0.99 # 99%
PUSH_TIME_THRESHOLD = 30000 # 30 seconds
""" Alias for log() """
def write(self, data):
self.log(data)
""" Log message """
def log(self, message, timestamp=None):
if self._crossed_any_thresholds():
self.push()
if not message:
return # No message to log
if timestamp is None:
timestamp = self._current_time()
self._create_log_event(message, timestamp)
def _create_log_event(self, message, timestamp):
self._log_events.append({"timestamp": timestamp, "message": message})
self._log_events_size += len(message) + self.LOG_EVENT_HEADER_SIZE
def _crossed_any_thresholds(self):
if self._crossed_time_threshold() or self._crossed_size_thresholds():
return True
return False
def _crossed_size_thresholds(self):
max_size = self.MAX_BATCH_SIZE * self.PUSH_SIZE_THRESHOLD
if self._log_events_size >= max_size:
logger.info("Forcing a PUSH. Reached max batch size.")
return True
max_count = self.MAX_BATCH_COUNT * self.PUSH_COUNT_THRESHOLD
if len(self._log_events) >= max_count:
logger.info("Forcing a PUSH. Reached max batch count.")
return True
return False
def _crossed_time_threshold(self):
try:
oldest_log_event = self._log_events[0]
expiration_time = self._current_time() - self.PUSH_TIME_THRESHOLD
if oldest_log_event["timestamp"] <= expiration_time:
logger.info("Forcing a PUSH. Reached time threshold.")
return True
except IndexError:
return False
return False
""" Push logged messages to AWS Cloud Watch """
def push(self):
logger.info("Pushing logs to CloudWatch.")
if not len(self._log_events):
logger.warning("No data to push.")
return
self._awslogs_push(self.group_name, self.stream_name, self._log_events)
self._clear_log_events()
logger.info("Push completed.")
def _awslogs_push(self, group_name, stream_name, log_events):
response = self._client.put_log_events(
logGroupName=group_name,
logStreamName=stream_name,
logEvents=log_events,
sequenceToken=self._sequence_token
)
self._sequence_token = response["nextSequenceToken"]
def _get_sequence_token(self):
response = self._client.describe_log_streams(
logGroupName=self.group_name,
logStreamNamePrefix=self.stream_name,
limit=1
)
return response["logStreams"][0].get("uploadSequenceToken", "0")
def _clear_log_events(self):
self._log_events_size = 0
self._log_events = []
def __init__(self, group_name, stream_name):
super(CloudWatchLogsStream, self).__init__(group_name, stream_name)
self._client = boto3.client('logs', region_name=CLOUDWATCH_LOGS_REGION)
self._clear_log_events()
self._create_stream()
def _create_stream(self):
try:
logger.info("Creating CloudWatch stream.")
self._client.create_log_stream(
logGroupName=self.group_name,
logStreamName=self.stream_name
)
self._sequence_token = "0"
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ResourceAlreadyExistsException":
logger.warning("CloudWatch Stream already exists.")
self._sequence_token = self._get_sequence_token()
else:
raise
def _current_time(self):
utc_time = datetime.now(pytz.timezone('UTC'))
return int(utc_time.timestamp() * 1000)
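# Illustrative usage (group and stream names are hypothetical); push() runs
# automatically when the context manager exits:
#
#   with CloudWatchLogsStream("my-app", "worker-1") as stream:
#       stream.log("job started")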
|
dmacvicar/spacewalk
|
refs/heads/master
|
scripts/update_symlinks.py
|
2
|
#!/usr/bin/python
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Test for blob updates
#
"""
This script is meant to be used by spacewalk users upgrading from 1.0 to 1.1.
The schema storing the symlinks target path was updated between spacewalk 1.0 to 1.1
from a blob in rhnConfigContent to symlink_target_filename_id in rhnConfigInfo.
This script extracts symlink paths that were previously stored as blobs in rhnConfigContent
and then creates an entry in rhnConfigFileName with that path and sets the
rhnConfigInfo.symlink_target_filename_id.
It acquires the database information from rhn.conf
"""
import sys
sys.path.insert(0, "/usr/share/rhn")
from common.rhnConfig import CFG, initCFG
from server import rhnSQL
from server.importlib.backendLib import Table, DBblob, DBint, TableUpdate, \
TableInsert
from pprint import pprint
from os.path import isabs
def setup_db():
initCFG('server.satellite')
db_backend = CFG.DB_BACKEND
db_host = CFG.DB_HOST
db_port = CFG.DB_PORT
    db_user = CFG.DB_USER
db_password = CFG.DB_PASSWORD
database = CFG.DB_NAME
rhnSQL.initDB(backend=db_backend, host=db_host, port=db_port,
username=db_user, password=db_password, database=database)
def main():
setup_db()
print "================="
print "Updating Symbolic Links"
q = """select cr.id as rev_id,
ccon.id as content_id,
ccon.contents,
cr.CONFIG_INFO_ID as info_id,
cf.id as file_id,
cc.org_id,
wc.name as org_name,
ci.SELINUX_CTX as selinux,
cfn.path as path,
ci.SYMLINK_TARGET_FILENAME_ID as info_target,
nvl( (select path from rhnCOnfigFileName where id = ci.SYMLINK_TARGET_FILENAME_ID), 'None') as name_target
from rhnConfigContent ccon
inner join rhnConfigRevision cr on cr.config_content_id = ccon.id
inner join rhnConfigFile cf on cr.CONFIG_FILE_ID = cf.id
inner join rhnConfigFileName cfn on cfn.id = cf.config_file_name_id
inner join rhnConfigInfo ci on ci.id = cr.CONFIG_INFO_ID
inner join rhnConfigChannel cc on cf.CONFIG_CHANNEL_ID = cc.id
inner join web_customer wc on cc.org_id = wc.id
where
cr.CONFIG_FILE_TYPE_ID in (select id from rhnConfigFileType where label='symlink')"""
h = rhnSQL.prepare(q)
h.execute()
results = h.fetchall_dict()
if not results:
print "Update completed."
print "================="
return
contents = []
for row in results:
contents.append( dict(revision_id = row["rev_id"],
file_id = row ["file_id"],
info_id = row ["info_id"],
content_id = row ["content_id"],
path = row['path'],
info_target = row['info_target'],
name_target = row['name_target'],
selinux = row['selinux'],
org_id = row['org_id'],
org_name = row['org_name'],
symlink_target = rhnSQL.read_lob(row["contents"])))
update_query = """update rhnConfigRevision set config_info_id =
lookup_config_info(null, null, null, :selinux, lookup_config_filename(:symlink_target)) where id = :revision_id"""
null_symlink_update_query = """update rhnConfigRevision set config_info_id =
lookup_config_info(null, null, null, :selinux, null) where id = :revision_id"""
update_cr = """ update rhnConfigRevision set config_content_id = null where id = :revision_id"""
delete_content = """ delete from rhnConfigContent where id = :content_id"""
format = """
Path: [%(path)s]
Symbolic link:[%(symlink_target)s]
Update URL: https://<FQDN>/rhn/configuration/file/FileDetails.do?cfid=%(file_id)d&crid=%(revision_id)d
Organization Id : [%(org_id)d]
Organization Name : [%(org_name)s]
"""
bad_items = list()
for item in contents:
if item['symlink_target'] is None:
bad_items.append(item)
rhnSQL.prepare(null_symlink_update_query).execute(**item)
else:
if not isabs(item['symlink_target']) or len(item['symlink_target']) >= 1024:
bad_items.append(item)
item['symlink_target'] = item['symlink_target'][:1024]
rhnSQL.prepare(update_query).execute(**item)
rhnSQL.prepare(update_cr).execute(**item)
rhnSQL.prepare(delete_content).execute(**item)
print format % item
rhnSQL.commit()
rhnSQL.closeDB()
print "%d rows updated." % len(contents)
print "Update completed"
print "================="
msg = """
The following symbolic link paths are either null or not absolute or above 1024 characters in length.
While entries have been added in the DB, the values have to be updated for them in the Web UI.
Please go to the provided url, logging in as a user with config admin/org admin role in the specified organization
and update the target path value accordingly.
"""
if bad_items:
print msg
for item in bad_items:
print format % item
if __name__ == '__main__':
sys.exit(main() or 0)
|
tribeiro/chimera
|
refs/heads/master
|
src/chimera/core/path.py
|
5
|
import os
from chimera.util.findplugins import find_chimera_plugins
__all__ = ['ChimeraPath']
class ChimeraPath (object):
def __init__(self):
# Search for chimera plugins on the sys.path
self._controllers_plugins, self._instruments_plugins = find_chimera_plugins()
self._instruments = [os.path.join(self.root(), 'instruments')]
self._instruments.extend(self._instruments_plugins)
self._controllers = [os.path.join(self.root(), 'controllers')]
self._controllers.extend(self._controllers_plugins)
@staticmethod
def root():
return os.path.realpath(os.path.join(os.path.abspath(__file__), '../../'))
@property
def instruments(self):
return self._instruments
@property
def controllers(self):
return self._controllers
|
shikigit/python-phonenumbers
|
refs/heads/dev
|
python/tests/testgeodata/data0.py
|
5
|
"""Per-prefix data, mapping each prefix to a dict of locale:name.
Auto-generated file, do not edit by hand.
"""
from phonenumbers.util import u
# Copyright (C) 2011-2015 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
data = {
'8255':{'en': 'Gyeongnam', 'ko': u('\uacbd\ub0a8')},
'8232':{'en': 'Incheon', 'ko': u('\uc778\ucc9c')},
'8231':{'en': 'Gyeonggi', 'ko': u('\uacbd\uae30')},
'8251':{'en': 'Busan', 'ko': u('\ubd80\uc0b0')},
'8253':{'en': 'Daegu', 'ko': u('\ub300\uad6c')},
'8252':{'en': 'Ulsan', 'ko': u('\uc6b8\uc0b0')},
'1650960':{'en': 'Mountain View, CA'},
'1201':{'de': 'New Jersey', 'en': 'NJ'},
'8264':{'en': 'Jeju'},
'542214':{'en': 'La Plata'},
'1989':{'en': 'MA'},
'1212812':{'en': 'New York, NY'},
'1650':{'de': 'Kalifornien', 'en': 'CA'},
'1212':{'en': 'NY'},
'8254':{'en': 'Gyeongbuk', 'ko': u('\uacbd\ubd81')},
'8233':{'en': 'Gangwon', 'ko': u('\uac15\uc6d0')},
'8242':{'en': 'Daejeon', 'ko': u('\ub300\uc804')},
'8243':{'en': 'Chungbuk', 'ko': u('\ucda9\ubd81')},
'8241':{'en': 'Chungnam', 'ko': u('\ucda9\ub0a8')},
'8261':{'en': 'Jeonnam', 'ko': u('\uc804\ub0a8')},
'8262':{'en': 'Gwangju', 'ko': u('\uad11\uc8fc')},
'8263':{'en': 'Jeonbuk', 'ko': u('\uc804\ubd81')},
'1617423':{'en': 'Boston, MA'},
'822':{'en': 'Seoul', 'ko': u('\uc11c\uc6b8')},
}
|
xuweiliang/Codelibrary
|
refs/heads/master
|
openstack_dashboard/dashboards/settings/licensedisplay/views.py
|
1
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings # noqa
from horizon import tables
from openstack_dashboard import api
from datetime import datetime as time
from openstack_dashboard.dashboards.admin.licensedisplay import tables as project_tables
from django.utils.datastructures import SortedDict
from horizon import forms
import datetime
from django.core.urlresolvers import reverse_lazy
from openstack_dashboard.dashboards.admin.licensedisplay.gfgq import AuthCode as code
from openstack_dashboard.dashboards.admin.licensedisplay import forms as register_forms
LOG = logging.getLogger(__name__)
class CreateLicenseView(forms.ModalFormView):
form_class = register_forms.LicenseRegisterForm
template_name = 'admin/licensedisplay/settings.html'
success_url = reverse_lazy('horizon:admin:licensedisplay:index')
class LicenseDisplayView(tables.DataTableView):
table_class = project_tables.DisplayTable
template_name = 'admin/licensedisplay/index.html'
# def get_context_data(self, **kwargs):
# context = super(LicenseDisplayView, self).get_context_data(**kwargs)
# return context
def get_data(self):
#licences = {}
marker = self.request.GET.get(
project_tables.DisplayTable._meta.pagination_param, None)
try:
#
licences = api.nova.get_licence(self.request)
LOG.info("licences ======================%s" % licences.__dict__)
decoded_string = eval(code.decode(licences.guofudata, 'fr1e54b8t4n4m47'))
#licencesa=decoded_string
number = decoded_string['num']
during = decoded_string['during']
available = number - licences.used
licences.number = number
licences.available = available
#licences.time = decoded_string['time']
licences.time = time.strptime(decoded_string['time'], '%Y-%m-%dT%H:%M:%S.%f')
licences.time = time.strptime(licences.registrationtime, '%Y-%m-%dT%H:%M:%S.%f')
d1 = licences.time
licences.during = d1 + datetime.timedelta(days = during)
        except Exception:
            self._more = False
            return []
        return [licences]
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/conditionalImportFromPackage/before/src/pkg1/__init__.py
|
12133432
| |
raychorn/knowu
|
refs/heads/master
|
django/djangononrelsample2/django/conf/locale/ar/__init__.py
|
12133432
| |
syphar/django
|
refs/heads/master
|
tests/modeladmin/__init__.py
|
12133432
| |
IlyaDjurin/django-shop
|
refs/heads/master
|
shop/apps.py
|
19
|
from django.apps import AppConfig
class ShopConfig(AppConfig):
name = 'shop'
|
washort/zamboni
|
refs/heads/master
|
mkt/api/tests/__init__.py
|
6
|
import json
from django.db import transaction
from mkt.site.tests import TestCase
class BaseAPI(TestCase):
"""
A base test case useful for API testing.
"""
def _allowed_verbs(self, url, allowed):
"""
Will run through all the verbs except the ones specified in allowed
and ensure that hitting those produces a 405. Otherwise the test will
fail.
"""
verbs = ['get', 'post', 'put', 'patch', 'delete']
for verb in verbs:
if verb in allowed:
continue
try:
with transaction.atomic():
res = getattr(self.client, verb)(url)
except AttributeError:
# Not all clients have patch.
if verb != 'patch':
raise
msg = 'Expected 40{1,3,5} for %s, got %s' % (verb.upper(),
res.status_code)
assert res.status_code in (401, 403, 405), msg
def get_error(self, response):
return json.loads(response.content)['error_message']
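# Illustrative usage in a subclass (the URL below is hypothetical):
#
#   class TestAppsEndpoint(BaseAPI):
#       def test_verbs(self):
#           self._allowed_verbs('/api/v1/apps/', ['get'])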
|
thinker0/aurora
|
refs/heads/master
|
src/main/python/apache/thermos/cli/commands/read.py
|
13
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import time
from twitter.common import app
from twitter.common.recordio import RecordIO, ThriftRecordReader
from apache.thermos.common.ckpt import CheckpointDispatcher
from gen.apache.thermos.ttypes import ProcessState, RunnerCkpt, RunnerState, TaskState
@app.command
@app.command_option("--simple", default=False, dest='simple', action='store_true',
help="Only print the checkpoint records, do not replay them.")
def read(args, options):
"""Replay a thermos checkpoint.
Usage: thermos read [options] checkpoint_filename
"""
if len(args) != 1:
app.error('Expected one checkpoint file, got %s' % len(args))
if not os.path.exists(args[0]):
app.error('Could not find %s' % args[0])
dispatcher = CheckpointDispatcher()
state = RunnerState(processes={})
with open(args[0], 'r') as fp:
try:
for record in ThriftRecordReader(fp, RunnerCkpt):
if not options.simple:
dispatcher.dispatch(state, record)
else:
print('CKPT: %s' % record)
except RecordIO.Error as err:
print("Failed to recover from %s: %s" % (fp.name, err))
return
if not options.simple:
if state is None or state.header is None:
print('Checkpoint stream CORRUPT or outdated format')
return
print('Recovered Task Header:')
print(' id: %s' % state.header.task_id)
print(' user: %s' % state.header.user)
print(' host: %s' % state.header.hostname)
print(' sandbox: %s' % state.header.sandbox)
if state.header.ports:
print(' ports: %s' % ' '.join(
'%s->%s' % (name, port) for (name, port) in state.header.ports.items()))
print('Recovered Task States:')
for task_status in state.statuses:
print(' %s [pid: %d] => %s' % (
time.asctime(time.localtime(task_status.timestamp_ms / 1000.0)),
task_status.runner_pid,
TaskState._VALUES_TO_NAMES[task_status.state]))
print('Recovered Processes:')
for process, process_history in state.processes.items():
print(' %s runs: %s' % (process, len(process_history)))
for k in reversed(range(len(process_history))):
run = process_history[k]
print(' %2d: pid=%d, rc=%s, finish:%s, state:%s' % (
k,
run.pid,
run.return_code if run.return_code is not None else '',
time.asctime(time.localtime(run.stop_time)) if run.stop_time else 'None',
ProcessState._VALUES_TO_NAMES.get(run.state, 'Unknown')))
|
gcompris/GCompris-site
|
refs/heads/master
|
activity/color/closure/closure/bin/build/treescan.py
|
39
|
#!/usr/bin/env python
#
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions for scanning directory trees."""
import os
import re
# Matches a .js file path.
_JS_FILE_REGEX = re.compile(r'^.+\.js$')
def ScanTreeForJsFiles(root):
"""Scans a directory tree for JavaScript files.
Args:
root: str, Path to a root directory.
Returns:
An iterable of paths to JS files, relative to cwd.
"""
return ScanTree(root, path_filter=_JS_FILE_REGEX)
def ScanTree(root, path_filter=None, ignore_hidden=True):
"""Scans a directory tree for files.
Args:
root: str, Path to a root directory.
    path_filter: A regular expression filter. If set, only paths matching
the path_filter are returned.
ignore_hidden: If True, do not follow or return hidden directories or files
(those starting with a '.' character).
Yields:
A string path to files, relative to cwd.
"""
def OnError(os_error):
raise os_error
for dirpath, dirnames, filenames in os.walk(root, onerror=OnError):
    # os.walk allows us to modify dirnames to prevent descent into particular
    # directories.  Avoid hidden directories; iterate over a copy so entries
    # can be removed from dirnames safely.
    for dirname in list(dirnames):
      if ignore_hidden and dirname.startswith('.'):
        dirnames.remove(dirname)
for filename in filenames:
# nothing that starts with '.'
if ignore_hidden and filename.startswith('.'):
continue
fullpath = os.path.join(dirpath, filename)
if path_filter and not path_filter.match(fullpath):
continue
yield os.path.normpath(fullpath)
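# Illustrative usage (the directory is hypothetical):
#
#   for js_path in ScanTreeForJsFiles('closure/goog'):
#       print js_path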
|
bzennn/blog_flask
|
refs/heads/master
|
python/lib/python3.5/site-packages/alembic/templates/pylons/env.py
|
41
|
"""Pylons bootstrap environment.
Place 'pylons_config_file' into alembic.ini, and the application will
be loaded from there.
"""
from alembic import context
from paste.deploy import loadapp
from logging.config import fileConfig
from sqlalchemy.engine.base import Engine
try:
# if pylons app already in, don't create a new app
from pylons import config as pylons_config
pylons_config['__file__']
except:
config = context.config
# can use config['__file__'] here, i.e. the Pylons
# ini file, instead of alembic.ini
config_file = config.get_main_option('pylons_config_file')
fileConfig(config_file)
wsgi_app = loadapp('config:%s' % config_file, relative_to='.')
# customize this section for non-standard engine configurations.
meta = __import__("%s.model.meta" % wsgi_app.config['pylons.package']).model.meta
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=meta.engine.url, target_metadata=target_metadata,
literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# specify here how the engine is acquired
# engine = meta.engine
raise NotImplementedError("Please specify engine connectivity here")
with engine.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
DomainGroupOSS/luigi
|
refs/heads/master
|
luigi/scalding.py
|
37
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
luigi.scalding has moved to luigi.contrib.scalding
"""
import warnings
from luigi.contrib.scalding import *
warnings.warn("luigi.scalding has now moved to luigi.contrib.scalding", DeprecationWarning, stacklevel=3)
|
bcornwellmott/frappe
|
refs/heads/develop
|
frappe/templates/autodoc/contents.py
|
42
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
from frappe.website.utils import get_full_index
def get_context(context):
context.full_index = get_full_index()
|
anirudhSK/chromium
|
refs/heads/master
|
native_client_sdk/src/gonacl_appengine/gonacl.py
|
26
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import webapp2
application = webapp2.WSGIApplication([
webapp2.Route('/', webapp2.RedirectHandler, defaults={
'_uri': 'http://developers.google.com/native-client/dev'}),
], debug=True)
|
sjaco002/vxquery
|
refs/heads/master
|
vxquery-benchmark/src/main/resources/util/diff_xml_files.py
|
11
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt, glob, os, sys
def main(argv):
f1 = ""
f2 = ""
# Get the base folder
try:
opts, args = getopt.getopt(argv, "h", ["f1=", "f2="])
except getopt.GetoptError:
        print 'The file options for diff_xml_files.py were not correctly specified.'
        print 'To see a full list of options try:'
        print '  $ python diff_xml_files.py -h'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'Options:'
            print '  --f1  The first file list to compare.'
            print '  --f2  The second file list to compare.'
sys.exit()
elif opt in ('--f1'):
# check if file exists.
if os.path.exists(arg):
f1 = arg
else:
print 'Error: Argument must be a file name for --f1.'
sys.exit()
elif opt in ('--f2'):
# check if file exists.
if os.path.exists(arg):
f2 = arg
else:
print 'Error: Argument must be a file name for --f2.'
sys.exit()
# Required fields to run the script.
if f1 == "" or not os.path.exists(f1):
print 'Error: The file path option must be supplied: --f1.'
sys.exit()
if f2 == "" or not os.path.exists(f2):
print 'Error: The file path option must be supplied: --f2.'
sys.exit()
missing_in_f1 = []
missing_in_f2 = []
found_in_both = []
with open(f1) as f:
content_f1 = f.readlines()
set_f1 = set(content_f1)
with open(f2) as f:
content_f2 = f.readlines()
set_f2 = set(content_f2)
missing_in_f1 = set_f2.difference(set_f1)
missing_in_f2 = set_f1.difference(set_f2)
found_in_both = set_f1.intersection(set_f2)
print ""
print "Missing files in " + f1
for f1_name in missing_in_f1:
print " + " + f1_name.strip()
print ""
print "Missing files in " + f2
for f2_name in missing_in_f2:
print " + " + f2_name.strip()
offset = 40
print ""
print "XML Summary"
print (" - Found in both:").ljust(offset) + str(len(found_in_both))
print (" - " + f1 + " diff set vs list:").ljust(offset) + str(len(content_f1) - len(set_f1))
print (" - " + f2 + " diff set vs list:").ljust(offset) + str(len(content_f2) - len(set_f2))
print (" - " + f1 + " missing:").ljust(offset) + str(len(missing_in_f1))
print (" - " + f2 + " missing:").ljust(offset) + str(len(missing_in_f2))
if __name__ == "__main__":
main(sys.argv[1:])
|
bforbis/thrift
|
refs/heads/master
|
test/py/TestSocket.py
|
21
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.transport import TSocket
import unittest
import time
import socket
import random
class TimeoutTest(unittest.TestCase):
def setUp(self):
for i in range(50):
try:
# find a port we can use
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = random.randint(10000, 30000)
self.listen_sock.bind(('localhost', self.port))
self.listen_sock.listen(5)
break
except Exception:
if i == 49:
raise
def testConnectTimeout(self):
starttime = time.time()
try:
leaky = []
for i in range(100):
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
leaky.append(socket)
except Exception:
self.assert_(time.time() - starttime < 5.0)
def testWriteTimeout(self):
starttime = time.time()
try:
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
            # The listener accepts but never reads, so timed-out writes on the
            # client TSocket eventually raise once the send buffer fills.
            lsock, _addr = self.listen_sock.accept()
            while True:
                socket.write(b"hi" * 100)
except Exception:
self.assert_(time.time() - starttime < 5.0)
if __name__ == '__main__':
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TimeoutTest))
testRunner = unittest.TextTestRunner(verbosity=2)
testRunner.run(suite)
|
eadgarchen/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/distributions/distribution.py
|
9
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import tf_inspect
__all__ = [
"ReparameterizationType",
"FULLY_REPARAMETERIZED",
"NOT_REPARAMETERIZED",
"Distribution",
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape",
"batch_shape_tensor",
"cdf",
"covariance",
"cross_entropy",
"entropy",
"event_shape",
"event_shape_tensor",
"kl_divergence",
"log_cdf",
"log_prob",
"log_survival_function",
"mean",
"mode",
"prob",
"sample",
"stddev",
"survival_function",
"variance",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradients` call and instead use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
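  # Hedged usage sketch (not in the original source): shows how a concrete
  # subclass could report parameter shapes for a desired sample shape. The
  # `Normal` subclass and its `loc`/`scale` parameter names are assumptions
  # used only for illustration.
  #
  #   shapes = Normal.param_static_shapes([100])
  #   # => {"loc": TensorShape([100]), "scale": TensorShape([100])}
  #   dist = Normal(loc=array_ops.zeros(shapes["loc"]),
  #                 scale=array_ops.ones(shapes["scale"]))
  #   dist.sample().shape   # [100]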
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
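  # Hedged sketch (not in the original source): illustrates the NaN-vs-raise
  # behavior described above with an assumed `StudentT` subclass whose mean is
  # undefined at df = 1; the name and signature are assumptions for
  # illustration only.
  #
  #   StudentT(df=1., loc=0., scale=1., allow_nan_stats=True).mean()   # NaN
  #   StudentT(df=1., loc=0., scale=1., allow_nan_stats=False).mean()  # raises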
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
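  # Hedged usage sketch (not in the original source): `copy` rebuilds the
  # distribution from `self.parameters` with selected kwargs overridden. The
  # `Normal` subclass below is an assumption for illustration.
  #
  #   base = Normal(loc=0., scale=1., validate_args=False)
  #   checked = base.copy(validate_args=True)
  #   # equivalent to Normal(**dict(base.parameters, validate_args=True))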
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._batch_shape()
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._event_shape()
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
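  # Hedged usage sketch (not in the original source): `sample_shape` is
  # prepended to the batch and event dimensions. The `Normal` with batch
  # shape [2] is an assumption for illustration.
  #
  #   dist = Normal(loc=[0., 10.], scale=[1., 1.])   # batch_shape == [2]
  #   dist.sample().shape                            # [2]
  #   dist.sample(5).shape                           # [5, 2]
  #   dist.sample([3, 4], seed=42).shape             # [3, 4, 2]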
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
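  # Hedged sketch (not in the original source): the result has shape
  # `sample_shape(x) + batch_shape`, with `value` broadcast against the batch.
  # The `Normal` below is an assumption for illustration.
  #
  #   dist = Normal(loc=[0., 10.], scale=[1., 1.])   # batch_shape == [2]
  #   dist.log_prob(0.).shape                        # [2]
  #   dist.log_prob([[0., 10.], [1., 9.]]).shape     # [2, 2]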
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
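  # Hedged sketch (not in the original source): relates the survival function
  # to `cdf` as documented above; the `Normal` is an assumption for
  # illustration and the equalities hold only up to numerical error.
  #
  #   dist = Normal(loc=0., scale=1.)
  #   dist.survival_function(2.)       # ~= 1. - dist.cdf(2.)
  #   dist.log_survival_function(2.)   # typically more accurate than
  #                                    # log(1. - dist.cdf(2.)) for large x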
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented")
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._quantile(value, **kwargs)
except NotImplementedError as original_exception:
raise original_exception
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
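  # Hedged sketch (not in the original source): `quantile` is the inverse of
  # `cdf` for distributions that implement it; the `Normal` below is an
  # assumption for illustration.
  #
  #   dist = Normal(loc=0., scale=1.)
  #   x = dist.quantile(0.975)   # ~= 1.96
  #   dist.cdf(x)                # ~= 0.975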
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
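  # Hedged sketch (not in the original source): `variance` and `stddev` fall
  # back on each other, so a subclass may implement only one of `_variance`
  # or `_stddev`. The `Normal` below is an assumption for illustration.
  #
  #   dist = Normal(loc=0., scale=3.)
  #   dist.variance()   # ~= 9.; square(stddev) if only `_stddev` is defined
  #   dist.stddev()     # ~= 3.; sqrt(variance) if only `_variance` is defined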
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tf.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
    measure `r`, the Kullback-Leibler divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tf.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
with self._name_scope(name):
return self._kl_divergence(other)
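  # Hedged sketch (not in the original source): by the identity
  # KL[p, q] = H[p, q] - H[p], the three methods below are mutually
  # consistent whenever a KL is registered for the pair. The two `Normal`
  # instances are assumptions for illustration.
  #
  #   p = Normal(loc=0., scale=1.)
  #   q = Normal(loc=1., scale=2.)
  #   kl = p.kl_divergence(q)        # scalar Tensor
  #   ce = p.cross_entropy(q)        # ~= kl + p.entropy()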
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
acq4/acq4
|
refs/heads/develop
|
acq4/pyqtgraph/console/template_pyqt.py
|
3
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'console/template.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(739, 497)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.splitter = QtGui.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.output = QtGui.QPlainTextEdit(self.layoutWidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monospace"))
self.output.setFont(font)
self.output.setReadOnly(True)
self.output.setObjectName(_fromUtf8("output"))
self.verticalLayout.addWidget(self.output)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.input = CmdInput(self.layoutWidget)
self.input.setObjectName(_fromUtf8("input"))
self.horizontalLayout.addWidget(self.input)
self.historyBtn = QtGui.QPushButton(self.layoutWidget)
self.historyBtn.setCheckable(True)
self.historyBtn.setObjectName(_fromUtf8("historyBtn"))
self.horizontalLayout.addWidget(self.historyBtn)
self.exceptionBtn = QtGui.QPushButton(self.layoutWidget)
self.exceptionBtn.setCheckable(True)
self.exceptionBtn.setObjectName(_fromUtf8("exceptionBtn"))
self.horizontalLayout.addWidget(self.exceptionBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.historyList = QtGui.QListWidget(self.splitter)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monospace"))
self.historyList.setFont(font)
self.historyList.setObjectName(_fromUtf8("historyList"))
self.exceptionGroup = QtGui.QGroupBox(self.splitter)
self.exceptionGroup.setObjectName(_fromUtf8("exceptionGroup"))
self.gridLayout_2 = QtGui.QGridLayout(self.exceptionGroup)
self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
self.gridLayout_2.setHorizontalSpacing(2)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.clearExceptionBtn = QtGui.QPushButton(self.exceptionGroup)
self.clearExceptionBtn.setEnabled(False)
self.clearExceptionBtn.setObjectName(_fromUtf8("clearExceptionBtn"))
self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
self.catchAllExceptionsBtn = QtGui.QPushButton(self.exceptionGroup)
self.catchAllExceptionsBtn.setCheckable(True)
self.catchAllExceptionsBtn.setObjectName(_fromUtf8("catchAllExceptionsBtn"))
self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
self.catchNextExceptionBtn = QtGui.QPushButton(self.exceptionGroup)
self.catchNextExceptionBtn.setCheckable(True)
self.catchNextExceptionBtn.setObjectName(_fromUtf8("catchNextExceptionBtn"))
self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
self.onlyUncaughtCheck = QtGui.QCheckBox(self.exceptionGroup)
self.onlyUncaughtCheck.setChecked(True)
self.onlyUncaughtCheck.setObjectName(_fromUtf8("onlyUncaughtCheck"))
self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
self.exceptionStackList = QtGui.QListWidget(self.exceptionGroup)
self.exceptionStackList.setAlternatingRowColors(True)
self.exceptionStackList.setObjectName(_fromUtf8("exceptionStackList"))
self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7)
self.runSelectedFrameCheck = QtGui.QCheckBox(self.exceptionGroup)
self.runSelectedFrameCheck.setChecked(True)
self.runSelectedFrameCheck.setObjectName(_fromUtf8("runSelectedFrameCheck"))
self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
self.exceptionInfoLabel = QtGui.QLabel(self.exceptionGroup)
self.exceptionInfoLabel.setWordWrap(True)
self.exceptionInfoLabel.setObjectName(_fromUtf8("exceptionInfoLabel"))
self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1)
self.label = QtGui.QLabel(self.exceptionGroup)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1)
self.filterText = QtGui.QLineEdit(self.exceptionGroup)
self.filterText.setObjectName(_fromUtf8("filterText"))
self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1)
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Console", None))
self.historyBtn.setText(_translate("Form", "History..", None))
self.exceptionBtn.setText(_translate("Form", "Exceptions..", None))
self.exceptionGroup.setTitle(_translate("Form", "Exception Handling", None))
self.clearExceptionBtn.setText(_translate("Form", "Clear Stack", None))
self.catchAllExceptionsBtn.setText(_translate("Form", "Show All Exceptions", None))
self.catchNextExceptionBtn.setText(_translate("Form", "Show Next Exception", None))
self.onlyUncaughtCheck.setText(_translate("Form", "Only Uncaught Exceptions", None))
self.runSelectedFrameCheck.setText(_translate("Form", "Run commands in selected stack frame", None))
self.exceptionInfoLabel.setText(_translate("Form", "Stack Trace", None))
self.label.setText(_translate("Form", "Filter (regex):", None))
from .CmdInput import CmdInput
|
endlessm/chromium-browser
|
refs/heads/master
|
third_party/catapult/third_party/google-endpoints/rsa/__init__.py
|
79
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
"""
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2016-03-29"
__version__ = '3.4.2'
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
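# Hedged usage sketch (not part of the original module): a minimal
# encrypt/decrypt and sign/verify round trip using the names exported above.
# The key size and hash method are illustrative choices, not recommendations.
#
#   (pub, priv) = newkeys(512)
#   ciphertext = encrypt(b'secret', pub)
#   assert decrypt(ciphertext, priv) == b'secret'
#   signature = sign(b'message', priv, 'SHA-256')
#   verify(b'message', signature, pub)   # raises VerificationError if invalid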
|
salaria/odoo
|
refs/heads/8.0
|
addons/lunch/tests/test_lunch.py
|
345
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.tests import common
class Test_Lunch(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(Test_Lunch, self).setUp()
cr, uid = self.cr, self.uid
self.res_users = self.registry('res.users')
self.lunch_order = self.registry('lunch.order')
self.lunch_order_line = self.registry('lunch.order.line')
self.lunch_cashmove = self.registry('lunch.cashmove')
self.lunch_product = self.registry('lunch.product')
self.lunch_alert = self.registry('lunch.alert')
self.lunch_product_category = self.registry('lunch.product.category')
self.demo_id = self.res_users.search(cr, uid, [('name', '=', 'Demo User')])
self.product_bolognese_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'lunch', 'product_Bolognese')
self.product_Bolognese_id = self.product_bolognese_ref and self.product_bolognese_ref[1] or False
self.new_id_order = self.lunch_order.create(cr,uid,{
'user_id': self.demo_id[0],
'order_line_ids':'[]',
},context=None)
self.new_id_order_line = self.lunch_order_line.create(cr,uid,{
'order_id':self.new_id_order,
'product_id':self.product_Bolognese_id,
'note': '+Emmental',
'cashmove': [],
'price': self.lunch_product.browse(cr,uid,self.product_Bolognese_id,context=None).price,
})
def test_00_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered'. Check that there are no cashmove linked to that order line"""
cr, uid = self.cr, self.uid
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
        #we check that our order_line is a 'new' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state,'new')
self.assertEqual(list(self.order_one.cashmove), [])
        #we order that order line so its state will be 'ordered'
self.order_one.order()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
        #we check that our order_line is an 'ordered' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state,'ordered')
self.assertEqual(list(self.order_one.cashmove), [])
def test_01_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered' then to 'confirmed'. Check that there is a cashmove linked to the order line"""
cr, uid = self.cr, self.uid
self.test_00_lunch_order()
        #We receive the order, so we confirm the order line and its state will be 'confirmed'
#A cashmove will be created and we will test that the cashmove amount equals the order line price
self.order_one.confirm()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
        #we check that our order_line is a 'confirmed' one and that there is a cashmove linked to that order_line with an amount equal to the order line price:
self.assertEqual(self.order_one.state,'confirmed')
self.assertTrue(self.order_one.cashmove)
self.assertTrue(self.order_one.cashmove[0].amount==-self.order_one.price)
def test_02_lunch_order(self):
"""Change the state of an order line from 'confirmed' to 'cancelled' and check that the cashmove linked to that order line will be deleted"""
cr, uid = self.cr, self.uid
self.test_01_lunch_order()
        #We have a confirmed order with its associated cashmove
#We execute the cancel function
self.order_one.cancel()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
#We check that the state is cancelled and that the cashmove has been deleted
self.assertEqual(self.order_one.state,'cancelled')
self.assertFalse(self.order_one.cashmove)
|
wolverineav/horizon
|
refs/heads/master
|
openstack_dashboard/test/test_panels/__init__.py
|
12133432
| |
adamklawonn/CityCircles
|
refs/heads/master
|
citycircles_iphone/Classes/citycircles_forclient/__init__.py
|
12133432
|