| repo_name | ref | path | copies | content |
|---|---|---|---|---|
cmheisel/presentation-python-threading
|
refs/heads/master
|
examples/async.py
|
1
|
"""
Thanks to http://skipperkongen.dk/2016/09/09/easy-parallel-http-requests-with-python-and-asyncio/ for the pattern.
"""
import asyncio
from timeit import default_timer as timer
import requests
URLS = [
    "http://slowyourload.net/5/https://chrisheisel.com",
    "http://slowyourload.net/4/https://chrisheisel.com",
    "http://slowyourload.net/3/https://chrisheisel.com",
    "http://slowyourload.net/2/https://chrisheisel.com",
    "http://slowyourload.net/1/https://chrisheisel.com",
]

def get_url(url):
    print("GET {}".format(url))
    requests.get(url)
    print("\tDONE GET {}".format(url))

async def main(loop):
    print("Async ====================")
    start = timer()
    futures = []
    for url in URLS:
        future = loop.run_in_executor(None, get_url, url)
        futures.append(future)
    for response in await asyncio.gather(*futures):
        pass
    end = timer()
    duration = (end - start)
    print("DONE in {} seconds".format(duration))

if __name__ == "__main__":
    event_loop = asyncio.get_event_loop()
    try:
        event_loop.run_until_complete(main(event_loop))
    finally:
        event_loop.close()
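# A minimal modern variant (a sketch, not part of the original example):
# since Python 3.7, asyncio.run() can replace the manual loop management
# above, reusing URLS and get_url() unchanged:
#
#     async def main_modern():
#         loop = asyncio.get_running_loop()
#         futures = [loop.run_in_executor(None, get_url, url) for url in URLS]
#         await asyncio.gather(*futures)
#
#     asyncio.run(main_modern())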
|
inovtec-solutions/OpenERP
|
refs/heads/branch_openerp
|
openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/tiny_socket.py
|
386
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import socket
import cPickle
import cStringIO
import marshal
class Myexception(Exception):
    def __init__(self, faultCode, faultString):
        self.faultCode = faultCode
        self.faultString = faultString
        self.args = (faultCode, faultString)

class mysocket:
    def __init__(self, sock=None):
        if sock is None:
            self.sock = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        self.sock.settimeout(120)

    def connect(self, host, port=False):
        if not port:
            protocol, buf = host.split('//')
            host, port = buf.split(':')
        self.sock.connect((host, int(port)))

    def disconnect(self):
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()

    def mysend(self, msg, exception=False, traceback=None):
        # Wire format: 8-byte ASCII length, one-byte exception flag ("1"/"0"),
        # then the pickled [msg, traceback] payload.
        msg = cPickle.dumps([msg, traceback])
        size = len(msg)
        self.sock.send('%8d' % size)
        self.sock.send(exception and "1" or "0")
        totalsent = 0
        while totalsent < size:
            sent = self.sock.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError, "Socket connection broken."
            totalsent = totalsent + sent

    def myreceive(self):
        # Mirror of mysend: read the 8-byte length, the exception flag,
        # then exactly `size` bytes of pickled payload.
        buf = ''
        while len(buf) < 8:
            chunk = self.sock.recv(8 - len(buf))
            if chunk == '':
                raise RuntimeError, "Socket connection broken."
            buf += chunk
        size = int(buf)
        buf = self.sock.recv(1)
        if buf != "0":
            exception = buf
        else:
            exception = False
        msg = ''
        while len(msg) < size:
            chunk = self.sock.recv(size - len(msg))
            if chunk == '':
                raise RuntimeError, "Socket connection broken."
            msg = msg + chunk
        msgio = cStringIO.StringIO(msg)
        unpickler = cPickle.Unpickler(msgio)
        unpickler.find_global = None  # disable resolving globals while unpickling
        res = unpickler.load()
        if isinstance(res[0], Exception):
            if exception:
                raise Myexception(str(res[0]), str(res[1]))
            raise res[0]
        else:
            return res[0]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
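# A minimal usage sketch (an illustration, not part of the original module;
# the hostname and port are hypothetical). Both peers must speak the same
# wire protocol described in mysend()/myreceive() above.
#
#     s = mysocket()
#     s.connect('socket://localhost:8070')
#     s.mysend(('hello', 'world'))
#     reply = s.myreceive()
#     s.disconnect()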
|
Sumith1896/sympy
|
refs/heads/master
|
sympy/integrals/meijerint.py
|
1
|
"""
Integrate functions by rewriting them as Meijer G-functions.
There are three user-visible functions that can be used by other parts of the
sympy library to solve various integration problems:
- meijerint_indefinite
- meijerint_definite
- meijerint_inversion
They can be used to compute, respectively, indefinite integrals, definite
integrals over intervals of the real line, and inverse laplace-type integrals
(from c-I*oo to c+I*oo). See the respective docstrings for details.
The main references for this are:
[L] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
[R] Kelly B. Roach. Meijer G Function Representations.
In: Proceedings of the 1997 International Symposium on Symbolic and
Algebraic Computation, pages 205-211, New York, 1997. ACM.
[P] A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
Integrals and Series: More Special Functions, Vol. 3,
Gordon and Breach Science Publisher
"""
from __future__ import print_function, division
from sympy.core import oo, S, pi, Expr
from sympy.core.exprtools import factor_terms
from sympy.core.function import expand, expand_mul, expand_power_base
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.compatibility import range
from sympy.core.cache import cacheit
from sympy.core.symbol import Dummy, Wild
from sympy.simplify import hyperexpand, powdenest, collect
from sympy.logic.boolalg import And, Or, BooleanAtom
from sympy.functions.special.delta_functions import Heaviside
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.hyperbolic import \
_rewrite_hyperbolics_as_exp, HyperbolicFunction
from sympy.functions.special.hyper import meijerg
from sympy.utilities.iterables import multiset_partitions, ordered
from sympy.utilities.misc import debug as _debug
from sympy.utilities import default_sort_key
# keep this at top for easy reference
z = Dummy('z')
def _has(res, *f):
    # return True if res has f; in the case of Piecewise
    # only return True if *all* pieces have f
    res = piecewise_fold(res)
    if getattr(res, 'is_Piecewise', False):
        return all(_has(i, *f) for i in res.args)
    return res.has(*f)
def _create_lookup_table(table):
""" Add formulae for the function -> meijerg lookup table. """
def wild(n):
return Wild(n, exclude=[z])
p, q, a, b, c = list(map(wild, 'pqabc'))
n = Wild('n', properties=[lambda x: x.is_Integer and x > 0])
t = p*z**q
def add(formula, an, ap, bm, bq, arg=t, fac=S(1), cond=True, hint=True):
table.setdefault(_mytype(formula, z), []).append((formula,
[(fac, meijerg(an, ap, bm, bq, arg))], cond, hint))
def addi(formula, inst, cond, hint=True):
table.setdefault(
_mytype(formula, z), []).append((formula, inst, cond, hint))
def constant(a):
return [(a, meijerg([1], [], [], [0], z)),
(a, meijerg([], [1], [0], [], z))]
table[()] = [(a, constant(a), True, True)]
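# Each table entry maps the _mytype() key of a formula to tuples
# (formula, [(fac, meijerg(...))], cond, hint): the formula equals the sum
# of fac*G over the listed G-functions whenever cond holds, and hint can
# veto a match before the (possibly expensive) condition is evaluated.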
# [P], Section 8.
from sympy import unpolarify, Function, Not
class IsNonPositiveInteger(Function):
@classmethod
def eval(cls, arg):
arg = unpolarify(arg)
if arg.is_Integer is True:
return arg <= 0
# Section 8.4.2
from sympy import (gamma, pi, cos, exp, re, sin, sqrt, sinh, cosh,
factorial, log, erf, erfc, erfi, polar_lift)
# TODO this needs more polar_lift (c/f entry for exp)
add(Heaviside(t - b)*(t - b)**(a - 1), [a], [], [], [0], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside(b - t)*(b - t)**(a - 1), [], [a], [0], [], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside(z - (b/p)**(1/q))*(t - b)**(a - 1), [a], [], [], [0], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside((b/p)**(1/q) - z)*(b - t)**(a - 1), [], [a], [0], [], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add((b + t)**(-a), [1 - a], [], [0], [], t/b, b**(-a)/gamma(a),
hint=Not(IsNonPositiveInteger(a)))
add(abs(b - t)**(-a), [1 - a], [(1 - a)/2], [0], [(1 - a)/2], t/b,
pi/(gamma(a)*cos(pi*a/2))*abs(b)**(-a), re(a) < 1)
add((t**a - b**a)/(t - b), [0, a], [], [0, a], [], t/b,
b**(a - 1)*sin(a*pi)/pi)
# 12
def A1(r, sign, nu):
return pi**(-S(1)/2)*(-sign*nu/2)**(1 - 2*r)
def tmpadd(r, sgn):
# XXX the a**2 is bad for matching
add((sqrt(a**2 + t) + sgn*a)**b/(a**2 + t)**r,
[(1 + b)/2, 1 - 2*r + b/2], [],
[(b - sgn*b)/2], [(b + sgn*b)/2], t/a**2,
a**(b - 2*r)*A1(r, sgn, b))
tmpadd(0, 1)
tmpadd(0, -1)
tmpadd(S(1)/2, 1)
tmpadd(S(1)/2, -1)
# 13
def tmpadd(r, sgn):
add((sqrt(a + p*z**q) + sgn*sqrt(p)*z**(q/2))**b/(a + p*z**q)**r,
[1 - r + sgn*b/2], [1 - r - sgn*b/2], [0, S(1)/2], [],
p*z**q/a, a**(b/2 - r)*A1(r, sgn, b))
tmpadd(0, 1)
tmpadd(0, -1)
tmpadd(S(1)/2, 1)
tmpadd(S(1)/2, -1)
# (those after look obscure)
# Section 8.4.3
add(exp(polar_lift(-1)*t), [], [], [0], [])
# TODO can do sin^n, sinh^n by expansion ... where?
# 8.4.4 (hyperbolic functions)
add(sinh(t), [], [1], [S(1)/2], [1, 0], t**2/4, pi**(S(3)/2))
add(cosh(t), [], [S(1)/2], [0], [S(1)/2, S(1)/2], t**2/4, pi**(S(3)/2))
# Section 8.4.5
# TODO can do t + a. but can also do by expansion... (XXX not really)
add(sin(t), [], [], [S(1)/2], [0], t**2/4, sqrt(pi))
add(cos(t), [], [], [0], [S(1)/2], t**2/4, sqrt(pi))
# Section 8.5.5
def make_log1(subs):
N = subs[n]
return [((-1)**N*factorial(N),
meijerg([], [1]*(N + 1), [0]*(N + 1), [], t))]
def make_log2(subs):
N = subs[n]
return [(factorial(N),
meijerg([1]*(N + 1), [], [], [0]*(N + 1), t))]
# TODO these only hold for positive p, and can be made more general
# but who uses log(x)*Heaviside(a-x) anyway ...
# TODO also it would be nice to derive them recursively ...
addi(log(t)**n*Heaviside(1 - t), make_log1, True)
addi(log(t)**n*Heaviside(t - 1), make_log2, True)
def make_log3(subs):
return make_log1(subs) + make_log2(subs)
addi(log(t)**n, make_log3, True)
addi(log(t + a),
constant(log(a)) + [(S(1), meijerg([1, 1], [], [1], [0], t/a))],
True)
addi(log(abs(t - a)), constant(log(abs(a))) +
[(pi, meijerg([1, 1], [S(1)/2], [1], [0, S(1)/2], t/a))],
True)
# TODO log(x)/(x+a) and log(x)/(x-1) can also be done. should they
# be derivable?
# TODO further formulae in this section seem obscure
# Sections 8.4.9-10
# TODO
# Section 8.4.11
from sympy import Ei, I, expint, Si, Ci, Shi, Chi, fresnels, fresnelc
addi(Ei(t),
constant(-I*pi) + [(S(-1), meijerg([], [1], [0, 0], [],
t*polar_lift(-1)))],
True)
# Section 8.4.12
add(Si(t), [1], [], [S(1)/2], [0, 0], t**2/4, sqrt(pi)/2)
add(Ci(t), [], [1], [0, 0], [S(1)/2], t**2/4, -sqrt(pi)/2)
# Section 8.4.13
add(Shi(t), [S(1)/2], [], [0], [S(-1)/2, S(-1)/2], polar_lift(-1)*t**2/4,
t*sqrt(pi)/4)
add(Chi(t), [], [S(1)/2, 1], [0, 0], [S(1)/2, S(1)/2], t**2/4, -
pi**S('3/2')/2)
# generalized exponential integral
add(expint(a, t), [], [a], [a - 1, 0], [], t)
# Section 8.4.14
add(erf(t), [1], [], [S(1)/2], [0], t**2, 1/sqrt(pi))
# TODO exp(-x)*erf(I*x) does not work
add(erfc(t), [], [1], [0, S(1)/2], [], t**2, 1/sqrt(pi))
# This formula for erfi(z) yields a wrong(?) minus sign
#add(erfi(t), [1], [], [S(1)/2], [0], -t**2, I/sqrt(pi))
add(erfi(t), [S(1)/2], [], [0], [-S(1)/2], -t**2, t/sqrt(pi))
# Fresnel Integrals
add(fresnels(t), [1], [], [S(3)/4], [0, S(1)/4], pi**2*t**4/16, S(1)/2)
add(fresnelc(t), [1], [], [S(1)/4], [0, S(3)/4], pi**2*t**4/16, S(1)/2)
##### bessel-type functions #####
from sympy import besselj, bessely, besseli, besselk
# Section 8.4.19
add(besselj(a, t), [], [], [a/2], [-a/2], t**2/4)
# all of the following are derivable
#add(sin(t)*besselj(a, t), [S(1)/4, S(3)/4], [], [(1+a)/2],
# [-a/2, a/2, (1-a)/2], t**2, 1/sqrt(2))
#add(cos(t)*besselj(a, t), [S(1)/4, S(3)/4], [], [a/2],
# [-a/2, (1+a)/2, (1-a)/2], t**2, 1/sqrt(2))
#add(besselj(a, t)**2, [S(1)/2], [], [a], [-a, 0], t**2, 1/sqrt(pi))
#add(besselj(a, t)*besselj(b, t), [0, S(1)/2], [], [(a + b)/2],
# [-(a+b)/2, (a - b)/2, (b - a)/2], t**2, 1/sqrt(pi))
# Section 8.4.20
add(bessely(a, t), [], [-(a + 1)/2], [a/2, -a/2], [-(a + 1)/2], t**2/4)
# TODO all of the following should be derivable
#add(sin(t)*bessely(a, t), [S(1)/4, S(3)/4], [(1 - a - 1)/2],
# [(1 + a)/2, (1 - a)/2], [(1 - a - 1)/2, (1 - 1 - a)/2, (1 - 1 + a)/2],
# t**2, 1/sqrt(2))
#add(cos(t)*bessely(a, t), [S(1)/4, S(3)/4], [(0 - a - 1)/2],
# [(0 + a)/2, (0 - a)/2], [(0 - a - 1)/2, (1 - 0 - a)/2, (1 - 0 + a)/2],
# t**2, 1/sqrt(2))
#add(besselj(a, t)*bessely(b, t), [0, S(1)/2], [(a - b - 1)/2],
# [(a + b)/2, (a - b)/2], [(a - b - 1)/2, -(a + b)/2, (b - a)/2],
# t**2, 1/sqrt(pi))
#addi(bessely(a, t)**2,
# [(2/sqrt(pi), meijerg([], [S(1)/2, S(1)/2 - a], [0, a, -a],
# [S(1)/2 - a], t**2)),
# (1/sqrt(pi), meijerg([S(1)/2], [], [a], [-a, 0], t**2))],
# True)
#addi(bessely(a, t)*bessely(b, t),
# [(2/sqrt(pi), meijerg([], [0, S(1)/2, (1 - a - b)/2],
# [(a + b)/2, (a - b)/2, (b - a)/2, -(a + b)/2],
# [(1 - a - b)/2], t**2)),
# (1/sqrt(pi), meijerg([0, S(1)/2], [], [(a + b)/2],
# [-(a + b)/2, (a - b)/2, (b - a)/2], t**2))],
# True)
# Section 8.4.21 ?
# Section 8.4.22
add(besseli(a, t), [], [(1 + a)/2], [a/2], [-a/2, (1 + a)/2], t**2/4, pi)
# TODO many more formulas. should all be derivable
# Section 8.4.23
add(besselk(a, t), [], [], [a/2, -a/2], [], t**2/4, S(1)/2)
# TODO many more formulas. should all be derivable
# Complete elliptic integrals K(z) and E(z)
from sympy import elliptic_k, elliptic_e
add(elliptic_k(t), [S.Half, S.Half], [], [0], [0], -t, S.Half)
add(elliptic_e(t), [S.Half, 3*S.Half], [], [0], [0], -t, -S.Half/2)
####################################################################
# First some helper functions.
####################################################################
from sympy.utilities.timeutils import timethis
timeit = timethis('meijerg')
def _mytype(f, x):
    """ Create a hashable entity describing the type of f. """
    if x not in f.free_symbols:
        return ()
    elif f.is_Function:
        return (type(f),)
    else:
        types = [_mytype(a, x) for a in f.args]
        res = []
        for t in types:
            res += list(t)
        res.sort()
        return tuple(res)
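# For example, _mytype(z*exp(z), z) == (exp,): the key records only which
# function classes depend on z, which is all the lookup table needs.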
class _CoeffExpValueError(ValueError):
    """
    Exception raised by _get_coeff_exp, for internal use only.
    """
    pass

def _get_coeff_exp(expr, x):
    """
    When expr is known to be of the form c*x**b, with c and/or b possibly 1,
    return c, b.
    >>> from sympy.abc import x, a, b
    >>> from sympy.integrals.meijerint import _get_coeff_exp
    >>> _get_coeff_exp(a*x**b, x)
    (a, b)
    >>> _get_coeff_exp(x, x)
    (1, 1)
    >>> _get_coeff_exp(2*x, x)
    (2, 1)
    >>> _get_coeff_exp(x**3, x)
    (1, 3)
    """
    from sympy import powsimp
    (c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x)
    if not m:
        return c, S(0)
    [m] = m
    if m.is_Pow:
        if m.base != x:
            raise _CoeffExpValueError('expr not of form a*x**b')
        return c, m.exp
    elif m == x:
        return c, S(1)
    else:
        raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)
def _exponents(expr, x):
    """
    Find the exponents of ``x`` (not including zero) in ``expr``.
    >>> from sympy.integrals.meijerint import _exponents
    >>> from sympy.abc import x, y
    >>> from sympy import sin
    >>> _exponents(x, x)
    set([1])
    >>> _exponents(x**2, x)
    set([2])
    >>> _exponents(x**2 + x, x)
    set([1, 2])
    >>> _exponents(x**3*sin(x + x**y) + 1/x, x)
    set([-1, 1, 3, y])
    """
    def _exponents_(expr, x, res):
        if expr == x:
            res.update([1])
            return
        if expr.is_Pow and expr.base == x:
            res.update([expr.exp])
            return
        for arg in expr.args:
            _exponents_(arg, x, res)
    res = set()
    _exponents_(expr, x, res)
    return res

def _functions(expr, x):
    """ Find the types of functions in expr, to estimate the complexity. """
    from sympy import Function
    return set(e.func for e in expr.atoms(Function) if x in e.free_symbols)

def _find_splitting_points(expr, x):
    """
    Find numbers a such that a linear substitution x -> x + a would
    (hopefully) simplify expr.
    >>> from sympy.integrals.meijerint import _find_splitting_points as fsp
    >>> from sympy import sin
    >>> from sympy.abc import a, x
    >>> fsp(x, x)
    set([0])
    >>> fsp((x-1)**3, x)
    set([1])
    >>> fsp(sin(x+3)*x, x)
    set([-3, 0])
    """
    p, q = [Wild(n, exclude=[x]) for n in 'pq']
    def compute_innermost(expr, res):
        if not isinstance(expr, Expr):
            return
        m = expr.match(p*x + q)
        if m and m[p] != 0:
            res.add(-m[q]/m[p])
            return
        if expr.is_Atom:
            return
        for arg in expr.args:
            compute_innermost(arg, res)
    innermost = set()
    compute_innermost(expr, innermost)
    return innermost
def _split_mul(f, x):
    """
    Split expression ``f`` into fac, po, g, where fac is a constant factor,
    po = x**s for some s independent of x, and g is "the rest".
    >>> from sympy.integrals.meijerint import _split_mul
    >>> from sympy import sin
    >>> from sympy.abc import s, x
    >>> _split_mul((3*x)**s*sin(x**2)*x, x)
    (3**s, x*x**s, sin(x**2))
    """
    from sympy import polarify, unpolarify
    fac = S(1)
    po = S(1)
    g = S(1)
    f = expand_power_base(f)
    args = Mul.make_args(f)
    for a in args:
        if a == x:
            po *= x
        elif x not in a.free_symbols:
            fac *= a
        else:
            if a.is_Pow and x not in a.exp.free_symbols:
                c, t = a.base.as_coeff_mul(x)
                if t != (x,):
                    c, t = expand_mul(a.base).as_coeff_mul(x)
                if t == (x,):
                    po *= x**a.exp
                    fac *= unpolarify(polarify(c**a.exp, subs=False))
                    continue
            g *= a
    return fac, po, g
def _mul_args(f):
    """
    Return a list ``L`` such that Mul(*L) == f.
    If f is not a Mul or Pow, L=[f].
    If f=g**n for an integer n, L=[g]*n.
    If f is a Mul, L comes from applying _mul_args to all factors of f.
    """
    args = Mul.make_args(f)
    gs = []
    for g in args:
        if g.is_Pow and g.exp.is_Integer:
            n = g.exp
            base = g.base
            if n < 0:
                n = -n
                base = 1/base
            gs += [base]*n
        else:
            gs.append(g)
    return gs
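# For instance, following the branches above, _mul_args(x**2/sin(x)) gives
# [x, x, 1/sin(x)]: positive integer powers are unrolled and negative ones
# contribute repeated reciprocal factors.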
def _mul_as_two_parts(f):
    """
    Find all the ways to split f into a product of two terms.
    Return None on failure.
    Although the order is canonical from multiset_partitions, this is
    not necessarily the best order to process the terms. For example,
    if the case of len(gs) == 2 is removed and multiset is allowed to
    sort the terms, some tests fail.
    >>> from sympy.integrals.meijerint import _mul_as_two_parts
    >>> from sympy import sin, exp, ordered
    >>> from sympy.abc import x
    >>> list(ordered(_mul_as_two_parts(x*sin(x)*exp(x))))
    [(x, exp(x)*sin(x)), (x*exp(x), sin(x)), (x*sin(x), exp(x))]
    """
    gs = _mul_args(f)
    if len(gs) < 2:
        return None
    if len(gs) == 2:
        return [tuple(gs)]
    return [(Mul(*x), Mul(*y)) for (x, y) in multiset_partitions(gs, 2)]
def _inflate_g(g, n):
    """ Return C, h such that h is a G function of argument z**n and
    g = C*h. """
    # TODO should this be a method of meijerg?
    # See: [L, page 150, equation (5)]
    def inflate(params, n):
        """ (a1, .., ak) -> (a1/n, (a1+1)/n, ..., (ak + n-1)/n) """
        res = []
        for a in params:
            for i in range(n):
                res.append((a + i)/n)
        return res
    v = S(len(g.ap) - len(g.bq))
    C = n**(1 + g.nu + v/2)
    C /= (2*pi)**((n - 1)*g.delta)
    return C, meijerg(inflate(g.an, n), inflate(g.aother, n),
                      inflate(g.bm, n), inflate(g.bother, n),
                      g.argument**n * n**(n*v))
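# Concretely, inflate([a], 2) == [a/2, (a + 1)/2], so each parameter list is
# doubled while the argument z is traded for z**2 (times the constant C
# computed above).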
def _flip_g(g):
    """ Turn the G function into one of inverse argument
    (i.e. G(1/x) -> G'(x)) """
    # See [L], section 5.2
    def tr(l):
        return [1 - a for a in l]
    return meijerg(tr(g.bm), tr(g.bother), tr(g.an), tr(g.aother), 1/g.argument)
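# E.g. _flip_g(meijerg([a], [b], [c], [d], 1/x)) returns
# meijerg([1 - c], [1 - d], [1 - a], [1 - b], x): the a- and b-parameters
# swap roles and are reflected through 1.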
def _inflate_fox_h(g, a):
    r"""
    Let d denote the integrand in the definition of the G function ``g``.
    Consider the function H which is defined in the same way, but with
    integrand d/Gamma(a*s) (contour conventions as usual).
    If a is rational, the function H can be written as C*G, for a constant C
    and a G-function G.
    This function returns C, G.
    """
    if a < 0:
        return _inflate_fox_h(_flip_g(g), -a)
    p = S(a.p)
    q = S(a.q)
    # We use the substitution s->qs, i.e. inflate g by q. We are left with an
    # extra factor of Gamma(p*s), for which we use Gauss' multiplication
    # theorem.
    D, g = _inflate_g(g, q)
    z = g.argument
    D /= (2*pi)**((1 - p)/2)*p**(-S(1)/2)
    z /= p**p
    bs = [(n + 1)/p for n in range(p)]
    return D, meijerg(g.an, g.aother, g.bm, list(g.bother) + bs, z)
_dummies = {}

def _dummy(name, token, expr, **kwargs):
    """
    Return a dummy. This will return the same dummy if the same token+name is
    requested more than once, and it is not already in expr.
    This is for being cache-friendly.
    """
    d = _dummy_(name, token, **kwargs)
    if d in expr.free_symbols:
        return Dummy(name, **kwargs)
    return d

def _dummy_(name, token, **kwargs):
    """
    Return a dummy associated to name and token. Same effect as declaring
    it globally.
    """
    global _dummies
    if not (name, token) in _dummies:
        _dummies[(name, token)] = Dummy(name, **kwargs)
    return _dummies[(name, token)]
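# Consequently, repeated calls like _dummy('t', 'meijerint-indefinite', expr)
# hand back the same cached Dummy unless expr already contains it, keeping
# expressions (and the @cacheit caches built on them) stable across calls.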
def _is_analytic(f, x):
    """ Check if f(x), when expressed using G functions on the positive reals,
    will in fact agree with the G functions almost everywhere """
    from sympy import Heaviside, Abs
    return not any(x in expr.free_symbols for expr in f.atoms(Heaviside, Abs))
def _condsimp(cond):
"""
Do naive simplifications on ``cond``.
Note that this routine is completely ad-hoc, simplification rules being
added as need arises rather than following any logical pattern.
>>> from sympy.integrals.meijerint import _condsimp as simp
>>> from sympy import Or, Eq, unbranched_argument as arg, And
>>> from sympy.abc import x, y, z
>>> simp(Or(x < y, z, Eq(x, y)))
Or(x <= y, z)
>>> simp(Or(x <= y, And(x < y, z)))
x <= y
"""
from sympy import (
symbols, Wild, Eq, unbranched_argument, exp_polar, pi, I,
periodic_argument, oo, polar_lift)
from sympy.logic.boolalg import BooleanFunction
if not isinstance(cond, BooleanFunction):
return cond
cond = cond.func(*list(map(_condsimp, cond.args)))
change = True
p, q, r = symbols('p q r', cls=Wild)
rules = [
(Or(p < q, Eq(p, q)), p <= q),
# The next two obviously are instances of a general pattern, but it is
# easier to spell out the few cases we care about.
(And(abs(unbranched_argument(p)) <= pi,
abs(unbranched_argument(exp_polar(-2*pi*I)*p)) <= pi),
Eq(unbranched_argument(exp_polar(-I*pi)*p), 0)),
(And(abs(unbranched_argument(p)) <= pi/2,
abs(unbranched_argument(exp_polar(-pi*I)*p)) <= pi/2),
Eq(unbranched_argument(exp_polar(-I*pi/2)*p), 0)),
(Or(p <= q, And(p < q, r)), p <= q)
]
while change:
change = False
for fro, to in rules:
if fro.func != cond.func:
continue
for n, arg in enumerate(cond.args):
if r in fro.args[0].free_symbols:
m = arg.match(fro.args[1])
num = 1
else:
num = 0
m = arg.match(fro.args[0])
if not m:
continue
otherargs = [x.subs(m) for x in fro.args[:num] + fro.args[num + 1:]]
otherlist = [n]
for arg2 in otherargs:
for k, arg3 in enumerate(cond.args):
if k in otherlist:
continue
if arg2 == arg3:
otherlist += [k]
break
if arg3.func is And and arg2.args[1] == r and \
arg2.func is And and arg2.args[0] in arg3.args:
otherlist += [k]
break
if arg3.func is And and arg2.args[0] == r and \
arg2.func is And and arg2.args[1] in arg3.args:
otherlist += [k]
break
if len(otherlist) != len(otherargs) + 1:
continue
newargs = [arg for (k, arg) in enumerate(cond.args)
if k not in otherlist] + [to.subs(m)]
cond = cond.func(*newargs)
change = True
break
# final tweak
def repl_eq(orig):
if orig.lhs == 0:
expr = orig.rhs
elif orig.rhs == 0:
expr = orig.lhs
else:
return orig
m = expr.match(unbranched_argument(polar_lift(p)**q))
if not m:
if expr.func is periodic_argument and not expr.args[0].is_polar \
and expr.args[1] == oo:
return (expr.args[0] > 0)
return orig
return (m[p] > 0)
return cond.replace(
lambda expr: expr.is_Relational and expr.rel_op == '==',
repl_eq)
def _eval_cond(cond):
    """ Re-evaluate the conditions. """
    if isinstance(cond, bool):
        return cond
    return _condsimp(cond.doit())
####################################################################
# Now the "backbone" functions to do actual integration.
####################################################################
def _my_principal_branch(expr, period, full_pb=False):
    """ Bring expr nearer to its principal branch by removing superfluous
    factors.
    This function does *not* guarantee to yield the principal branch,
    to avoid introducing opaque principal_branch() objects,
    unless full_pb=True. """
    from sympy import principal_branch
    res = principal_branch(expr, period)
    if not full_pb:
        res = res.replace(principal_branch, lambda x, y: x)
    return res

def _rewrite_saxena_1(fac, po, g, x):
    """
    Rewrite the integral fac*po*g dx, from zero to infinity, as
    integral fac*G, where G has argument a*x. Note po=x**s.
    Return fac, G.
    """
    _, s = _get_coeff_exp(po, x)
    a, b = _get_coeff_exp(g.argument, x)
    period = g.get_period()
    a = _my_principal_branch(a, period)
    # We substitute t = x**b.
    C = fac/(abs(b)*a**((s + 1)/b - 1))
    # Absorb a factor of (at)**((1 + s)/b - 1).
    def tr(l):
        return [a + (1 + s)/b - 1 for a in l]
    return C, meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother),
                      a*x)
def _check_antecedents_1(g, x, helper=False):
"""
Return a condition under which the mellin transform of g exists.
Any power of x has already been absorbed into the G function,
so this is just int_0^\infty g dx.
See [L, section 5.6.1]. (Note that s=1.)
If ``helper`` is True, only check if the MT exists at infinity, i.e. if
int_1^\infty g dx exists.
"""
# NOTE if you update these conditions, please update the documentation as well
from sympy import Eq, Not, ceiling, Ne, re, unbranched_argument as arg
delta = g.delta
eta, _ = _get_coeff_exp(g.argument, x)
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
xi = m + n - p
if p > q:
def tr(l):
return [1 - x for x in l]
return _check_antecedents_1(meijerg(tr(g.bm), tr(g.bother),
tr(g.an), tr(g.aother), x/eta),
x)
tmp = []
for b in g.bm:
tmp += [-re(b) < 1]
for a in g.an:
tmp += [1 < 1 - re(a)]
cond_3 = And(*tmp)
for b in g.bother:
tmp += [-re(b) < 1]
for a in g.aother:
tmp += [1 < 1 - re(a)]
cond_3_star = And(*tmp)
cond_4 = (-re(g.nu) + (q + 1 - p)/2 > q - p)
def debug(*msg):
_debug(*msg)
debug('Checking antecedents for 1 function:')
debug(' delta=%s, eta=%s, m=%s, n=%s, p=%s, q=%s'
% (delta, eta, m, n, p, q))
debug(' ap = %s, %s' % (list(g.an), list(g.aother)))
debug(' bq = %s, %s' % (list(g.bm), list(g.bother)))
debug(' cond_3=%s, cond_3*=%s, cond_4=%s' % (cond_3, cond_3_star, cond_4))
conds = []
# case 1
case1 = []
tmp1 = [1 <= n, p < q, 1 <= m]
tmp2 = [1 <= p, 1 <= m, Eq(q, p + 1), Not(And(Eq(n, 0), Eq(m, p + 1)))]
tmp3 = [1 <= p, Eq(q, p)]
for k in range(ceiling(delta/2) + 1):
tmp3 += [Ne(abs(arg(eta)), (delta - 2*k)*pi)]
tmp = [delta > 0, abs(arg(eta)) < delta*pi]
extra = [Ne(eta, 0), cond_3]
if helper:
extra = []
for t in [tmp1, tmp2, tmp3]:
case1 += [And(*(t + tmp + extra))]
conds += case1
debug(' case 1:', case1)
# case 2
extra = [cond_3]
if helper:
extra = []
case2 = [And(Eq(n, 0), p + 1 <= m, m <= q,
abs(arg(eta)) < delta*pi, *extra)]
conds += case2
debug(' case 2:', case2)
# case 3
extra = [cond_3, cond_4]
if helper:
extra = []
case3 = [And(p < q, 1 <= m, delta > 0, Eq(abs(arg(eta)), delta*pi),
*extra)]
case3 += [And(p <= q - 2, Eq(delta, 0), Eq(abs(arg(eta)), 0), *extra)]
conds += case3
debug(' case 3:', case3)
# TODO altered cases 4-7
# extra case from the Wolfram functions site:
# (reproduced verbatim from Prudnikov, section 2.24.2)
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/01/
case_extra = []
case_extra += [Eq(p, q), Eq(delta, 0), Eq(arg(eta), 0), Ne(eta, 0)]
if not helper:
case_extra += [cond_3]
s = []
for a, b in zip(g.ap, g.bq):
s += [b - a]
case_extra += [re(Add(*s)) < 0]
case_extra = And(*case_extra)
conds += [case_extra]
debug(' extra case:', [case_extra])
case_extra_2 = [And(delta > 0, abs(arg(eta)) < delta*pi)]
if not helper:
case_extra_2 += [cond_3]
case_extra_2 = And(*case_extra_2)
conds += [case_extra_2]
debug(' second extra case:', [case_extra_2])
# TODO This leaves only one case from the three listed by Prudnikov.
# Investigate if these indeed cover everything; if so, remove the rest.
return Or(*conds)
def _int0oo_1(g, x):
    """
    Evaluate int_0^\infty g dx using G functions,
    assuming the necessary conditions are fulfilled.
    >>> from sympy.abc import a, b, c, d, x, y
    >>> from sympy import meijerg
    >>> from sympy.integrals.meijerint import _int0oo_1
    >>> _int0oo_1(meijerg([a], [b], [c], [d], x*y), x)
    gamma(-a)*gamma(c + 1)/(y*gamma(-d)*gamma(b + 1))
    """
    # See [L, section 5.6.1]. Note that s=1.
    from sympy import gamma, combsimp, unpolarify
    eta, _ = _get_coeff_exp(g.argument, x)
    res = 1/eta
    # XXX TODO we should reduce order first
    for b in g.bm:
        res *= gamma(b + 1)
    for a in g.an:
        res *= gamma(1 - a - 1)
    for b in g.bother:
        res /= gamma(1 - b - 1)
    for a in g.aother:
        res /= gamma(a + 1)
    return combsimp(unpolarify(res))
def _rewrite_saxena(fac, po, g1, g2, x, full_pb=False):
"""
Rewrite the integral fac*po*g1*g2 from 0 to oo in terms of G functions
with argument c*x.
Return C, f1, f2 such that integral C f1 f2 from 0 to infinity equals
integral fac po g1 g2 from 0 to infinity.
>>> from sympy.integrals.meijerint import _rewrite_saxena
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg
>>> g1 = meijerg([], [], [0], [], s*t)
>>> g2 = meijerg([], [], [m/2], [-m/2], t**2/4)
>>> r = _rewrite_saxena(1, t**0, g1, g2, t)
>>> r[0]
s/(4*sqrt(pi))
>>> r[1]
meijerg(((), ()), ((-1/2, 0), ()), s**2*t/4)
>>> r[2]
meijerg(((), ()), ((m/2,), (-m/2,)), t/4)
"""
from sympy.core.numbers import ilcm
def pb(g):
a, b = _get_coeff_exp(g.argument, x)
per = g.get_period()
return meijerg(g.an, g.aother, g.bm, g.bother,
_my_principal_branch(a, per, full_pb)*x**b)
_, s = _get_coeff_exp(po, x)
_, b1 = _get_coeff_exp(g1.argument, x)
_, b2 = _get_coeff_exp(g2.argument, x)
if (b1 < 0) == True:
b1 = -b1
g1 = _flip_g(g1)
if (b2 < 0) == True:
b2 = -b2
g2 = _flip_g(g2)
if not b1.is_Rational or not b2.is_Rational:
return
m1, n1 = b1.p, b1.q
m2, n2 = b2.p, b2.q
tau = ilcm(m1*n2, m2*n1)
r1 = tau//(m1*n2)
r2 = tau//(m2*n1)
C1, g1 = _inflate_g(g1, r1)
C2, g2 = _inflate_g(g2, r2)
g1 = pb(g1)
g2 = pb(g2)
fac *= C1*C2
a1, b = _get_coeff_exp(g1.argument, x)
a2, _ = _get_coeff_exp(g2.argument, x)
# arbitrarily tack on the x**s part to g1
# TODO should we try both?
exp = (s + 1)/b - 1
fac = fac/(abs(b) * a1**exp)
def tr(l):
return [a + exp for a in l]
g1 = meijerg(tr(g1.an), tr(g1.aother), tr(g1.bm), tr(g1.bother), a1*x)
g2 = meijerg(g2.an, g2.aother, g2.bm, g2.bother, a2*x)
return powdenest(fac, polar=True), g1, g2
def _check_antecedents(g1, g2, x):
""" Return a condition under which the integral theorem applies. """
from sympy import re, Eq, Ne, cos, I, exp, sin, sign, unpolarify
from sympy import arg as arg_, unbranched_argument as arg
# Yes, this is madness.
# XXX TODO this is a testing *nightmare*
# NOTE if you update these conditions, please update the documentation as well
# The following conditions are found in
# [P], Section 2.24.1
#
# They are also reproduced (verbatim!) at
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/
#
# Note: k=l=r=alpha=1
sigma, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
s, t, u, v = S([len(g1.bm), len(g1.an), len(g1.ap), len(g1.bq)])
m, n, p, q = S([len(g2.bm), len(g2.an), len(g2.ap), len(g2.bq)])
bstar = s + t - (u + v)/2
cstar = m + n - (p + q)/2
rho = g1.nu + (u - v)/2 + 1
mu = g2.nu + (p - q)/2 + 1
phi = q - p - (v - u)
eta = 1 - (v - u) - mu - rho
psi = (pi*(q - m - n) + abs(arg(omega)))/(q - p)
theta = (pi*(v - s - t) + abs(arg(sigma)))/(v - u)
lambda_c = (q - p)*abs(omega)**(1/(q - p))*cos(psi) \
+ (v - u)*abs(sigma)**(1/(v - u))*cos(theta)
def lambda_s0(c1, c2):
return c1*(q - p)*abs(omega)**(1/(q - p))*sin(psi) \
+ c2*(v - u)*abs(sigma)**(1/(v - u))*sin(theta)
lambda_s = Piecewise(
((lambda_s0(+1, +1)*lambda_s0(-1, -1)),
And(Eq(arg(sigma), 0), Eq(arg(omega), 0))),
(lambda_s0(sign(arg(omega)), +1)*lambda_s0(sign(arg(omega)), -1),
And(Eq(arg(sigma), 0), Ne(arg(omega), 0))),
(lambda_s0(+1, sign(arg(sigma)))*lambda_s0(-1, sign(arg(sigma))),
And(Ne(arg(sigma), 0), Eq(arg(omega), 0))),
(lambda_s0(sign(arg(omega)), sign(arg(sigma))), True))
_debug('Checking antecedents:')
_debug(' sigma=%s, s=%s, t=%s, u=%s, v=%s, b*=%s, rho=%s'
% (sigma, s, t, u, v, bstar, rho))
_debug(' omega=%s, m=%s, n=%s, p=%s, q=%s, c*=%s, mu=%s,'
% (omega, m, n, p, q, cstar, mu))
_debug(' phi=%s, eta=%s, psi=%s, theta=%s' % (phi, eta, psi, theta))
def _c1():
for g in [g1, g2]:
for i in g.an:
for j in g.bm:
diff = i - j
if diff.is_integer and diff.is_positive:
return False
return True
c1 = _c1()
c2 = And(*[re(1 + i + j) > 0 for i in g1.bm for j in g2.bm])
c3 = And(*[re(1 + i + j) < 1 + 1 for i in g1.an for j in g2.an])
c4 = And(*[(p - q)*re(1 + i - 1) - re(mu) > -S(3)/2 for i in g1.an])
c5 = And(*[(p - q)*re(1 + i) - re(mu) > -S(3)/2 for i in g1.bm])
c6 = And(*[(u - v)*re(1 + i - 1) - re(rho) > -S(3)/2 for i in g2.an])
c7 = And(*[(u - v)*re(1 + i) - re(rho) > -S(3)/2 for i in g2.bm])
c8 = (abs(phi) + 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c9 = (abs(phi) - 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c10 = (abs(arg(sigma)) < bstar*pi)
c11 = Eq(abs(arg(sigma)), bstar*pi)
c12 = (abs(arg(omega)) < cstar*pi)
c13 = Eq(abs(arg(omega)), cstar*pi)
# The following condition is *not* implemented as stated on the Wolfram
# functions site. In the book of Prudnikov there is an additional part
# (the And involving re()). However, I only have this book in Russian, and
# I don't read any Russian. The following condition is what other people
# have told me it means.
# Worryingly, it is different from the condition implemented in REDUCE.
# The REDUCE implementation:
# https://reduce-algebra.svn.sourceforge.net/svnroot/reduce-algebra/trunk/packages/defint/definta.red
# (search for tst14)
# The Wolfram alpha version:
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/03/0014/
z0 = exp(-(bstar + cstar)*pi*I)
zos = unpolarify(z0*omega/sigma)
zso = unpolarify(z0*sigma/omega)
if zos == 1/zso:
c14 = And(Eq(phi, 0), bstar + cstar <= 1,
Or(Ne(zos, 1), re(mu + rho + v - u) < 1,
re(mu + rho + q - p) < 1))
else:
c14 = And(Eq(phi, 0), bstar - 1 + cstar <= 0,
Or(And(Ne(zos, 1), abs(arg_(1 - zos)) < pi),
And(re(mu + rho + v - u) < 1, Eq(zos, 1))))
c14_alt = And(Eq(phi, 0), cstar - 1 + bstar <= 0,
Or(And(Ne(zso, 1), abs(arg_(1 - zso)) < pi),
And(re(mu + rho + q - p) < 1, Eq(zso, 1))))
# Since r=k=l=1, in our case there is c14_alt which is the same as calling
# us with (g1, g2) = (g2, g1). The conditions below enumerate all cases
# (i.e. we don't have to try arguments reversed by hand), and indeed try
# all symmetric cases. (i.e. whenever there is a condition involving c14,
# there is also a dual condition which is exactly what we would get when g1,
# g2 were interchanged, *but c14 was unaltered*).
# Hence the following seems correct:
c14 = Or(c14, c14_alt)
tmp = [lambda_c > 0,
And(Eq(lambda_c, 0), Ne(lambda_s, 0), re(eta) > -1),
And(Eq(lambda_c, 0), Eq(lambda_s, 0), re(eta) > 0)]
c15 = Or(*tmp)
if _eval_cond(lambda_c > 0) != False:
c15 = (lambda_c > 0)
for cond, i in [(c1, 1), (c2, 2), (c3, 3), (c4, 4), (c5, 5), (c6, 6),
(c7, 7), (c8, 8), (c9, 9), (c10, 10), (c11, 11),
(c12, 12), (c13, 13), (c14, 14), (c15, 15)]:
_debug(' c%s:' % i, cond)
# We will return Or(*conds)
conds = []
def pr(count):
_debug(' case %s:' % count, conds[-1])
conds += [And(m*n*s*t != 0, bstar.is_positive is True, cstar.is_positive is True, c1, c2, c3, c10,
c12)] # 1
pr(1)
conds += [And(Eq(u, v), Eq(bstar, 0), cstar.is_positive is True, sigma.is_positive is True, re(rho) < 1,
c1, c2, c3, c12)] # 2
pr(2)
conds += [And(Eq(p, q), Eq(cstar, 0), bstar.is_positive is True, omega.is_positive is True, re(mu) < 1,
c1, c2, c3, c10)] # 3
pr(3)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu) < 1, re(rho) < 1,
Ne(sigma, omega), c1, c2, c3)] # 4
pr(4)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu + rho) < 1,
Ne(omega, sigma), c1, c2, c3)] # 5
pr(5)
conds += [And(p > q, s.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c5, c10, c13)] # 6
pr(6)
conds += [And(p < q, t.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c4, c10, c13)] # 7
pr(7)
conds += [And(u > v, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c7, c11, c12)] # 8
pr(8)
conds += [And(u < v, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c6, c11, c12)] # 9
pr(9)
conds += [And(p > q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c5, c13)] # 10
pr(10)
conds += [And(p < q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c4, c13)] # 11
pr(11)
conds += [And(Eq(p, q), u > v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c7, c11)] # 12
pr(12)
conds += [And(Eq(p, q), u < v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c6, c11)] # 13
pr(13)
conds += [And(p < q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c7, c11, c13)] # 14
pr(14)
conds += [And(p > q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c6, c11, c13)] # 15
pr(15)
conds += [And(p > q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c7, c8, c11, c13, c14)] # 16
pr(16)
conds += [And(p < q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c6, c9, c11, c13, c14)] # 17
pr(17)
conds += [And(Eq(t, 0), s.is_positive is True, bstar.is_positive is True, phi.is_positive is True, c1, c2, c10)] # 18
pr(18)
conds += [And(Eq(s, 0), t.is_positive is True, bstar.is_positive is True, phi.is_negative is True, c1, c3, c10)] # 19
pr(19)
conds += [And(Eq(n, 0), m.is_positive is True, cstar.is_positive is True, phi.is_negative is True, c1, c2, c12)] # 20
pr(20)
conds += [And(Eq(m, 0), n.is_positive is True, cstar.is_positive is True, phi.is_positive is True, c1, c3, c12)] # 21
pr(21)
conds += [And(Eq(s*t, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 22
pr(22)
conds += [And(Eq(m*n, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 23
pr(23)
# The following case is from [Luke1969]. As far as I can tell, it is *not*
# covered by Prudnikov's.
# Let G1 and G2 be the two G-functions. Suppose the integral exists from
# 0 to a > 0 (this is the easy part), that G1 decays exponentially at
# infinity, and that the mellin transform of G2 exists.
# Then the integral exists.
mt1_exists = _check_antecedents_1(g1, x, helper=True)
mt2_exists = _check_antecedents_1(g2, x, helper=True)
conds += [And(mt2_exists, Eq(t, 0), u < s, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E1')
conds += [And(mt2_exists, Eq(s, 0), v < t, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E2')
conds += [And(mt1_exists, Eq(n, 0), p < m, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E3')
conds += [And(mt1_exists, Eq(m, 0), q < n, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E4')
# Let's short-circuit if this worked ...
# the rest is corner-cases and terrible to read.
r = Or(*conds)
if _eval_cond(r) != False:
return r
conds += [And(m + n > p, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
abs(arg(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 24
pr(24)
conds += [And(m + n > q, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
abs(arg(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 25
pr(25)
conds += [And(Eq(p, q - 1), Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < abs(arg(omega)),
c1, c2, c10, c14, c15)] # 26
pr(26)
conds += [And(Eq(p, q + 1), Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < abs(arg(omega)),
c1, c3, c10, c14, c15)] # 27
pr(27)
conds += [And(p < q - 1, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < abs(arg(omega)),
abs(arg(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 28
pr(28)
conds += [And(
p > q + 1, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar >= 0,
cstar*pi < abs(arg(omega)),
abs(arg(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 29
pr(29)
conds += [And(Eq(n, 0), Eq(phi, 0), s + t > 0, m.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
abs(arg(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 30
pr(30)
conds += [And(Eq(m, 0), Eq(phi, 0), s + t > v, n.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
abs(arg(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 31
pr(31)
conds += [And(Eq(n, 0), Eq(phi, 0), Eq(u, v - 1), m.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (bstar + 1)*pi,
c1, c2, c12, c14, c15)] # 32
pr(32)
conds += [And(Eq(m, 0), Eq(phi, 0), Eq(u, v + 1), n.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (bstar + 1)*pi,
c1, c3, c12, c14, c15)] # 33
pr(33)
conds += [And(
Eq(n, 0), Eq(phi, 0), u < v - 1, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 34
pr(34)
conds += [And(
Eq(m, 0), Eq(phi, 0), u > v + 1, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 35
pr(35)
return Or(*conds)
# NOTE An alternative, but as far as I can tell weaker, set of conditions
# can be found in [L, section 5.6.2].
def _int0oo(g1, g2, x):
    """
    Express integral from zero to infinity g1*g2 using a G function,
    assuming the necessary conditions are fulfilled.
    >>> from sympy.integrals.meijerint import _int0oo
    >>> from sympy.abc import s, t, m
    >>> from sympy import meijerg, S
    >>> g1 = meijerg([], [], [-S(1)/2, 0], [], s**2*t/4)
    >>> g2 = meijerg([], [], [m/2], [-m/2], t/4)
    >>> _int0oo(g1, g2, t)
    4*meijerg(((1/2, 0), ()), ((m/2,), (-m/2,)), s**(-2))/s**2
    """
    # See: [L, section 5.6.2, equation (1)]
    eta, _ = _get_coeff_exp(g1.argument, x)
    omega, _ = _get_coeff_exp(g2.argument, x)
    def neg(l):
        return [-x for x in l]
    a1 = neg(g1.bm) + list(g2.an)
    a2 = list(g2.aother) + neg(g1.bother)
    b1 = neg(g1.an) + list(g2.bm)
    b2 = list(g2.bother) + neg(g1.aother)
    return meijerg(a1, a2, b1, b2, omega/eta)/eta

def _rewrite_inversion(fac, po, g, x):
    """ Absorb ``po`` == x**s into g. """
    _, s = _get_coeff_exp(po, x)
    a, b = _get_coeff_exp(g.argument, x)
    def tr(l):
        return [t + s/b for t in l]
    return (powdenest(fac/a**(s/b), polar=True),
            meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), g.argument))
def _check_antecedents_inversion(g, x):
""" Check antecedents for the laplace inversion integral. """
from sympy import re, im, Or, And, Eq, exp, I, Add, nan, Ne
_debug('Checking antecedents for inversion:')
z = g.argument
_, e = _get_coeff_exp(z, x)
if e < 0:
_debug(' Flipping G.')
# We want to assume that argument gets large as |x| -> oo
return _check_antecedents_inversion(_flip_g(g), x)
def statement_half(a, b, c, z, plus):
coeff, exponent = _get_coeff_exp(z, x)
a *= exponent
b *= coeff**c
c *= exponent
conds = []
wp = b*exp(I*re(c)*pi/2)
wm = b*exp(-I*re(c)*pi/2)
if plus:
w = wp
else:
w = wm
conds += [And(Or(Eq(b, 0), re(c) <= 0), re(a) <= -1)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) < 0)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) <= 0,
re(a) <= -1)]
return Or(*conds)
def statement(a, b, c, z):
""" Provide a convergence statement for z**a * exp(b*z**c),
c/f sphinx docs. """
return And(statement_half(a, b, c, z, True),
statement_half(a, b, c, z, False))
# Notations from [L], section 5.7-10
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
tau = m + n - p
nu = q - m - n
rho = (tau - nu)/2
sigma = q - p
if sigma == 1:
epsilon = S(1)/2
elif sigma > 1:
epsilon = 1
else:
epsilon = nan
theta = ((1 - sigma)/2 + Add(*g.bq) - Add(*g.ap))/sigma
delta = g.delta
_debug(' m=%s, n=%s, p=%s, q=%s, tau=%s, nu=%s, rho=%s, sigma=%s' % (
m, n, p, q, tau, nu, rho, sigma))
_debug(' epsilon=%s, theta=%s, delta=%s' % (epsilon, theta, delta))
# First check if the computation is valid.
if not (g.delta >= e/2 or (p >= 1 and p >= q)):
_debug(' Computation not valid for these parameters.')
return False
# Now check if the inversion integral exists.
# Test "condition A"
for a in g.an:
for b in g.bm:
if (a - b).is_integer and a > b:
_debug(' Not a valid G function.')
return False
# There are two cases. If p >= q, we can directly use a slater expansion
# like [L], 5.2 (11). Note in particular that the asymptotics of such an
# expansion even hold when some of the parameters differ by integers, i.e.
# the formula itself would not be valid! (b/c G functions are cts. in their
# parameters)
# When p < q, we need to use the theorems of [L], 5.10.
if p >= q:
_debug(' Using asymptotic Slater expansion.')
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def E(z):
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def H(z):
return statement(theta, -sigma, 1/sigma, z)
def Hp(z):
return statement_half(theta, -sigma, 1/sigma, z, True)
def Hm(z):
return statement_half(theta, -sigma, 1/sigma, z, False)
# [L], section 5.10
conds = []
# Theorem 1
conds += [And(1 <= n, p < q, 1 <= m, rho*pi - delta >= pi/2, delta > 0,
E(z*exp(I*pi*(nu + 1))))]
# Theorem 2, statements (2) and (3)
conds += [And(p + 1 <= m, m + 1 <= q, delta > 0, delta < pi/2, n == 0,
(m - p + 1)*pi - delta >= pi/2,
Hp(z*exp(I*pi*(q - m))), Hm(z*exp(-I*pi*(q - m))))]
# Theorem 2, statement (5)
conds += [And(p < q, m == q, n == 0, delta > 0,
(sigma + epsilon)*pi - delta >= pi/2, H(z))]
# Theorem 3, statements (6) and (7)
conds += [And(Or(And(p <= q - 2, 1 <= tau, tau <= sigma/2),
And(p + 1 <= m + n, m + n <= (p + q)/2)),
delta > 0, delta < pi/2, (tau + 1)*pi - delta >= pi/2,
Hp(z*exp(I*pi*nu)), Hm(z*exp(-I*pi*nu)))]
# Theorem 4, statements (10) and (11)
conds += [And(p < q, 1 <= m, rho > 0, delta > 0, delta + rho*pi < pi/2,
(tau + epsilon)*pi - delta >= pi/2,
Hp(z*exp(I*pi*nu)), Hm(z*exp(-I*pi*nu)))]
# Trivial case
conds += [m == 0]
# TODO
# Theorem 5 is quite general
# Theorem 6 contains special cases for q=p+1
return Or(*conds)
def _int_inversion(g, x, t):
"""
Compute the laplace inversion integral, assuming the formula applies.
"""
b, a = _get_coeff_exp(g.argument, x)
C, g = _inflate_fox_h(meijerg(g.an, g.aother, g.bm, g.bother, b/t**a), -a)
return C/t*g
####################################################################
# Finally, the real meat.
####################################################################
_lookup_table = None
@cacheit
@timeit
def _rewrite_single(f, x, recursive=True):
"""
Try to rewrite f as a sum of single G functions of the form
C*x**s*G(a*x**b), where b is a rational number and C is independent of x.
We guarantee that result.argument.as_coeff_mul(x) returns (a, (x**b,))
or (a, ()).
Returns a list of tuples (C, s, G) and a condition cond.
Returns None on failure.
"""
from sympy import polarify, unpolarify, oo, zoo, Tuple
global _lookup_table
if not _lookup_table:
_lookup_table = {}
_create_lookup_table(_lookup_table)
if isinstance(f, meijerg):
from sympy import factor
coeff, m = factor(f.argument, x).as_coeff_mul(x)
if len(m) > 1:
return None
m = m[0]
if m.is_Pow:
if m.base != x or not m.exp.is_Rational:
return None
elif m != x:
return None
return [(1, 0, meijerg(f.an, f.aother, f.bm, f.bother, coeff*m))], True
f_ = f
f = f.subs(x, z)
t = _mytype(f, z)
if t in _lookup_table:
l = _lookup_table[t]
for formula, terms, cond, hint in l:
subs = f.match(formula, old=True)
if subs:
subs_ = {}
for fro, to in subs.items():
subs_[fro] = unpolarify(polarify(to, lift=True),
exponents_only=True)
subs = subs_
if not isinstance(hint, bool):
hint = hint.subs(subs)
if hint == False:
continue
if not isinstance(cond, (bool, BooleanAtom)):
cond = unpolarify(cond.subs(subs))
if _eval_cond(cond) == False:
continue
if not isinstance(terms, list):
terms = terms(subs)
res = []
for fac, g in terms:
r1 = _get_coeff_exp(unpolarify(fac.subs(subs).subs(z, x),
exponents_only=True), x)
g = g.subs(subs).subs(z, x)
# NOTE these substitutions can in principle introduce oo,
# zoo and other absurdities. It shouldn't matter,
# but better be safe.
if Tuple(*(r1 + (g,))).has(oo, zoo, -oo):
continue
g = meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(g.argument, exponents_only=True))
res.append(r1 + (g,))
if res:
return res, cond
# try recursive mellin transform
if not recursive:
return None
_debug('Trying recursive Mellin transform method.')
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, IntegralTransformError,
MellinTransformStripError)
from sympy import oo, nan, zoo, simplify, cancel
def my_imt(F, s, x, strip):
""" Calling simplify() all the time is slow and not helpful, since
most of the time it only factors things in a way that has to be
un-done anyway. But sometimes it can remove apparent poles. """
# XXX should this be in inverse_mellin_transform?
try:
return inverse_mellin_transform(F, s, x, strip,
as_meijerg=True, needeval=True)
except MellinTransformStripError:
return inverse_mellin_transform(
simplify(cancel(expand(F))), s, x, strip,
as_meijerg=True, needeval=True)
f = f_
s = _dummy('s', 'rewrite-single', f)
# to avoid infinite recursion, we have to force the two g functions case
def my_integrator(f, x):
from sympy import Integral, hyperexpand
r = _meijerint_definite_4(f, x, only_double=True)
if r is not None:
res, cond = r
res = _my_unpolarify(hyperexpand(res, rewrite='nonrepsmall'))
return Piecewise((res, cond),
(Integral(f, (x, 0, oo)), True))
return Integral(f, (x, 0, oo))
try:
F, strip, _ = mellin_transform(f, x, s, integrator=my_integrator,
simplify=False, needeval=True)
g = my_imt(F, s, x, strip)
except IntegralTransformError:
g = None
if g is None:
# We try to find an expression by analytic continuation.
# (also if the dummy is already in the expression, there is no point in
# putting in another one)
a = _dummy_('a', 'rewrite-single')
if a not in f.free_symbols and _is_analytic(f, x):
try:
F, strip, _ = mellin_transform(f.subs(x, a*x), x, s,
integrator=my_integrator,
needeval=True, simplify=False)
g = my_imt(F, s, x, strip).subs(a, 1)
except IntegralTransformError:
g = None
if g is None or g.has(oo, nan, zoo):
_debug('Recursive Mellin transform failed.')
return None
args = Add.make_args(g)
res = []
for f in args:
c, m = f.as_coeff_mul(x)
if len(m) > 1:
raise NotImplementedError('Unexpected form...')
g = m[0]
a, b = _get_coeff_exp(g.argument, x)
res += [(c, 0, meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(polarify(
a, lift=True), exponents_only=True)
*x**b))]
_debug('Recursive Mellin transform worked:', g)
return res, True
def _rewrite1(f, x, recursive=True):
"""
Try to rewrite f using a (sum of) single G functions with argument a*x**b.
Return fac, po, g such that f = fac*po*g, fac is independent of x
and po = x**s.
Here g is a result from _rewrite_single.
Return None on failure.
"""
fac, po, g = _split_mul(f, x)
g = _rewrite_single(g, x, recursive)
if g:
return fac, po, g[0], g[1]
def _rewrite2(f, x):
"""
Try to rewrite f as a product of two G functions of arguments a*x**b.
Return fac, po, g1, g2 such that f = fac*po*g1*g2, where fac is
independent of x and po is x**s.
Here g1 and g2 are results of _rewrite_single.
Returns None on failure.
"""
fac, po, g = _split_mul(f, x)
if any(_rewrite_single(expr, x, False) is None for expr in _mul_args(g)):
return None
l = _mul_as_two_parts(g)
if not l:
return None
l = list(ordered(l, [
lambda p: max(len(_exponents(p[0], x)), len(_exponents(p[1], x))),
lambda p: max(len(_functions(p[0], x)), len(_functions(p[1], x))),
lambda p: max(len(_find_splitting_points(p[0], x)),
len(_find_splitting_points(p[1], x)))]))
for recursive in [False, True]:
for fac1, fac2 in l:
g1 = _rewrite_single(fac1, x, recursive)
g2 = _rewrite_single(fac2, x, recursive)
if g1 and g2:
cond = And(g1[1], g2[1])
if cond != False:
return fac, po, g1[0], g2[0], cond
def meijerint_indefinite(f, x):
"""
Compute an indefinite integral of ``f`` by rewriting it as a G function.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_indefinite
>>> from sympy import sin
>>> from sympy.abc import x
>>> meijerint_indefinite(sin(x), x)
-cos(x)
"""
from sympy import hyper, meijerg
results = []
for a in sorted(_find_splitting_points(f, x) | set([S(0)]), key=default_sort_key):
res = _meijerint_indefinite_1(f.subs(x, x + a), x)
if not res:
continue
res = res.subs(x, x - a)
if _has(res, hyper, meijerg):
results.append(res)
else:
return res
if f.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_indefinite(
_rewrite_hyperbolics_as_exp(f), x)
if rv:
if not type(rv) is list:
return collect(factor_terms(rv), rv.atoms(exp))
results.extend(rv)
if results:
return next(ordered(results))
def _meijerint_indefinite_1(f, x):
""" Helper that does not attempt any substitution. """
from sympy import Integral, piecewise_fold
_debug('Trying to compute the indefinite integral of', f, 'wrt', x)
gs = _rewrite1(f, x)
if gs is None:
# Note: the code that calls us will do expand() and try again
return None
fac, po, gl, cond = gs
_debug(' could rewrite:', gs)
res = S(0)
for C, s, g in gl:
a, b = _get_coeff_exp(g.argument, x)
_, c = _get_coeff_exp(po, x)
c += s
# we do a substitution t=a*x**b, get integrand fac*t**rho*g
fac_ = fac * C / (b*a**((1 + c)/b))
rho = (c + 1)/b - 1
# we now use t**rho*G(params, t) = G(params + rho, t)
# [L, page 150, equation (4)]
# and integral G(params, t) dt = G(1, params+1, 0, t)
# (or a similar expression with 1 and 0 exchanged ... pick the one
# which yields a well-defined function)
# [R, section 5]
# (Note that this dummy will immediately go away again, so we
# can safely pass S(1) for ``expr``.)
t = _dummy('t', 'meijerint-indefinite', S(1))
def tr(p):
return [a + rho + 1 for a in p]
if any(b.is_integer and (b <= 0) == True for b in tr(g.bm)):
r = -meijerg(
tr(g.an), tr(g.aother) + [1], tr(g.bm) + [0], tr(g.bother), t)
else:
r = meijerg(
tr(g.an) + [1], tr(g.aother), tr(g.bm), tr(g.bother) + [0], t)
r = hyperexpand(r.subs(t, a*x**b))
# now substitute back
# Note: we really do want the powers of x to combine.
res += powdenest(fac_*r, polar=True)
def _clean(res):
"""This multiplies out superfluous powers of x we created, and chops off
constants:
>> _clean(x*(exp(x)/x - 1/x) + 3)
exp(x)
cancel is used before mul_expand since it is possible for an
expression to have an additive constant that doesn't become isolated
with simple expansion. Such a situation was identified in issue 6369:
>>> from sympy import sqrt, cancel
>>> from sympy.abc import x
>>> a = sqrt(2*x + 1)
>>> bad = (3*x*a**5 + 2*x - a**5 + 1)/a**2
>>> bad.expand().as_independent(x)[0]
0
>>> cancel(bad).expand().as_independent(x)[0]
1
"""
from sympy import cancel
res = expand_mul(cancel(res), deep=False)
return Add._from_args(res.as_coeff_add(x)[1])
res = piecewise_fold(res)
if res.is_Piecewise:
newargs = []
for expr, cond in res.args:
expr = _my_unpolarify(_clean(expr))
newargs += [(expr, cond)]
res = Piecewise(*newargs)
else:
res = _my_unpolarify(_clean(res))
return Piecewise((res, _my_unpolarify(cond)), (Integral(f, x), True))
@timeit
def meijerint_definite(f, x, a, b):
"""
Integrate ``f`` over the interval [``a``, ``b``], by rewriting it as a product
of two G functions, or as a single G function.
Return res, cond, where cond are convergence conditions.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_definite
>>> from sympy import exp, oo
>>> from sympy.abc import x
>>> meijerint_definite(exp(-x**2), x, -oo, oo)
(sqrt(pi), True)
This function is implemented as a succession of functions
meijerint_definite, _meijerint_definite_2, _meijerint_definite_3,
_meijerint_definite_4. Each function in the list calls the next one
(presumably) several times. This means that calling meijerint_definite
can be very costly.
"""
# This consists of three steps:
# 1) Change the integration limits to 0, oo
# 2) Rewrite in terms of G functions
# 3) Evaluate the integral
#
# There are usually several ways of doing this, and we want to try all.
# This function does (1), calls _meijerint_definite_2 for step (2).
from sympy import arg, exp, I, And, DiracDelta, count_ops
_debug('Integrating', f, 'wrt %s from %s to %s.' % (x, a, b))
if f.has(DiracDelta):
_debug('Integrand has DiracDelta terms - giving up.')
return None
f_, x_, a_, b_ = f, x, a, b
# Let's use a dummy in case any of the boundaries has x.
d = Dummy('x')
f = f.subs(x, d)
x = d
if a == b:
return (S.Zero, True)
results = []
if a == -oo and b != oo:
return meijerint_definite(f.subs(x, -x), x, -b, -a)
elif a == -oo:
# Integrating -oo to oo. We need to find a place to split the integral.
_debug(' Integrating -oo to +oo.')
innermost = _find_splitting_points(f, x)
_debug(' Sensible splitting points:', innermost)
for c in sorted(innermost, key=default_sort_key, reverse=True) + [S(0)]:
_debug(' Trying to split at', c)
if not c.is_real:
_debug(' Non-real splitting point.')
continue
res1 = _meijerint_definite_2(f.subs(x, x + c), x)
if res1 is None:
_debug(' But could not compute first integral.')
continue
res2 = _meijerint_definite_2(f.subs(x, c - x), x)
if res2 is None:
_debug(' But could not compute second integral.')
continue
res1, cond1 = res1
res2, cond2 = res2
cond = _condsimp(And(cond1, cond2))
if cond == False:
_debug(' But combined condition is always false.')
continue
res = res1 + res2
return res, cond
elif a == oo:
return -meijerint_definite(f, x, b, oo)
elif (a, b) == (0, oo):
# This is a common case - try it directly first.
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
else:
if b == oo:
for split in _find_splitting_points(f, x):
if (a - split >= 0) == True:
_debug('Trying x -> x + %s' % split)
res = _meijerint_definite_2(f.subs(x, x + split)
*Heaviside(x + split - a), x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
f = f.subs(x, x + a)
b = b - a
a = 0
if b != oo:
phi = exp(I*arg(b))
b = abs(b)
f = f.subs(x, phi*x)
f *= Heaviside(b - x)*phi
b = oo
_debug('Changed limits to', a, b)
_debug('Changed function to', f)
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
if f_.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_definite(
_rewrite_hyperbolics_as_exp(f_), x_, a_, b_)
        if rv:
            if not isinstance(rv, list):
                rv = (collect(factor_terms(rv[0]), rv[0].atoms(exp)),) + rv[1:]
                return rv
            results.extend(rv)
if results:
return next(ordered(results))
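# A minimal usage sketch (illustrative only, assuming a standard SymPy
# environment; not part of the original module):
#
#   >>> from sympy import exp, oo
#   >>> from sympy.abc import x
#   >>> meijerint_definite(exp(-x), x, 0, oo)
#   (1, True)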
def _guess_expansion(f, x):
""" Try to guess sensible rewritings for integrand f(x). """
from sympy import expand_trig
from sympy.functions.elementary.trigonometric import TrigonometricFunction
res = [(f, 'original integrand')]
orig = res[-1][0]
saw = set([orig])
expanded = expand_mul(orig)
if expanded not in saw:
res += [(expanded, 'expand_mul')]
saw.add(expanded)
expanded = expand(orig)
if expanded not in saw:
res += [(expanded, 'expand')]
saw.add(expanded)
if orig.has(TrigonometricFunction, HyperbolicFunction):
expanded = expand_mul(expand_trig(orig))
if expanded not in saw:
res += [(expanded, 'expand_trig, expand_mul')]
saw.add(expanded)
return res
def _meijerint_definite_2(f, x):
"""
    Try to integrate f dx from zero to infinity.
The body of this function computes various 'simplifications'
f1, f2, ... of f (e.g. by calling expand_mul(), trigexpand()
- see _guess_expansion) and calls _meijerint_definite_3 with each of
these in succession.
    If _meijerint_definite_3 succeeds with any of the simplified functions,
returns this result.
"""
# This function does preparation for (2), calls
# _meijerint_definite_3 for (2) and (3) combined.
# use a positive dummy - we integrate from 0 to oo
# XXX if a nonnegative symbol is used there will be test failures
dummy = _dummy('x', 'meijerint-definite2', f, positive=True)
f = f.subs(x, dummy)
x = dummy
if f == 0:
return S(0), True
for g, explanation in _guess_expansion(f, x):
_debug('Trying', explanation)
res = _meijerint_definite_3(g, x)
if res:
return res
def _meijerint_definite_3(f, x):
"""
Try to integrate f dx from zero to infinity.
This function calls _meijerint_definite_4 to try to compute the
integral. If this fails, it tries using linearity.
"""
res = _meijerint_definite_4(f, x)
if res and res[1] != False:
return res
if f.is_Add:
_debug('Expanding and evaluating all terms.')
ress = [_meijerint_definite_4(g, x) for g in f.args]
if all(r is not None for r in ress):
conds = []
res = S(0)
for r, c in ress:
res += r
conds += [c]
c = And(*conds)
if c != False:
return res, c
def _my_unpolarify(f):
from sympy import unpolarify
return _eval_cond(unpolarify(f))
@timeit
def _meijerint_definite_4(f, x, only_double=False):
"""
Try to integrate f dx from zero to infinity.
This function tries to apply the integration theorems found in literature,
i.e. it tries to rewrite f as either one or a product of two G-functions.
The parameter ``only_double`` is used internally in the recursive algorithm
to disable trying to rewrite f as a single G-function.
"""
# This function does (2) and (3)
_debug('Integrating', f)
# Try single G function.
if not only_double:
gs = _rewrite1(f, x, recursive=False)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S(0)
for C, s, f in g:
if C == 0:
continue
C, f = _rewrite_saxena_1(fac*C, po*x**s, f, x)
res += C*_int0oo_1(f, x)
cond = And(cond, _check_antecedents_1(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitutions is:', res)
return _my_unpolarify(hyperexpand(res)), cond
# Try two G functions.
gs = _rewrite2(f, x)
if gs is not None:
for full_pb in [False, True]:
fac, po, g1, g2, cond = gs
_debug('Could rewrite as two G functions:', fac, po, g1, g2)
res = S(0)
for C1, s1, f1 in g1:
for C2, s2, f2 in g2:
r = _rewrite_saxena(fac*C1*C2, po*x**(s1 + s2),
f1, f2, x, full_pb)
if r is None:
_debug('Non-rational exponents.')
return
C, f1_, f2_ = r
                    _debug('Saxena substitution yielded:', C, f1_, f2_)
cond = And(cond, _check_antecedents(f1_, f2_, x))
if cond == False:
break
res += C*_int0oo(f1_, f2_, x)
else:
continue
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False (full_pb=%s).' % full_pb)
else:
_debug('Result before branch substitutions is:', res)
if only_double:
return res, cond
return _my_unpolarify(hyperexpand(res)), cond
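# (Background sketch, not part of the original module: step (3) rests on the
# classical convolution theorem for Meijer G-functions, i.e. the integral
# \int_0^\infty G1(x)*G2(c*x) dx has a closed form as a single, larger
# G-function; _int0oo performs that table lookup, while the antecedent checks
# guard the convergence conditions under which the formula is valid.)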
def meijerint_inversion(f, x, t):
"""
    Compute the inverse Laplace transform
    :math:`\int_{c-i\infty}^{c+i\infty} f(x) e^{tx} dx`,
for real c larger than the real part of all singularities of f.
Note that ``t`` is always assumed real and positive.
Return None if the integral does not exist or could not be evaluated.
Examples
========
>>> from sympy.abc import x, t
>>> from sympy.integrals.meijerint import meijerint_inversion
>>> meijerint_inversion(1/x, x, t)
Heaviside(t)
"""
from sympy import I, Integral, exp, expand, log, Add, Mul, Heaviside
f_ = f
t_ = t
t = Dummy('t', polar=True) # We don't want sqrt(t**2) = abs(t) etc
f = f.subs(t_, t)
c = Dummy('c')
_debug('Laplace-inverting', f)
if not _is_analytic(f, x):
_debug('But expression is not analytic.')
return None
# We filter out exponentials here. If we are given an Add this will not
# work, but the calling code will take care of that.
shift = 0
if f.is_Mul:
args = list(f.args)
newargs = []
exponentials = []
while args:
arg = args.pop()
if isinstance(arg, exp):
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
try:
a, b = _get_coeff_exp(arg.args[0], x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a)
else:
newargs.append(arg)
elif arg.is_Pow:
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
if x not in arg.base.free_symbols:
try:
a, b = _get_coeff_exp(arg.exp, x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a*log(arg.base))
newargs.append(arg)
else:
newargs.append(arg)
shift = Add(*exponentials)
f = Mul(*newargs)
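    # Worked sketch (illustrative, not in the original): for f = exp(a*x)*g(x)
    # the loop above strips the exp(a*x) factor into ``shift = a`` and inverts
    # g alone; the final substitution t -> t + shift then applies the Laplace
    # shift theorem L^-1[exp(a*s)*G(s)](t) = g(t + a).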
gs = _rewrite1(f, x)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S(0)
for C, s, f in g:
C, f = _rewrite_inversion(fac*C, po*x**s, f, x)
res += C*_int_inversion(f, x, t)
cond = And(cond, _check_antecedents_inversion(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitution:', res)
res = _my_unpolarify(hyperexpand(res))
if not res.has(Heaviside):
res *= Heaviside(t)
res = res.subs(t, t + shift)
if not isinstance(cond, bool):
cond = cond.subs(t, t + shift)
return Piecewise((res.subs(t, t_), cond),
(Integral(f_*exp(x*t), (x, c - oo*I, c + oo*I)).subs(t, t_), True))
|
google-code/android-scripting
|
refs/heads/master
|
python/gdata/src/gdata/Crypto/Cipher/__init__.py
|
271
|
"""Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
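# Minimal PEP 272-style usage sketch (illustrative; assumes the classic
# PyCrypto API and is not part of this module):
#
#   from Crypto.Cipher import AES
#   cipher = AES.new('0123456789abcdef')        # 16-byte key, ECB by default
#   ct = cipher.encrypt('sixteen byte msg')     # input must be 16-byte aligned
#   assert AES.new('0123456789abcdef').decrypt(ct) == 'sixteen byte msg'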
|
googleads/google-ads-python
|
refs/heads/master
|
google/ads/googleads/v7/services/types/bidding_strategy_simulation_service.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.services",
marshal="google.ads.googleads.v7",
manifest={"GetBiddingStrategySimulationRequest",},
)
class GetBiddingStrategySimulationRequest(proto.Message):
r"""Request message for
[BiddingStrategySimulationService.GetBiddingStrategySimulation][google.ads.googleads.v7.services.BiddingStrategySimulationService.GetBiddingStrategySimulation].
Attributes:
resource_name (str):
Required. The resource name of the bidding
strategy simulation to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
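# Minimal construction sketch (illustrative; the resource name below is a
# hypothetical placeholder, not taken from the original file):
#
#   request = GetBiddingStrategySimulationRequest(
#       resource_name="customers/123/biddingStrategySimulations/456",
#   )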
|
jparyani/Mailpile
|
refs/heads/sandstorm
|
mailpile/__main__.py
|
3
|
import sys
from mailpile.app import Main
def main():
Main(sys.argv[1:])
if __name__ == "__main__":
main()
|
blast-hardcheese/pvpgn
|
refs/heads/master
|
scripts/ladder.py
|
14
|
#!/usr/bin/env python
# -*- Mode: Python; tab-width: 4 -*-
#
# Copyright (C) 2001 Gianluigi Tiesi <sherpya@netfarm.it>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Ranking patch by <JEBs@shbe.net> 20020503
# Small "unsigned int" fix by <JEBs@shbe.net> 20020818
#
# ==========================================================================
__version__ = "0.9"
### Only if CGI_MODE = 1
CGI_MODE=0
FILE="/opt/bnetd/var/ladders/ladder.D2DV"
MAX=100
from struct import unpack,calcsize
from string import split,join
from os import stat
from sys import argv,exit,stdout
from getopt import getopt
#### Templates
modes = [ 'html', 'ansi', 'ascii', 'python' ]
templates = {}
for m in modes:
templates[m] = {}
### html ###
#
templates['html']['header']="""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>D2 Closed Realm Ladder</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body bgcolor="#000000" text="#ffff00">
<h2 style="color: lightgreen;" align="center">D2 Closed Realm Ladder</h2>
<table style="border: solid lightblue; border-width: 1px;" align="center" border="0" width="80%" summary="">
"""
#
templates['html']['footer']=""" </table>
<p style="color: lightblue;" align="center">Generated by ladder.py v %s - © 2001 <a style="color: lightgreen;" href="mailto:sherpya@netfarm.it">Sherpya</a></p>
</body>
</html>
""" % __version__
# %s for description of ladder type
templates['html']['summary'] = """ <tr style="color: lightblue" bgcolor="#666666"><th colspan="5">Ladder for %s</th></tr>
"""
#
templates['html']['tbheader'] = """<tr style="color: lightgreen;"><th align="center">#</th><th align="left">Charname</th><th align="right">level</th><th align="center">class</th><th align="right">exp</th></tr>
"""
# %s for charname
templates['html']['normal'] = """%s"""
templates['html']['hardcore'] = { 0 : """<span style="color: red;">%s</span>""",
1 : """<span style="color: orange;">%s</span>""" }
# %s charname - %d level - %s class - %d experience
templates['html']['entry'] = """<tr bgcolor="#222222"><td align="right">%d</td><td align="left">%s</td><td align="right">%d</td><td align="center">%s</td><td align="right">%d</td></tr>
"""
#
templates['html']['separator'] = """<tr><td colspan="5"> </td></tr>
"""
#### html
#### ascii / ansi
line = '-' * 59 + '\n'
s10 = ' ' * 10
s14 = ' ' * 14
s5 = ' ' * 5
text = 'D2 Closed Ladder'
esc = '\033'
off = esc + '[0m'
colors = {
    'grey': esc + '[1;30m',
    'red': esc + '[1;31m',
    'green': esc + '[1;32m',
    'yellow': esc + '[1;33m',
    'blue': esc + '[1;34m',
    'purple': esc + '[1;35m',
    'magenta': esc + '[1;36m',
    'white': esc + '[1;37m'
}
templates['ascii']['header'] = line + (int((len(line) - len(text))/ 2)) * ' ' + text + '\n' + line
templates['ascii']['footer'] = 'generated by ladder.py (c) Sherpya [sherpya@netfarm.it]\n'
templates['ascii']['summary'] = 'Ladder for %s\n\n'
templates['ascii']['tbheader'] = ' # charname' + s14 + 'level' + s10 + 'class' + s10 + 'exp' + '\n\n'
templates['ascii']['normal'] = '%s'
templates['ascii']['hardcore'] = { 0 : '*%s', 1: '^%s' }
templates['ascii']['entry'] = '%3d %-23s %2d %16s %10d\n'
templates['ascii']['separator'] = line + '\n'
line = colors['blue'] + ( '-' * 59) + off + '\n'
templates['ansi']['header'] = line + (int((len(line) - len(text) - 10)/ 2)) * ' ' + colors['green'] + text + off + '\n' + line
templates['ansi']['footer'] = colors['green'] + 'generated by ' + colors['blue'] + 'ladder.py' + colors['green'] + ' (c) Sherpya [sherpya@netfarm.it]' + off + '\n'
templates['ansi']['summary'] = colors['white'] + 'Ladder for %s' + off + '\n\n'
templates['ansi']['tbheader'] = colors['green'] + ' # charname' + s14 + 'level' + s10 + 'class' + s10 + 'exp' + off + '\n\n'
templates['ansi']['normal'] = colors['yellow'] + '%s'
templates['ansi']['hardcore'] = { 0 : colors['red'] + '%s', 1: colors['grey'] + '%s' }
templates['ansi']['entry'] = colors['yellow'] + '%3d %-30s %2d %16s %10d' + off + '\n'
templates['ansi']['separator'] = line + '\n'
del text
#### ascii / ansi
### Some struct from d2cs/d2dbs source
#
# ladder header (4 + 4 = 8):
# bn_int maxtype
# bn_int checksum
LD_HEAD="<2i"
szLD_HEAD = calcsize(LD_HEAD)
#
# ladder info (4 + 2 + 1 + 1 + 16 = 24):
# bn_int experience
# bn_short status
# bn_byte level
# bn_byte class;
# char charname[16];
LD_INFO="<Ihbb16s"
szLD_INFO = calcsize(LD_INFO)
#
# ladder index (4 + 4 + 4 = 12):
# bn_int type
# bn_int offset
# bn_int number
LD_INDEX="<3i"
szLD_INDEX = calcsize(LD_INDEX)
## Status flags
S_INIT = 0x1
S_EXP = 0x20
S_HC = 0x04
S_DEAD = 0x08
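# Worked example (illustrative, not in the original): a status word of 0x24
# has S_EXP (0x20) and S_HC (0x04) set, i.e. an expansion hardcore character;
# 0x2C would additionally set S_DEAD (0x08), marking that character as dead.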
classes = {
0x00 : ['Amazon', 'f'],
0x01 : ['Sorceress', 'f'],
0x02 : ['Necromancer', 'm'],
0x03 : ['Paladin', 'm'],
0x04 : ['Barbarian', 'm'],
0x05 : ['Druid', 'm'],
0x06 : ['Assassin', 'f']
}
desc = {
'nor': 'Diablo II',
    'exp': 'Lord of Destruction'
}
diff = {
'nor': {
0x1: { 0 : { 'm': 'Sir', 'f': 'Dame' },
1 : { 'm': 'Count', 'f': 'Countess' }
},
0x2: { 0 : { 'm': 'Lord', 'f': 'Lady' },
1 : { 'm': 'Duke', 'f': 'Duchess' }
},
0x3: { 0 : { 'm': 'Baron', 'f': 'Baroness' },
1 : { 'm': 'King', 'f': 'Queen' }
}
},
'exp': {
0x1: { 0 : { 'm': 'Slayer', 'f': 'Slayer' },
1 : { 'm': 'Destroyer', 'f': 'Destroyer' }
},
0x2: { 0 : { 'm': 'Champion', 'f': 'Champion' },
1 : { 'm': 'Conqueror', 'f': 'Conqueror' }
},
0x3: { 0 : { 'm': 'Patriarch', 'f': 'Matriarch' },
1 : { 'm': 'Guardian', 'f': 'Guardian' }
}
}
}
## Utils
def remove_null(text):
return split(text, chr(0))[0]
def get_ladder(file):
try:
size = stat(file)[6]
data = open(file, "rb")
except:
print "Error opening %s for read" % file
exit()
maxtype, checksum = unpack(LD_HEAD, data.read(szLD_HEAD))
size = size - szLD_HEAD
head = []
for i in range(maxtype):
type, offset, number = unpack(LD_INDEX, data.read(szLD_INDEX))
size = size - szLD_INDEX
head.append(
{
'type': type,
'offset': offset,
'number': number
})
ladder = {}
ladder['nor'] = []
ladder['exp'] = []
temp = {}
temp['nor'] = []
temp['exp'] = []
while size > 0:
try:
experience, status, level, _class, charname = unpack(LD_INFO, data.read(szLD_INFO))
except:
### Bad data
size = size - szLD_INFO
continue
size = size - szLD_INFO
## Avoid null chars
if not experience:
continue
charname = remove_null(charname)
died = 0
if status & S_EXP:
_type = 'exp'
difficulty = ((status >> 0x08) & 0x0f) / 5
else:
_type = 'nor'
difficulty = ((status >> 0x08) & 0x0f) / 5
if status & S_HC:
hc = 1
if status & S_DEAD:
died = 1
else:
hc = 0
c_class = classes[_class]
if difficulty and diff[_type].has_key(difficulty):
prefix = diff[_type][difficulty][hc][c_class[1]]
else:
prefix = None
char = (experience, {
'charname' : charname,
'prefix' : prefix,
'experience' : experience,
'class' : c_class[0],
            'sex' : c_class[1],
'level' : level,
'type' : _type,
'difficulty' : difficulty,
'hc' : hc,
'died' : died
})
## Dupe char? why?
if char not in temp[_type]:
temp[_type].append(char)
data.close()
## Sorting by exp
temp['nor'].sort()
temp['nor'].reverse()
temp['exp'].sort()
temp['exp'].reverse()
for _type in temp.keys():
for ch in temp[_type]:
ladder[_type].append(ch[1])
del temp
return ladder
def generate(ladder, mode, output, max):
output.write(templates[mode]['header'])
for _type in ladder.keys():
count = 1
output.write(templates[mode]['summary'] % desc[_type])
output.write(templates[mode]['tbheader'])
for ch in ladder[_type]:
if ch['prefix']:
charname = "%s %s" % (ch['prefix'], ch['charname'])
else:
charname = ch['charname']
if ch['hc']:
charname = templates[mode]['hardcore'][ch['died']] % charname
else:
charname = templates[mode]['normal'] % charname
output.write(templates[mode]['entry'] % (count, charname, ch['level'], ch['class'], ch['experience']))
count = count + 1
if count > max:
break
output.write(templates[mode]['separator'])
output.write(templates[mode]['footer'])
def pickle_to(ladder, output):
try:
from cPickle import dump
except:
from pickle import dump
try:
out = open(output, "wb")
except:
print "Cannot open %s for pickle dump" % output
exit()
dump(ladder, out)
out.close()
### Main
### CGI MODE
if CGI_MODE:
print "Content-Type: text/html"
print
ladder = get_ladder(FILE)
generate(ladder, 'html', stdout, MAX)
exit()
args = argv[1:]
optlist, args = getopt(args, "hi:o:m:n:")
if len(args):
for bad in args:
print "%s: Unrecognized option %s" % (argv[0], bad)
exit()
### defaults
file = None
output = None # stdout
mode = modes[0]
real_max = 1000
max = 100
def show_help():
print
print "ladder.py v%s - (c) 2001 Sherpya <sherpya@netfarm.it>" % __version__
print "Usage: ladder.py -i ladder_file [-o outputfile] [-m mode] [-n max ladder chars]"
print
print " -i ladder_file, is the ladder file like ladder.D2DV"
print " -o output file, if omitted defaults to stdout"
print " -m mode, avaiables mode are: %s, defaults to %s" % (join(modes,', '), modes[0])
print " -n max_char, max char to display in each ladder, defaults to %d" % max
print
print " note: python output mode creates a python object usable by pickle module"
print
for opt in optlist:
# Help
if opt[0] == '-h':
show_help()
exit()
# Input file
if opt[0] == '-i':
file = opt[1]
continue
# Output file
if opt[0] == '-o':
output = opt[1]
continue
# Output mode (html, ansi, ascii, python)
if opt[0] == '-m':
if opt[1] in modes:
mode = opt[1]
continue
else:
print "Invalid mode %s, valid modes are %s" % (opt[1], join(modes, ', '))
exit()
# Max chars in ladder
if opt[0] == '-n':
try:
max = int(opt[1])
except:
max = 0
if (max < 2) or max > real_max:
print "Invalid value for max char in ladder must be > 1 and < %d" % real_max
exit()
continue
if not file:
show_help()
exit()
ladder = get_ladder(file)
if mode == 'python':
if output:
pickle_to(ladder, output)
else:
print "Cannot dump python object to stdout"
exit()
if output:
try:
output = open(output, "wb")
except:
print "Cannot open %s for writing" % output
exit()
else:
output = stdout
generate(ladder, mode, output, max)
|
abomyi/django
|
refs/heads/master
|
tests/indexes/tests.py
|
321
|
from unittest import skipUnless
from django.db import connection
from django.test import TestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=("c1", "c2", "c3"),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_7ce4cc86123")
def test_index_together(self):
editor = connection.schema_editor()
index_sql = editor._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[2])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
            self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
self.assertEqual(index_sql, [])
|
Chris--A/Arduino
|
refs/heads/master
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/request.py
|
245
|
# urllib3/request.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
    Convenience mixin for classes that implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are encoded
in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
_encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the option
to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
        safe to use it at other times too. However, it may break request signing,
such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example: ::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will be
overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if encode_multipart:
body, content_type = encode_multipart_formdata(fields or {},
boundary=multipart_boundary)
else:
body, content_type = (urlencode(fields or {}),
'application/x-www-form-urlencoded')
if headers is None:
headers = self.headers
headers_ = {'Content-Type': content_type}
headers_.update(headers)
return self.urlopen(method, url, body=body, headers=headers_,
**urlopen_kw)
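# Minimal usage sketch (illustrative, not part of the original module):
#
#   import urllib3
#   http = urllib3.PoolManager()   # PoolManager mixes in RequestMethods
#   r = http.request('GET', 'http://example.com/', fields={'q': 'test'})
#   # GET-like methods encode fields in the URL: http://example.com/?q=test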
|
cheng10/M-ords
|
refs/heads/master
|
mords_backend/mords/views.py
|
1
|
from datetime import timedelta
from django.db import IntegrityError
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.views import generic
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
import random
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from mords_api.models import Note, Word, Learner, Entry, Book, LearningWord
from .forms import UserForm, LearnerForm, PasswordForm
@login_required
def index(request):
learner = Learner.objects.get(user=request.user)
to_learn = learner.words_perDay - learner.words_finished
word_num = len(LearningWord.objects.filter(learner=learner).filter(lv__in=[1, 2]))
context = {
"learner": learner,
"to_learn": to_learn,
"word_num": word_num
}
return render(request, 'mords/index.html', context)
# class IndexView(generic.ListView):
# template_name = 'mords/index.html'
# context_object_name = 'latest_note_list'
#
# def get_queryset(self):
# """
# Return the last five published notes (not including those set to be
# published in the future).
#
# """
# return Note.objects.order_by('-pub_date')
# # return Note.objects.order_by('-pub_date')[:5]
# # return Note.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
def new(request):
latest_word_list = Word.objects.filter(update_date__gte=timezone.now()-timedelta(days=1))
paginator = Paginator(latest_word_list, 30) # Show 30 words per page
page = request.GET.get('page')
try:
words = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
words = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
words = paginator.page(paginator.num_pages)
return render(request, 'mords/new.html', {'latest_word_list': words})
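# The try/except pagination pattern above recurs throughout this module; a
# minimal sketch of the underlying behaviour (illustrative only):
#
#   paginator = Paginator(range(100), 30)  # 4 pages of 30/30/30/10 items
#   paginator.page(1).object_list          # first 30 items
#   paginator.num_pages                    # -> 4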
# class NewView(generic.ListView):
# template_name = 'mords/new.html'
# context_object_name = 'latest_word_list'
#
# def get_queryset(self):
# return Word.objects.filter(update_date__lte=timezone.now()+timedelta(days=1))
def signup(request):
registered = False
if request.method == 'POST':
user_form = UserForm(data=request.POST)
learner_form = LearnerForm(data=request.POST)
if user_form.is_valid() and learner_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
learner = learner_form.save(commit=False)
learner.user = user
if 'pic' in request.FILES:
learner.pic = request.FILES['pic']
learner.save()
registered = True
else:
print(user_form.errors, learner_form.errors)
else:
user_form = UserForm()
learner_form = LearnerForm()
context = {
'user_form': user_form,
'learner_form': learner_form,
'registered': registered
}
return render(request,
'mords/signup.html',
context
)
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('mords:index'))
else:
return HttpResponse("Your account is disabled.")
else:
print("Invalid login details: {0}, {1}".format(username, password))
return HttpResponse("Invalid login details supplied.")
else:
return render(request, 'mords/login.html', {})
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('mords:index'))
@login_required
def update_profile(request):
if request.method == 'POST':
learner_form = LearnerForm(data=request.POST)
if learner_form.is_valid():
learner = Learner.objects.get(user=request.user)
learner.book = learner_form.cleaned_data['book']
learner.words_perDay = learner_form.cleaned_data['words_perDay']
if 'pic' in request.FILES:
learner.pic = request.FILES['pic']
learner.save()
else:
print(learner_form.errors)
else:
learner_form = LearnerForm()
learner = Learner.objects.get(user=request.user)
pass_form = PasswordForm()
context = {
'password_form': pass_form,
'learner_form': learner_form,
'learner': learner,
}
return render(request,
'mords/profile.html',
context
)
@login_required
def update_password(request):
if request.method == 'POST':
pass_form = PasswordForm(data=request.POST)
if pass_form.is_valid():
user = request.user
user.set_password(pass_form.cleaned_data['password'])
user.save()
info = 'Password Updated'
else:
print(pass_form.errors)
info = pass_form.errors
else:
info = ''
pass_form = PasswordForm()
learner = Learner.objects.get(user=request.user)
learner_form = LearnerForm()
context = {
'password_form': pass_form,
'learner_form': learner_form,
'learner': learner,
'info': info
}
return render(request,
'mords/profile.html',
context
)
def detail(request, word_id):
"""
Excludes any notes that aren't published yet.
:param request:
:param word_id:
:return:
"""
word = get_object_or_404(Word, id=word_id)
notes = word.note_set.all().filter(pub_date__lte=timezone.now())
paginator = Paginator(notes, 5) # Show 5 notes per page
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
notes = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
notes = paginator.page(paginator.num_pages)
context = {
'word': word,
'notes': notes
}
return render(request, 'mords/detail.html', context)
def learn_res(request, word_id):
word = get_object_or_404(Word, id=word_id)
notes = word.note_set.all()
# notes = word.note_set.all().filter(pub_date__lte=timezone.now())
paginator = Paginator(notes, 5) # Show 5 notes per page
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
notes = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
notes = paginator.page(paginator.num_pages)
context = {
'word': word,
'notes': notes
}
return render(request, 'mords/learn_res.html', context)
def cross_res(request, word_id):
word = get_object_or_404(Word, id=word_id)
notes = word.note_set.all()
# notes = word.note_set.all().filter(pub_date__lte=timezone.now())
paginator = Paginator(notes, 5) # Show 5 notes per page
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
notes = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
notes = paginator.page(paginator.num_pages)
context = {
'word': word,
'notes': notes
}
return render(request, 'mords/cross_res.html', context)
@login_required
def learn(request):
learner = Learner.objects.get(user=request.user)
if learner.words_finished >= learner.words_perDay:
context = {
'error_message': "You have finished today's learning task. Congrats!",
'is_blank': True
}
return render(request, 'mords/learn.html', context)
elif LearningWord.objects.filter(learner=learner):
        if LearningWord.objects.filter(learner=learner).count() < Entry.objects.filter(book=learner.book).count():
entrys = Entry.objects.filter(book=learner.book).order_by('?')
i = 0
for entry in entrys:
l, created = LearningWord.objects.get_or_create(
learner=learner,
word=entry.word,
)
if created:
i += 1
if i > 2:
break
if LearningWord.objects.filter(learner=learner).order_by('lv')[0].lv == 0:
context = {
'error_message': "You have finished the current book. Choose a new one.",
'is_blank': True
}
return render(request, 'mords/learn.html', context)
elif learner.book:
entrys = Entry.objects.filter(book=learner.book).order_by('?')
i = 0
for entry in entrys:
l, created = LearningWord.objects.get_or_create(
learner=learner,
word=entry.word,
)
if created:
i += 1
if i > 2:
break
else:
context = {
'error_message': "No words to learn. Have you chose a book to work on?",
'is_blank': True
}
return render(request, 'mords/learn.html', context)
if random.random() > 0.7:
lword = LearningWord.objects.filter(learner=learner).filter(lv__in=[2, 3]).order_by('?')[0]
print('new word')
else:
lword = LearningWord.objects.filter(learner=learner).filter(lv__in=[2, 3]).order_by('-lv')[0]
print('old word')
notes = lword.word.note_set.all()
paginator = Paginator(notes, 5) # Show 5 notes per page
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
notes = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
notes = paginator.page(paginator.num_pages)
context = {
'word': lword.word,
'notes': notes
}
return render(request, 'mords/learn.html', context)
def review(request):
learner = Learner.objects.get(user=request.user)
lwords = LearningWord.objects.filter(learner=learner).filter(lv__in=[1, 2]).order_by('-update_datetime')
paginator = Paginator(lwords, 30) # Show 30 entrys per page
page = request.GET.get('page')
try:
lwords = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
lwords = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
lwords = paginator.page(paginator.num_pages)
context = {
'lwords': lwords,
}
return render(request, 'mords/review.html', context)
def book_detail(request, book_name):
book = get_object_or_404(Book, name=book_name)
entrys = book.entry_set.all()
# notes = word.note_set.all().filter(pub_date__lte=timezone.now())
paginator = Paginator(entrys, 30) # Show 30 entrys per page
page = request.GET.get('page')
try:
entrys = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
entrys = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
entrys = paginator.page(paginator.num_pages)
context = {
'book': book,
'entrys': entrys
}
return render(request, 'mords/book_detail.html', context)
# return HttpResponse(reverse('mords:book_detail'))
@login_required
def comment(request, word_id):
word = get_object_or_404(Word, id=word_id)
try:
text = request.POST['note']
# except request.POST['note'] is None:
except KeyError:
return render(request, 'mords/detail.html',
{
'word': word,
'error_message': 'You did not enter a note.',
})
else:
try:
author = Learner.objects.get(user=request.user)
except Learner.DoesNotExist:
return render(request, 'mords/detail.html',
{
'word': word,
'error_message': 'Learner does not exist.',
})
pub_date = timezone.now()
Note.objects.create(
word=word,
pub_date=pub_date,
author=author,
text=text
)
return HttpResponseRedirect(reverse('mords:results', args=(word.id,)))
def results(request, word_id):
word = get_object_or_404(Word, id=word_id)
return render(request, 'mords/results.html', {'word': word})
def search(request):
query = request.GET.get('q')
words = Word.objects.filter(text__icontains=query)
paginator = Paginator(words, 30) # Show 30 words per page
page = request.GET.get('page')
try:
words = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
words = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
words = paginator.page(paginator.num_pages)
context = {
'words': words
}
return render(request, 'mords/search.html', context)
@login_required
def tick(request, word_id):
word = get_object_or_404(Word, id=word_id)
if request.method == 'POST':
learner = Learner.objects.get(user=request.user)
lword = LearningWord.objects.get(learner=learner, word=word)
if lword.lv > 0:
learner.words_finished += 1
learner.save()
lword.lv -= 1
        lword.update_datetime = timezone.now()
lword.save()
else:
return HttpResponse("Does not support get method.")
return HttpResponseRedirect(reverse('mords:learn_res', args=(word.id,)))
@login_required
def cross(request, word_id):
word = get_object_or_404(Word, id=word_id)
if request.method == 'POST':
learner = Learner.objects.get(user=request.user)
if learner.words_finished >= 1:
learner.words_finished -= 1
learner.save()
lword = LearningWord.objects.get(learner=learner, word=word)
lword.lv = 3
        lword.update_datetime = timezone.now()
lword.save()
else:
return HttpResponse("Does not support get method.")
return HttpResponseRedirect(reverse('mords:cross_res', args=(word.id,)))
@login_required
def cross2(request, word_id):
word = get_object_or_404(Word, id=word_id)
if request.method == 'POST':
learner = Learner.objects.get(user=request.user)
if learner.words_finished >= 1:
learner.words_finished -= 1
learner.save()
lword = LearningWord.objects.get(learner=learner, word=word)
lword.lv = 3
        lword.update_datetime = timezone.now()
lword.save()
else:
return HttpResponse("Does not support get method.")
return HttpResponseRedirect(reverse('mords:learn'))
def latest_notes(request):
latest_note_list = Note.objects.order_by('-pub_date')[:5]
output = ', '.join([n.text for n in latest_note_list])
return HttpResponse(output)
|
JFriel/honours_project
|
refs/heads/master
|
networkx/networkx/drawing/tests/test_pylab.py
|
45
|
"""
Unit tests for matplotlib drawing functions.
"""
import os
from nose import SkipTest
import networkx as nx
class TestPylab(object):
@classmethod
def setupClass(cls):
global plt
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = False
except ImportError:
raise SkipTest('matplotlib not available.')
except RuntimeError:
raise SkipTest('matplotlib not available.')
def setUp(self):
self.G=nx.barbell_graph(5,10)
def test_draw(self):
try:
N=self.G
nx.draw_spring(N)
plt.savefig("test.ps")
nx.draw_random(N)
plt.savefig("test.ps")
nx.draw_circular(N)
plt.savefig("test.ps")
nx.draw_spectral(N)
plt.savefig("test.ps")
nx.draw_spring(N.to_directed())
plt.savefig("test.ps")
finally:
try:
os.unlink('test.ps')
except OSError:
pass
|
heke123/chromium-crosswalk
|
refs/heads/master
|
tools/ipc_fuzzer/scripts/play_testcase.py
|
40
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around chrome.
Replaces all the child processes (renderer, GPU, plugins and utility) with the
IPC fuzzer. The fuzzer will then play back a specified testcase.
Depends on ipc_fuzzer being available on the same directory as chrome.
"""
import argparse
import os
import platform
import subprocess
import sys
CHROME_BINARY_FOR_PLATFORM_DICT = {
'LINUX': 'chrome',
'MAC': 'Chromium.app/Contents/MacOS/Chromium',
'WINDOWS': 'chrome.exe',
}
def GetPlatform():
platform = None
if sys.platform.startswith('win'):
platform = 'WINDOWS'
elif sys.platform.startswith('linux'):
platform = 'LINUX'
elif sys.platform == 'darwin':
platform = 'MAC'
assert platform is not None
return platform
def main():
desc = 'Wrapper to run chrome with child processes replaced by IPC fuzzers'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--out-dir', dest='out_dir', default='out',
help='output directory under src/ directory')
parser.add_argument('--build-type', dest='build_type', default='Release',
help='Debug vs. Release build')
parser.add_argument('--gdb-browser', dest='gdb_browser', default=False,
action='store_true',
help='run browser process inside gdb')
parser.add_argument('testcase',
help='IPC file to be replayed')
parser.add_argument('chrome_args',
nargs=argparse.REMAINDER,
help='any additional arguments are passed to chrome')
args = parser.parse_args()
platform = GetPlatform()
chrome_binary = CHROME_BINARY_FOR_PLATFORM_DICT[platform]
fuzzer_binary = 'ipc_fuzzer_replay'
if platform == 'WINDOWS':
fuzzer_binary += '.exe'
script_path = os.path.realpath(__file__)
ipc_fuzzer_dir = os.path.join(os.path.dirname(script_path), os.pardir)
src_dir = os.path.abspath(os.path.join(ipc_fuzzer_dir, os.pardir, os.pardir))
out_dir = os.path.join(src_dir, args.out_dir)
build_dir = os.path.join(out_dir, args.build_type)
chrome_path = os.path.join(build_dir, chrome_binary)
if not os.path.exists(chrome_path):
print 'chrome executable not found at ', chrome_path
return 1
fuzzer_path = os.path.join(build_dir, fuzzer_binary)
if not os.path.exists(fuzzer_path):
print 'fuzzer executable not found at ', fuzzer_path
print ('ensure GYP_DEFINES="enable_ipc_fuzzer=1" and build target ' +
fuzzer_binary + '.')
return 1
prefixes = {
'--renderer-cmd-prefix',
'--gpu-launcher',
'--plugin-launcher',
'--ppapi-plugin-launcher',
'--utility-cmd-prefix',
}
chrome_command = [
chrome_path,
'--ipc-fuzzer-testcase=' + args.testcase,
'--no-sandbox',
'--disable-kill-after-bad-ipc',
'--disable-mojo-channel',
]
if args.gdb_browser:
chrome_command = ['gdb', '--args'] + chrome_command
launchers = {}
for prefix in prefixes:
launchers[prefix] = fuzzer_path
for arg in args.chrome_args:
if arg.find('=') != -1:
switch, value = arg.split('=', 1)
if switch in prefixes:
launchers[switch] = value + ' ' + launchers[switch]
continue
chrome_command.append(arg)
for switch, value in launchers.items():
chrome_command.append(switch + '=' + value)
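  # Illustrative example (not from the original): passing
  #   --renderer-cmd-prefix='xterm -e gdb --args'
  # among chrome_args prepends that value to the fuzzer path, producing
  #   --renderer-cmd-prefix=xterm -e gdb --args <fuzzer_path>
  # so the renderer child runs under gdb yet still launches the fuzzer.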
command_line = ' '.join(['\'' + arg + '\'' for arg in chrome_command])
print 'Executing: ' + command_line
return subprocess.call(chrome_command)
if __name__ == "__main__":
sys.exit(main())
|
smkr/pyclipse
|
refs/heads/master
|
plugins/org.python.pydev/tests/pysrc/extendable/dependencies/file4.py
|
11
|
from file2 import * #generates dependency only if the module itself is removed, because we use no other tokens
|
CeltonMcGrath/TACTIC
|
refs/heads/master
|
3rd_party/CherryPy/cherrypy/test/logtest.py
|
12
|
"""logtest, a unittest.TestCase helper for testing log output."""
import sys
import time
import cherrypy
try:
# On Windows, msvcrt.getch reads a single char without output.
import msvcrt
def getchar():
return msvcrt.getch()
except ImportError:
    # Unix getchar
import tty, termios
def getchar():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class LogCase(object):
"""unittest.TestCase mixin for testing log messages.
logfile: a filename for the desired log. Yes, I know modes are evil,
but it makes the test functions so much cleaner to set this once.
lastmarker: the last marker in the log. This can be used to search for
messages since the last marker.
markerPrefix: a string with which to prefix log markers. This should be
unique enough from normal log output to use for marker identification.
"""
logfile = None
lastmarker = None
markerPrefix = "test suite marker: "
def _handleLogError(self, msg, data, marker, pattern):
print("")
print(" ERROR: %s" % msg)
if not self.interactive:
raise self.failureException(msg)
p = " Show: [L]og [M]arker [P]attern; [I]gnore, [R]aise, or sys.e[X]it >> "
print p,
# ARGH
sys.stdout.flush()
while True:
i = getchar().upper()
if i not in "MPLIRX":
continue
print(i.upper()) # Also prints new line
if i == "L":
for x, line in enumerate(data):
if (x + 1) % self.console_height == 0:
# The \r and comma should make the next line overwrite
print "<-- More -->\r",
m = getchar().lower()
# Erase our "More" prompt
print " \r",
if m == "q":
break
print(line.rstrip())
elif i == "M":
print(repr(marker or self.lastmarker))
elif i == "P":
print(repr(pattern))
elif i == "I":
# return without raising the normal exception
return
elif i == "R":
raise self.failureException(msg)
elif i == "X":
self.exit()
print p,
def exit(self):
sys.exit()
def emptyLog(self):
"""Overwrite self.logfile with 0 bytes."""
open(self.logfile, 'wb').write("")
def markLog(self, key=None):
"""Insert a marker line into the log and set self.lastmarker."""
if key is None:
key = str(time.time())
self.lastmarker = key
open(self.logfile, 'ab+').write("%s%s\n" % (self.markerPrefix, key))
def _read_marked_region(self, marker=None):
"""Return lines from self.logfile in the marked region.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be returned.
"""
## # Give the logger time to finish writing?
## time.sleep(0.5)
logfile = self.logfile
marker = marker or self.lastmarker
if marker is None:
return open(logfile, 'rb').readlines()
data = []
in_region = False
for line in open(logfile, 'rb'):
if in_region:
if (line.startswith(self.markerPrefix) and not marker in line):
break
else:
data.append(line)
elif marker in line:
in_region = True
return data
def assertInLog(self, line, marker=None):
"""Fail if the given (partial) line is not in the log.
The log will be searched from the given marker to the next marker.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be searched.
"""
data = self._read_marked_region(marker)
for logline in data:
if line in logline:
return
msg = "%r not found in log" % line
self._handleLogError(msg, data, marker, line)
def assertNotInLog(self, line, marker=None):
"""Fail if the given (partial) line is in the log.
The log will be searched from the given marker to the next marker.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be searched.
"""
data = self._read_marked_region(marker)
for logline in data:
if line in logline:
msg = "%r found in log" % line
self._handleLogError(msg, data, marker, line)
def assertLog(self, sliceargs, lines, marker=None):
"""Fail if log.readlines()[sliceargs] is not contained in 'lines'.
The log will be searched from the given marker to the next marker.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be searched.
"""
data = self._read_marked_region(marker)
if isinstance(sliceargs, int):
# Single arg. Use __getitem__ and allow lines to be str or list.
if isinstance(lines, (tuple, list)):
lines = lines[0]
if lines not in data[sliceargs]:
msg = "%r not found on log line %r" % (lines, sliceargs)
self._handleLogError(msg, [data[sliceargs]], marker, lines)
else:
# Multiple args. Use __getslice__ and require lines to be list.
if isinstance(lines, tuple):
lines = list(lines)
elif isinstance(lines, basestring):
raise TypeError("The 'lines' arg must be a list when "
"'sliceargs' is a tuple.")
start, stop = sliceargs
for line, logline in zip(lines, data[start:stop]):
if line not in logline:
msg = "%r not found in log" % line
self._handleLogError(msg, data[start:stop], marker, line)
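# Minimal usage sketch (illustrative, not part of the original module):
#
#   class MyLogTest(LogCase, unittest.TestCase):
#       logfile = '/tmp/myapp.log'
#
#       def test_logs_hello(self):
#           self.markLog()                # drop a marker line into the log
#           cherrypy.log('hello')         # code under test writes to the log
#           self.assertInLog('hello')     # search only the marked region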
|
ltiao/project-euler
|
refs/heads/master
|
common/problem2.py
|
1
|
#
# Fast doubling Fibonacci algorithm
#
# Copyright (c) 2013 Nayuki Minase
# All rights reserved. Contact Nayuki for licensing.
# http://nayuki.eigenstate.org/page/fast-fibonacci-algorithms
#
# Returns F(n)
def fibonacci(n):
if n < 0:
raise ValueError("Negative arguments not implemented")
return _fib(n)[0]
# Returns a tuple (F(n), F(n+1))
def _fib(n):
if n == 0:
return (0, 1)
else:
a, b = _fib(n // 2)
c = a * (2 * b - a)
d = b * b + a * a
if n % 2 == 0:
return (c, d)
else:
return (d, c + d)
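# The doubling step above relies on the standard identities (stated here for
# clarity; not part of the original file):
#   F(2k)   = F(k) * (2*F(k+1) - F(k))
#   F(2k+1) = F(k)**2 + F(k+1)**2
# so _fib(n) halves n at every level and needs only O(log n) arithmetic steps.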
print [fibonacci(i) for i in xrange(10)]
|
OTL/arucopy
|
refs/heads/master
|
setup.py
|
1
|
from distutils.core import setup, Extension
# define the name of the extension to use
extension_name = 'arucopy'
extension_version = '1.0'
# define the directories to search for include files
# to get this to work, you may need to include the path
# to your boost installation. Mine was in
# '/usr/local/include', hence the corresponding entry.
include_dirs = []
# define the library directories to include any extra
# libraries that may be needed. The boost::python
# library for me was located in '/usr/local/lib'
library_dirs = []
# define the libraries to link with the boost python library
libraries = [ 'boost_python', 'opencv_video', 'aruco' ]
# define the source files for the extension
source_files = [ 'arucopy.cpp' ]
# create the extension and add it to the python distribution
setup(name=extension_name,
version=extension_version,
ext_modules=[Extension(
extension_name, source_files, include_dirs=include_dirs,
library_dirs=library_dirs, libraries=libraries)]
)
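# Typical build invocation (illustrative; assumes the Boost.Python, OpenCV and
# ArUco development libraries are installed where the compiler/linker can find
# them):
#   python setup.py build_ext --inplace
# which compiles arucopy.cpp and leaves an importable 'arucopy' module in place.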
|
charukiewicz/beer-manager
|
refs/heads/master
|
venv/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision between
# the two is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
        # The letters Pe and Kaf occasionally show a related behavior of not
        # being good Non-Final letters: loanwords like 'Pop', 'Winamp' and
        # 'Mubarak', for example, legitimately end with a Non-Final Pe or Kaf.
        # However, the benefit of these letters as Non-Final indicators
        # outweighs the damage, since such words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
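# ---------------------------------------------------------------------------
# A minimal, hypothetical demo (not part of the original module): two stub
# model probers stand in for the real SBCharSetProbers so the decision logic
# above can be exercised on its own. Run with `python -m chardet.hebrewprober`
# so the relative imports resolve; the buffer is a Python 2 byte string.
if __name__ == '__main__':
    class _StubModelProber(object):
        def __init__(self, confidence):
            self._confidence = confidence
        def get_state(self):
            return eDetecting
        def get_confidence(self):
            return self._confidence
    prober = HebrewProber()
    # Logical model slightly ahead of the visual model.
    prober.set_model_probers(_StubModelProber(0.6), _StubModelProber(0.3))
    # A two-letter word ending in FINAL_KAF, as logical Hebrew would have it.
    prober.feed(chr(NORMAL_MEM) + chr(FINAL_KAF) + ' ')
    print(prober.get_charset_name())  # expected: windows-1255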
|
Titulacion-Sistemas/PythonTitulacion-EV
|
refs/heads/master
|
Lib/site-packages/pip/commands/wheel.py
|
74
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.log import logger
from pip.exceptions import CommandError, PreviousBuildDirError
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.util import normalize_path
from pip.wheel import WheelBuilder, wheel_setuptools_support, setuptools_requirement
from pip import cmdoptions
DEFAULT_WHEEL_DIR = os.path.join(normalize_path(os.curdir), 'wheelhouse')
class WheelCommand(Command):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not recompiling your software during every install.
For more details, see the wheel docs: http://wheel.readthedocs.org/en/latest.
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] <vcs project url> ...
%prog [options] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=DEFAULT_WHEEL_DIR,
help="Build wheels into <dir>, where the default is '<cwd>/wheelhouse'.")
cmd_opts.add_option(cmdoptions.use_wheel)
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.")
cmd_opts.add_option(cmdoptions.requirements)
cmd_opts.add_option(cmdoptions.download_cache)
cmd_opts.add_option(cmdoptions.no_deps)
cmd_opts.add_option(cmdoptions.build_dir)
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean)
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# confirm requirements
try:
import wheel.bdist_wheel
except ImportError:
raise CommandError("'pip wheel' requires bdist_wheel from the 'wheel' distribution.")
if not wheel_setuptools_support():
raise CommandError("'pip wheel' requires %s." % setuptools_requirement)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
finder = PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_mirrors=options.use_mirrors,
mirrors=options.mirrors,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_insecure=options.allow_insecure,
allow_all_external=options.allow_all_external,
allow_all_insecure=options.allow_all_insecure,
allow_all_prereleases=options.pre,
)
options.build_dir = os.path.abspath(options.build_dir)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=None,
download_dir=None,
download_cache=options.download_cache,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=True)
        # parse args and/or requirements files
for name in args:
if name.endswith(".whl"):
logger.notify("ignoring %s" % name)
continue
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options):
if req.editable or (req.name is None and req.url.endswith(".whl")):
logger.notify("ignoring %s" % req.url)
continue
requirement_set.add_requirement(req)
        # fail if no requirements
if not requirement_set.has_requirements:
opts = {'name': self.name}
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.error(msg)
return
try:
            # build wheels
            wb = WheelBuilder(
                requirement_set,
                finder,
                options.wheel_dir,
                build_options=options.build_options or [],
                global_options=options.global_options or []
            )
wb.build()
except PreviousBuildDirError:
return
finally:
if not options.no_clean:
requirement_set.cleanup_files()
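# Illustrative invocations (not part of the original module; they assume
# setuptools>=0.8 and the 'wheel' distribution are installed, per the checks
# in run() above):
#
#   pip wheel -r requirements.txt                # wheels land in ./wheelhouse
#   pip wheel --wheel-dir=/tmp/wheels SQLAlchemy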
|
Zanzibar82/script.module.urlresolver
|
refs/heads/master
|
lib/urlresolver/plugins/cloudy.py
|
4
|
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from urlresolver import common
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from lib import unwise
import urllib
class CloudyResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "cloudy.ec"
domains = [ "cloudy.ec", "cloudy.eu", "cloudy.sx", "cloudy.ch", "cloudy.com" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def __get_stream_url(self, media_id, filekey, error_num=0, error_url=None):
'''
Get stream url.
If previously found stream url is a dead link, add error params and try again
'''
if error_num > 0 and error_url:
_error_params = '&numOfErrors={0}&errorCode=404&errorUrl={1}'.format(
error_num,
urllib.quote_plus(error_url).replace('.', '%2E')
)
else:
_error_params = ''
#use api to find stream address
api_call = 'http://www.cloudy.ec/api/player.api.php?{0}&file={1}&key={2}{3}'.format(
'user=undefined&pass=undefined',
media_id,
urllib.quote_plus(filekey).replace('.', '%2E'),
_error_params
)
api_html = self.net.http_GET(api_call).content
rapi = re.search('url=(.+?)&title=', api_html)
if rapi:
return urllib.unquote(rapi.group(1))
return None
def __is_stream_url_active(self, web_url):
try:
header = self.net.http_HEAD(web_url)
if header.get_headers():
return True
return False
        except Exception:
return False
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
#grab stream details
html = self.net.http_GET(web_url).content
html = unwise.unwise_process(html)
filekey = unwise.resolve_var(html, "vars.key")
error_url = None
stream_url = None
        # try to resolve up to 3 times, then give up
        for x in range(3):
link = self.__get_stream_url(media_id, filekey,
error_num=x,
error_url=error_url)
if link:
active = self.__is_stream_url_active(link)
if active:
stream_url = urllib.unquote(link)
                    break
else:
# link inactive
error_url = link
else:
# no link found
raise UrlResolver.ResolverError('File Not Found or removed')
if stream_url:
return stream_url
else:
raise UrlResolver.ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
return 'http://www.cloudy.ec/embed.php?id=%s' % media_id
def get_host_and_id(self, url):
r = re.search('(https?://(?:www\.|embed\.)cloudy\.(?:ec|eu|sx|ch|com))/(?:video/|embed\.php\?id=)([0-9a-z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match('https?://(?:www\.|embed\.)cloudy\.(?:ec|eu|sx|ch|com)/(?:video/|embed\.php\?id=)([0-9a-z]+)', url) or 'cloudy.' in host
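# Illustrative usage (the media id is hypothetical; in practice the resolver
# is driven by urlresolver's plugin machinery rather than called directly):
#
#   resolver = CloudyResolver()
#   parts = resolver.get_host_and_id('http://www.cloudy.ec/embed.php?id=abc123')
#   if parts:
#       host, media_id = parts
#       stream_url = resolver.get_media_url(host, media_id)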
|
kakunbsc/enigma2.1
|
refs/heads/master
|
lib/python/Components/ActionMap.py
|
30
|
from enigma import eActionMap
class ActionMap:
def __init__(self, contexts = [ ], actions = { }, prio=0):
self.actions = actions
self.contexts = contexts
self.prio = prio
self.p = eActionMap.getInstance()
self.bound = False
self.exec_active = False
self.enabled = True
def setEnabled(self, enabled):
self.enabled = enabled
self.checkBind()
def doBind(self):
if not self.bound:
for ctx in self.contexts:
self.p.bindAction(ctx, self.prio, self.action)
self.bound = True
def doUnbind(self):
if self.bound:
for ctx in self.contexts:
self.p.unbindAction(ctx, self.action)
self.bound = False
def checkBind(self):
if self.exec_active and self.enabled:
self.doBind()
else:
self.doUnbind()
def execBegin(self):
self.exec_active = True
self.checkBind()
def execEnd(self):
self.exec_active = False
self.checkBind()
def action(self, context, action):
print " ".join(("action -> ", context, action))
if self.actions.has_key(action):
res = self.actions[action]()
if res is not None:
return res
return 1
else:
print "unknown action %s/%s! typo in keymap?" % (context, action)
return 0
def destroy(self):
pass
class NumberActionMap(ActionMap):
def action(self, contexts, action):
numbers = ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
if (action in numbers and self.actions.has_key(action)):
res = self.actions[action](int(action))
if res is not None:
return res
return 1
else:
return ActionMap.action(self, contexts, action)
class HelpableActionMap(ActionMap):
"""An Actionmap which automatically puts the actions into the helpList.
Note that you can only use ONE context here!"""
# sorry for this complicated code.
# it's not more than converting a "documented" actionmap
# (where the values are possibly (function, help)-tuples)
# into a "classic" actionmap, where values are just functions.
# the classic actionmap is then passed to the ActionMap constructor,
    # the collected helpstrings (with correct context, action) are added
    # to the screen's "helpList", which will be picked up by
    # the "HelpableScreen".
def __init__(self, parent, context, actions = { }, prio=0):
alist = [ ]
adict = { }
for (action, funchelp) in actions.iteritems():
# check if this is a tuple
if isinstance(funchelp, tuple):
alist.append((action, funchelp[1]))
adict[action] = funchelp[0]
else:
adict[action] = funchelp
ActionMap.__init__(self, [context], adict, prio)
parent.helpList.append((self, context, alist))
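# Illustrative usage from a hypothetical Screen (values may be plain
# callables, or (callable, helptext) tuples when HelpableActionMap is used):
#
#   self["actions"] = HelpableActionMap(self, "OkCancelActions",
#       {
#           "ok": (self.ok, _("confirm the selection")),
#           "cancel": self.close,
#       }, prio=-1)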
|
quheng/scikit-learn
|
refs/heads/master
|
sklearn/datasets/setup.py
|
306
|
import numpy
import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('datasets', parent_package, top_path)
config.add_data_dir('data')
config.add_data_dir('descr')
config.add_data_dir('images')
config.add_data_dir(os.path.join('tests', 'data'))
config.add_extension('_svmlight_format',
sources=['_svmlight_format.c'],
include_dirs=[numpy.get_include()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
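# Illustrative standalone use: build the _svmlight_format extension in place
# with the standard numpy.distutils command
#
#   python setup.py build_ext --inplace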
|
varunagrawal/azure-services
|
refs/heads/master
|
varunagrawal/site-packages/django/contrib/localflavor/mk/forms.py
|
89
|
from __future__ import absolute_import
import datetime
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.mk.mk_choices import MK_MUNICIPALITIES
class MKIdentityCardNumberField(RegexField):
"""
A Macedonian ID card number. Accepts both old and new format.
"""
default_error_messages = {
'invalid': _(u'Identity card numbers must contain'
' either 4 to 7 digits or an uppercase letter and 7 digits.'),
}
def __init__(self, *args, **kwargs):
kwargs['min_length'] = None
kwargs['max_length'] = 8
regex = ur'(^[A-Z]{1}\d{7}$)|(^\d{4,7}$)'
super(MKIdentityCardNumberField, self).__init__(regex, *args, **kwargs)
class MKMunicipalitySelect(Select):
"""
A form ``Select`` widget that uses a list of Macedonian municipalities as
choices. The label is the name of the municipality and the value
is a 2 character code for the municipality.
"""
def __init__(self, attrs=None):
        super(MKMunicipalitySelect, self).__init__(attrs, choices=MK_MUNICIPALITIES)
class UMCNField(RegexField):
"""
A form field that validates input as a unique master citizen
number.
    The format of the unique master citizen number has been kept unchanged
    since Yugoslavia. It is still in use in other countries as well; it is
    not used solely in Macedonia. For more information see:
https://secure.wikimedia.org/wikipedia/en/wiki/Unique_Master_Citizen_Number
A value will pass validation if it complies to the following rules:
* Consists of exactly 13 digits
* The first 7 digits represent a valid past date in the format DDMMYYY
* The last digit of the UMCN passes a checksum test
"""
default_error_messages = {
'invalid': _(u'This field should contain exactly 13 digits.'),
'date': _(u'The first 7 digits of the UMCN must represent a valid past date.'),
'checksum': _(u'The UMCN is not valid.'),
}
def __init__(self, *args, **kwargs):
kwargs['min_length'] = None
kwargs['max_length'] = 13
super(UMCNField, self).__init__(r'^\d{13}$', *args, **kwargs)
def clean(self, value):
value = super(UMCNField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not self._validate_date_part(value):
raise ValidationError(self.error_messages['date'])
if self._validate_checksum(value):
return value
else:
raise ValidationError(self.error_messages['checksum'])
def _validate_checksum(self, value):
        a, b, c, d, e, f, g, h, i, j, k, l, K = [int(digit) for digit in value]
        m = 11 - ((7*(a+g) + 6*(b+h) + 5*(c+i) + 4*(d+j) + 3*(e+k) + 2*(f+l)) % 11)
if (m >= 1 and m <= 9) and K == m:
return True
elif m == 11 and K == 0:
return True
else:
return False
def _validate_date_part(self, value):
daypart, monthpart, yearpart = int(value[:2]), int(value[2:4]), int(value[4:7])
if yearpart >= 800:
yearpart += 1000
else:
yearpart += 2000
try:
            date = datetime.datetime(year=yearpart, month=monthpart, day=daypart).date()
except ValueError:
return False
if date >= datetime.datetime.now().date():
return False
return True
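# Worked example (the UMCN below is hypothetical, constructed to satisfy the
# rules above): for '0101990450006' the date part 01-01-990 maps to
# 1 January 1990, and m = 11 - ((7*(0+0) + 6*(1+4) + 5*(0+5) + 4*(1+0)
# + 3*(9+0) + 2*(9+0)) % 11) = 11 - (104 % 11) = 6, which matches the
# control digit:
#
#   UMCNField().clean(u'0101990450006')  # passes, returns the value
#   UMCNField().clean(u'1234567890123')  # raises ValidationError ('date')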
|
cloudtek/dynamodb-odm
|
refs/heads/master
|
docs/en/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# DynamoDM documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DynamoDM'
copyright = u'copyrightplaceholderreplaceme'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0-alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'php'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DynamoDBODMdoc'
# If true, “Created using Sphinx” is shown in the HTML footer. Default is True.
html_show_sphinx = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DynamoDBODM.tex', u'DynamoDM Documentation',
u'Cloudtek, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
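# Illustrative build command (run from this directory; sphinx-build is the
# standard Sphinx CLI):
#
#   sphinx-build -b html . _build/html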
|
dawnpower/nova
|
refs/heads/master
|
nova/api/openstack/compute/contrib/flavormanage.py
|
3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import context as nova_context
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
"""The Flavor Lifecycle API controller for the OpenStack API."""
_view_builder_class = flavors_view.ViewBuilder
def __init__(self):
super(FlavorManageController, self).__init__()
@wsgi.action("delete")
def _delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
try:
flavor = flavors.get_flavor_by_flavor_id(
id, ctxt=context, read_deleted="no")
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
flavors.destroy(flavor['name'])
return webob.Response(status_int=202)
@wsgi.action("create")
def _create(self, req, body):
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
if not self.is_valid_body(body, 'flavor'):
msg = _("Invalid request body")
raise webob.exc.HTTPBadRequest(explanation=msg)
vals = body['flavor']
name = vals.get('name')
if name is None:
msg = _("A valid name parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
flavorid = vals.get('id')
memory = vals.get('ram')
if memory is None:
msg = _("A valid ram parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
vcpus = vals.get('vcpus')
if vcpus is None:
msg = _("A valid vcpus parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
root_gb = vals.get('disk')
if root_gb is None:
msg = _("A valid disk parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
swap = vals.get('swap', 0)
rxtx_factor = vals.get('rxtx_factor', 1.0)
is_public = vals.get('os-flavor-access:is_public', True)
try:
flavor = flavors.create(name, memory, vcpus, root_gb,
ephemeral_gb=ephemeral_gb,
flavorid=flavorid, swap=swap,
rxtx_factor=rxtx_factor,
is_public=is_public)
req.cache_db_flavor(flavor)
except (exception.FlavorExists,
exception.FlavorIdExists) as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.FlavorCreateFailed as exc:
            raise webob.exc.HTTPInternalServerError(
                explanation=exc.format_message())
return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
"""Flavor create/delete API support."""
name = "FlavorManage"
alias = "os-flavor-manage"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_manage/api/v1.1")
updated = "2012-01-19T00:00:00Z"
def get_controller_extensions(self):
controller = FlavorManageController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
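# Illustrative request body for the "create" action (field names mirror the
# parsing logic in _create() above; the values are hypothetical):
#
#   POST /v2/{tenant_id}/flavors
#   {
#       "flavor": {
#           "name": "m1.tiny.custom",
#           "ram": 512,
#           "vcpus": 1,
#           "disk": 1,
#           "id": "10",
#           "swap": 0,
#           "rxtx_factor": 1.0,
#           "os-flavor-access:is_public": true,
#           "OS-FLV-EXT-DATA:ephemeral": 0
#       }
#   }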
|
dfroger/myqueue
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
from myqueue import __version__
setup(
name = 'myqueue',
version = __version__,
url = 'https://github.com/dfroger/myqueue',
description = 'A queue implemented with 2 stacks, for demo purpose',
license = 'GPL2',
author = 'David Froger',
author_email = 'david.froger@mailoo.org',
packages = ['myqueue',],
)
|
trankmichael/scikit-learn
|
refs/heads/master
|
sklearn/_build_utils.py
|
280
|
"""
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from numpy.distutils.system_info import get_info
def get_blas_info():
def atlas_not_found(blas_info_):
        def_macros = blas_info_.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
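# Illustrative use from a setup.py (a sketch of how build scripts typically
# consume this helper; 'my_ext' is hypothetical):
#
#   cblas_libs, blas_info = get_blas_info()
#   config.add_extension('my_ext',
#                        sources=['my_ext.c'],
#                        libraries=cblas_libs,
#                        **blas_info)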
|
xyuanmu/XX-Net
|
refs/heads/master
|
python3.8.2/Lib/encodings/euc_jisx0213.py
|
816
|
#
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
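# Illustrative round trip (the codec is looked up through the encodings
# package search function; HIRAGANA LETTER A maps to 0xA4A2 as in EUC-JP):
#
#   '\u3042'.encode('euc_jisx0213')     # -> b'\xa4\xa2'
#   b'\xa4\xa2'.decode('euc_jisx0213')  # -> '\u3042'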
|
TridevGuha/django
|
refs/heads/master
|
tests/sessions_tests/tests.py
|
24
|
import base64
import os
import shutil
import string
import sys
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_save_doesnt_clear_data(self):
self.session['a'] = 'b'
self.session.save()
self.assertEqual(self.session['a'], 'b')
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail(
"The session object did not save properly. "
"Middleware may be saving cache items without namespaces."
)
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ''
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = '1234567'
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = '12345678'
self.assertEqual(self.session.session_key, '12345678')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
if self.backend is CookieSession:
raise unittest.SkipTest("Cookie backend doesn't have an external store to create records in.")
session = self.backend('someunknownkey')
session.load()
self.assertFalse(session.exists(session.session_key))
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, 'someunknownkey')
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = Session.objects.get(session_key=session_key)
self.assertEqual(force_text(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, Session.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, Session.objects.count())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
        # Ticket #21000: CacheDB backend should respect SESSION_CACHE_ALIAS.
self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(caches['default'].get(self.session.cache_key), None)
self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_DOMAIN='.example.local')
def test_session_delete_on_end_with_custom_domain(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header with a custom domain looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, '
'01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response['Vary'], 'Cookie')
def test_empty_session_saved(self):
""""
If a session is emptied of data but still has a key, it should still
be updated.
"""
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Set a session key and some data.
middleware.process_request(request)
request.session['foo'] = 'bar'
# Handle the response through the middleware.
response = middleware.process_response(request, response)
self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Empty the session data.
del request.session['foo']
# Handle the response through the middleware.
response = HttpResponse('Session test')
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
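        # The stored data was JSON-serialized, so unpickling it fails;
        # load() must not raise and should fall back to a fresh session.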
self.session.load()
|
I-sektionen/i-portalen
|
refs/heads/master
|
wsgi/iportalen_django/thesis_portal/admin.py
|
1
|
from django.contrib import admin
from .models import Thesis_Article
from utils.admin import HiddenModelAdmin, iportalen_admin_site, iportalen_superadmin_site
iportalen_admin_site.register(Thesis_Article)
iportalen_superadmin_site.register(Thesis_Article)
|
tareqalayan/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_ssl_certificate.py
|
27
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_ssl_certificate import ArgumentSpec
from library.modules.bigip_ssl_certificate import ApiParameters
from library.modules.bigip_ssl_certificate import ModuleParameters
from library.modules.bigip_ssl_certificate import ModuleManager
from library.modules.bigip_ssl_certificate import HAS_F5SDK
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_ssl_certificate import ArgumentSpec
from ansible.modules.network.f5.bigip_ssl_certificate import ApiParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleManager
from ansible.modules.network.f5.bigip_ssl_certificate import HAS_F5SDK
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
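    # Fixtures may be JSON or raw text (e.g. PEM certificates): JSON parsing
    # is attempted below and the raw string is kept if it fails.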
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters_cert(self):
cert_content = load_fixture('create_insecure_cert1.crt')
args = dict(
content=cert_content,
name="cert1",
partition="Common",
state="present",
password='password',
server='localhost',
user='admin'
)
p = ModuleParameters(params=args)
assert p.name == 'cert1'
assert p.filename == 'cert1.crt'
assert 'Signature Algorithm' in p.content
assert '-----BEGIN CERTIFICATE-----' in p.content
assert '-----END CERTIFICATE-----' in p.content
assert p.checksum == '1e55aa57ee166a380e756b5aa4a835c5849490fe'
assert p.state == 'present'
assert p.user == 'admin'
assert p.server == 'localhost'
assert p.password == 'password'
assert p.partition == 'Common'
def test_module_issuer_cert_key(self):
args = dict(
issuer_cert='foo',
partition="Common",
)
p = ModuleParameters(params=args)
assert p.issuer_cert == '/Common/foo.crt'
def test_api_issuer_cert_key(self):
args = load_fixture('load_sys_file_ssl_cert_with_issuer_cert.json')
p = ApiParameters(params=args)
assert p.issuer_cert == '/Common/intermediate.crt'
class TestCertificateManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_import_certificate_and_key_no_key_passphrase(self, *args):
set_module_args(dict(
name='foo',
content=load_fixture('cert1.crt'),
state='present',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_import_certificate_chain(self, *args):
set_module_args(dict(
name='foo',
content=load_fixture('chain1.crt'),
state='present',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
hubsaysnuaa/odoo
|
refs/heads/8.0
|
addons/sale/edi/__init__.py
|
454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jjmiranda/edx-platform
|
refs/heads/master
|
lms/djangoapps/notification_prefs/views.py
|
163
|
from base64 import urlsafe_b64encode, urlsafe_b64decode
from hashlib import sha256
import json
from Crypto.Cipher import AES
from Crypto import Random
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_GET, require_POST
from edxmako.shortcuts import render_to_response
from notification_prefs import NOTIFICATION_PREF_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference
class UsernameDecryptionException(Exception):
pass
class UsernameCipher(object):
"""
A transformation of a username to/from an opaque token
The purpose of the token is to make one-click unsubscribe links that don't
require the user to log in. To prevent users from unsubscribing other users,
we must ensure the token cannot be computed by anyone who has this
source code. The token must also be embeddable in a URL.
Thus, we take the following steps to encode (and do the inverse to decode):
1. Pad the UTF-8 encoding of the username with PKCS#7 padding to match the
AES block length
2. Generate a random AES block length initialization vector
3. Use AES-256 (with a hash of settings.SECRET_KEY as the encryption key)
in CBC mode to encrypt the username
4. Prepend the IV to the encrypted value to allow for initialization of the
decryption cipher
5. base64url encode the result
"""
@staticmethod
def _get_aes_cipher(initialization_vector):
hash_ = sha256()
hash_.update(settings.SECRET_KEY)
return AES.new(hash_.digest(), AES.MODE_CBC, initialization_vector)
@staticmethod
def _add_padding(input_str):
"""Return `input_str` with PKCS#7 padding added to match AES block length"""
padding_len = AES.block_size - len(input_str) % AES.block_size
return input_str + padding_len * chr(padding_len)
@staticmethod
def _remove_padding(input_str):
"""Return `input_str` with PKCS#7 padding trimmed to match AES block length"""
num_pad_bytes = ord(input_str[-1])
if num_pad_bytes < 1 or num_pad_bytes > AES.block_size or num_pad_bytes >= len(input_str):
raise UsernameDecryptionException("padding")
return input_str[:-num_pad_bytes]
@staticmethod
def encrypt(username):
initialization_vector = Random.new().read(AES.block_size)
aes_cipher = UsernameCipher._get_aes_cipher(initialization_vector)
return urlsafe_b64encode(
initialization_vector +
aes_cipher.encrypt(UsernameCipher._add_padding(username.encode("utf-8")))
)
@staticmethod
def decrypt(token):
try:
base64_decoded = urlsafe_b64decode(token)
except TypeError:
raise UsernameDecryptionException("base64url")
if len(base64_decoded) < AES.block_size:
raise UsernameDecryptionException("initialization_vector")
initialization_vector = base64_decoded[:AES.block_size]
aes_encrypted = base64_decoded[AES.block_size:]
aes_cipher = UsernameCipher._get_aes_cipher(initialization_vector)
try:
decrypted = aes_cipher.decrypt(aes_encrypted)
except ValueError:
raise UsernameDecryptionException("aes")
return UsernameCipher._remove_padding(decrypted)
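# A minimal round-trip sketch of the scheme described in UsernameCipher's
# docstring. Illustration only (not used by the views): it substitutes a
# throwaway key for the hash of settings.SECRET_KEY so it can run without
# Django settings configured.
def _demo_username_cipher_roundtrip():
    key = sha256("example-secret").digest()  # stand-in for sha256(SECRET_KEY)
    initialization_vector = Random.new().read(AES.block_size)
    # Steps 1-5: pad, encrypt with AES in CBC mode, prepend IV, base64url encode.
    padded = UsernameCipher._add_padding("alice")
    token = urlsafe_b64encode(
        initialization_vector +
        AES.new(key, AES.MODE_CBC, initialization_vector).encrypt(padded)
    )
    # Inverse: base64url decode, split off IV, decrypt, strip padding.
    raw = urlsafe_b64decode(token)
    plain = AES.new(key, AES.MODE_CBC, raw[:AES.block_size]).decrypt(
        raw[AES.block_size:])
    assert UsernameCipher._remove_padding(plain) == "alice"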
def enable_notifications(user):
"""
Enable notifications for a user.
Currently only used for daily forum digests.
"""
# Calling UserPreference directly because this method is called from a couple of places,
# and it is not clear that user is always the user initiating the request.
UserPreference.objects.get_or_create(
user=user,
key=NOTIFICATION_PREF_KEY,
defaults={
"value": UsernameCipher.encrypt(user.username)
}
)
@require_POST
def ajax_enable(request):
"""
A view that enables notifications for the authenticated user
This view should be invoked by an AJAX POST call. It returns status 204
(no content) or an error. If notifications were already enabled for this
user, this has no effect. Otherwise, a preference is created with the
    unsubscribe token (an encryption of the username) as the value.
"""
if not request.user.is_authenticated():
raise PermissionDenied
enable_notifications(request.user)
return HttpResponse(status=204)
@require_POST
def ajax_disable(request):
"""
A view that disables notifications for the authenticated user
This view should be invoked by an AJAX POST call. It returns status 204
(no content) or an error.
"""
if not request.user.is_authenticated():
raise PermissionDenied
delete_user_preference(request.user, NOTIFICATION_PREF_KEY)
return HttpResponse(status=204)
@require_GET
def ajax_status(request):
"""
A view that retrieves notifications status for the authenticated user.
This view should be invoked by an AJAX GET call. It returns status 200,
with a JSON-formatted payload, or an error.
"""
if not request.user.is_authenticated():
raise PermissionDenied
qs = UserPreference.objects.filter(
user=request.user,
key=NOTIFICATION_PREF_KEY
)
return HttpResponse(json.dumps({"status": len(qs)}), content_type="application/json")
@require_GET
def set_subscription(request, token, subscribe): # pylint: disable=unused-argument
"""
A view that disables or re-enables notifications for a user who may not be authenticated
This view is meant to be the target of an unsubscribe link. The request
must be a GET, and the `token` parameter must decrypt to a valid username.
    The subscribe flag controls whether the view subscribes or unsubscribes the
    user, with subscribe=True used to "undo" accidentally clicking on the
    unsubscribe link.
A 405 will be returned if the request method is not GET. A 404 will be
returned if the token parameter does not decrypt to a valid username. On
success, the response will contain a page indicating success.
"""
try:
username = UsernameCipher().decrypt(token.encode())
user = User.objects.get(username=username)
except UnicodeDecodeError:
raise Http404("base64url")
except UsernameDecryptionException as exn:
raise Http404(exn.message)
except User.DoesNotExist:
raise Http404("username")
# Calling UserPreference directly because the fact that the user is passed in the token implies
# that it may not match request.user.
if subscribe:
UserPreference.objects.get_or_create(user=user,
key=NOTIFICATION_PREF_KEY,
defaults={
"value": UsernameCipher.encrypt(user.username)
})
return render_to_response("resubscribe.html", {'token': token})
else:
UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).delete()
return render_to_response("unsubscribe.html", {'token': token})
|
Sorsly/subtle
|
refs/heads/master
|
google-cloud-sdk/lib/surface/kms/cryptokeys/versions/list.py
|
1
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List the versions within a CryptoKey."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kms import flags
from googlecloudsdk.core import resources
class List(base.ListCommand):
r"""List the versions within a CryptoKey.
Lists all of the CryptoKeyVersions within the given CryptoKey.
## EXAMPLES
The following command lists all CryptoKeyVersions within the
CryptoKey `frodo`, KeyRing `fellowship`, and Location `global`:
$ {command} --location global \
--keyring fellowship \
--cryptokey frodo
"""
def Collection(self):
return flags.CRYPTO_KEY_VERSION_COLLECTION
def GetUriFunc(self):
return cloudkms_base.MakeGetUriFunc(self)
def Run(self, args):
# pylint: disable=line-too-long
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
crypto_key_ref = resources.REGISTRY.Create(flags.CRYPTO_KEY_COLLECTION)
request = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListRequest(
parent=crypto_key_ref.RelativeName())
return list_pager.YieldFromList(
client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions,
request,
field='cryptoKeyVersions',
limit=args.limit,
batch_size_attribute='pageSize')
|
filemakergarage/zeroclient
|
refs/heads/master
|
docs/conf.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ZeroClient documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 9 20:21:11 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ZeroClient'
copyright = '2017, Nils Waldherr'
author = 'Nils Waldherr'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZeroClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ZeroClient.tex', 'ZeroClient Documentation',
'Nils Waldherr', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zeroclient', 'ZeroClient Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ZeroClient', 'ZeroClient Documentation',
author, 'ZeroClient', 'One line description of project.',
'Miscellaneous'),
]
|
mozilla/make.mozilla.org
|
refs/heads/master
|
vendor-local/lib/python/requests/packages/urllib3/packages/mimetools_choose_boundary/__init__.py
|
55
|
"""The function mimetools.choose_boundary() from Python 2.7, which seems to
have disappeared in Python 3 (although email.generator._make_boundary() might
work as a replacement?).
Tweaked to use lock from threading rather than thread.
"""
import os
from threading import Lock
_counter_lock = Lock()
_counter = 0
def _get_next_counter():
global _counter
with _counter_lock:
_counter += 1
return _counter
_prefix = None
def choose_boundary():
"""Return a string usable as a multipart boundary.
The string chosen is unique within a single program run, and
incorporates the user id (if available), process id (if available),
and current time. So it's very unlikely the returned string appears
in message text, but there's no guarantee.
The boundary contains dots so you have to quote it in the header."""
global _prefix
import time
if _prefix is None:
import socket
try:
hostid = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
hostid = '127.0.0.1'
try:
uid = repr(os.getuid())
except AttributeError:
uid = '1'
try:
pid = repr(os.getpid())
except AttributeError:
pid = '1'
_prefix = hostid + '.' + uid + '.' + pid
return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())
|
gregdek/ansible
|
refs/heads/devel
|
lib/ansible/plugins/cache/base.py
|
232
|
# (c) 2017, ansible by Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Moved actual classes to __init__; kept here for backward compat with 3rd parties.
from ansible.plugins.cache import BaseCacheModule, BaseFileCacheModule
|
EiSandi/greetingslack
|
refs/heads/master
|
greetingslack/lib/python2.7/site-packages/pip/_vendor/distro.py
|
330
|
# Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
It is a renewed alternative implementation for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
3.5 deprecated this function, and Python 3.7 is expected to remove it
altogether. Its predecessor function :py:func:`platform.dist` was already
deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
Still, there are many cases in which access to Linux distribution information
is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
more information.
"""
import os
import re
import sys
import json
import shlex
import logging
import subprocess
if not sys.platform.startswith('linux'):
raise ImportError('Unsupported platform: {0}'.format(sys.platform))
_UNIXCONFDIR = '/etc'
_OS_RELEASE_BASENAME = 'os-release'
#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#: with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}
#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#: case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
'redhatenterpriseworkstation': 'rhel', # RHEL 6.7
}
#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#: translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
'redhat': 'rhel', # RHEL 6.x, 7.x
}
# Pattern for content of distro release file (reversed)
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
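# The pattern above is applied to the *reversed* line (see
# _parse_distro_release_content): 'STL' and 'esaeler' are 'LTS' and 'release'
# reversed, so for e.g. 'CentOS Linux release 7.1.1503 (Core)' group(1) holds
# the reversed codename, group(2) the reversed version and group(3) the
# reversed name, each of which is reversed back before use.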
# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
r'(\w+)[-_](release|version)$')
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
'debian_version',
'lsb-release',
'oem-release',
_OS_RELEASE_BASENAME,
'system-release'
)
def linux_distribution(full_distribution_name=True):
"""
Return information about the current Linux distribution as a tuple
``(id_name, version, codename)`` with items as follows:
* ``id_name``: If *full_distribution_name* is false, the result of
:func:`distro.id`. Otherwise, the result of :func:`distro.name`.
* ``version``: The result of :func:`distro.version`.
* ``codename``: The result of :func:`distro.codename`.
The interface of this function is compatible with the original
:py:func:`platform.linux_distribution` function, supporting a subset of
its parameters.
    The data it returns may not be exactly the same, because it uses more data
    sources than the original function, and that may lead to different data if
    the Linux distribution is not consistent across multiple data sources it
    provides (there are indeed such distributions ...).
Another reason for differences is the fact that the :func:`distro.id`
method normalizes the distro ID string to a reliable machine-readable value
for a number of popular Linux distributions.
"""
return _distro.linux_distribution(full_distribution_name)
def id():
"""
Return the distro ID of the current Linux distribution, as a
machine-readable string.
For a number of Linux distributions, the returned distro ID value is
*reliable*, in the sense that it is documented and that it does not change
across releases of the distribution.
This package maintains the following reliable distro ID values:
============== =========================================
Distro ID Distribution
============== =========================================
"ubuntu" Ubuntu
"debian" Debian
"rhel" RedHat Enterprise Linux
"centos" CentOS
"fedora" Fedora
"sles" SUSE Linux Enterprise Server
"opensuse" openSUSE
"amazon" Amazon Linux
"arch" Arch Linux
"cloudlinux" CloudLinux OS
"exherbo" Exherbo Linux
"gentoo" GenToo Linux
"ibm_powerkvm" IBM PowerKVM
"kvmibm" KVM for IBM z Systems
"linuxmint" Linux Mint
"mageia" Mageia
"mandriva" Mandriva Linux
"parallels" Parallels
"pidora" Pidora
"raspbian" Raspbian
"oracle" Oracle Linux (and Oracle Enterprise Linux)
"scientific" Scientific Linux
"slackware" Slackware
"xenserver" XenServer
============== =========================================
If you have a need to get distros for reliable IDs added into this set,
or if you find that the :func:`distro.id` function returns a different
distro ID for one of the listed distros, please create an issue in the
`distro issue tracker`_.
**Lookup hierarchy and transformations:**
First, the ID is obtained from the following sources, in the specified
order. The first available and non-empty value is used:
* the value of the "ID" attribute of the os-release file,
* the value of the "Distributor ID" attribute returned by the lsb_release
command,
* the first part of the file name of the distro release file,
    The ID value determined this way then passes through the following
    transformations before it is returned by this method:
* it is translated to lower case,
* blanks (which should not be there anyway) are translated to underscores,
* a normalization of the ID is performed, based upon
`normalization tables`_. The purpose of this normalization is to ensure
that the ID is as reliable as possible, even across incompatible changes
in the Linux distributions. A common reason for an incompatible change is
the addition of an os-release file, or the addition of the lsb_release
command, with ID values that differ from what was previously determined
from the distro release file name.
"""
return _distro.id()
def name(pretty=False):
"""
Return the name of the current Linux distribution, as a human-readable
string.
If *pretty* is false, the name is returned without version or codename.
(e.g. "CentOS Linux")
If *pretty* is true, the version and codename are appended.
(e.g. "CentOS Linux 7.1.1503 (Core)")
**Lookup hierarchy:**
The name is obtained from the following sources, in the specified order.
The first available and non-empty value is used:
* If *pretty* is false:
- the value of the "NAME" attribute of the os-release file,
- the value of the "Distributor ID" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file.
* If *pretty* is true:
- the value of the "PRETTY_NAME" attribute of the os-release file,
- the value of the "Description" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file, appended
with the value of the pretty version ("<version_id>" and "<codename>"
fields) of the distro release file, if available.
"""
return _distro.name(pretty)
def version(pretty=False, best=False):
"""
Return the version of the current Linux distribution, as a human-readable
string.
If *pretty* is false, the version is returned without codename (e.g.
"7.0").
If *pretty* is true, the codename in parenthesis is appended, if the
codename is non-empty (e.g. "7.0 (Maipo)").
Some distributions provide version numbers with different precisions in
the different sources of distribution information. Examining the different
sources in a fixed priority order does not always yield the most precise
version (e.g. for Debian 8.2, or CentOS 7.1).
The *best* parameter can be used to control the approach for the returned
version:
If *best* is false, the first non-empty version number in priority order of
the examined sources is returned.
If *best* is true, the most precise version number out of all examined
sources is returned.
**Lookup hierarchy:**
In all cases, the version number is obtained from the following sources.
If *best* is false, this order represents the priority order:
* the value of the "VERSION_ID" attribute of the os-release file,
* the value of the "Release" attribute returned by the lsb_release
command,
* the version number parsed from the "<version_id>" field of the first line
of the distro release file,
* the version number parsed from the "PRETTY_NAME" attribute of the
os-release file, if it follows the format of the distro release files.
* the version number parsed from the "Description" attribute returned by
the lsb_release command, if it follows the format of the distro release
files.
"""
return _distro.version(pretty, best)
def version_parts(best=False):
"""
Return the version of the current Linux distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
* ``minor``: The result of :func:`distro.minor_version`.
* ``build_number``: The result of :func:`distro.build_number`.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.version_parts(best)
def major_version(best=False):
"""
Return the major version of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The major version is the first
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.major_version(best)
def minor_version(best=False):
"""
Return the minor version of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The minor version is the second
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.minor_version(best)
def build_number(best=False):
"""
Return the build number of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The build number is the third part
of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.build_number(best)
def like():
"""
Return a space-separated list of distro IDs of distributions that are
closely related to the current Linux distribution in regards to packaging
and programming interfaces, for example distributions the current
distribution is a derivative from.
**Lookup hierarchy:**
This information item is only provided by the os-release file.
For details, see the description of the "ID_LIKE" attribute in the
`os-release man page
<http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
"""
return _distro.like()
def codename():
"""
Return the codename for the release of the current Linux distribution,
as a string.
If the distribution does not have a codename, an empty string is returned.
Note that the returned codename is not always really a codename. For
example, openSUSE returns "x86_64". This function does not handle such
cases in any special way and just returns the string it finds, if any.
**Lookup hierarchy:**
* the codename within the "VERSION" attribute of the os-release file, if
provided,
* the value of the "Codename" attribute returned by the lsb_release
command,
* the value of the "<codename>" field of the distro release file.
"""
return _distro.codename()
def info(pretty=False, best=False):
"""
Return certain machine-readable information items about the current Linux
distribution in a dictionary, as shown in the following example:
.. sourcecode:: python
{
'id': 'rhel',
'version': '7.0',
'version_parts': {
'major': '7',
'minor': '0',
'build_number': ''
},
'like': 'fedora',
'codename': 'Maipo'
}
The dictionary structure and keys are always the same, regardless of which
information items are available in the underlying data sources. The values
for the various keys are as follows:
* ``id``: The result of :func:`distro.id`.
* ``version``: The result of :func:`distro.version`.
* ``version_parts -> major``: The result of :func:`distro.major_version`.
* ``version_parts -> minor``: The result of :func:`distro.minor_version`.
* ``version_parts -> build_number``: The result of
:func:`distro.build_number`.
* ``like``: The result of :func:`distro.like`.
* ``codename``: The result of :func:`distro.codename`.
For a description of the *pretty* and *best* parameters, see the
:func:`distro.version` method.
"""
return _distro.info(pretty, best)
def os_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the os-release file data source of the current Linux distribution.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_info()
def lsb_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the lsb_release command data source of the current Linux distribution.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_info()
def distro_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current Linux distribution.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_info()
def os_release_attr(attribute):
"""
Return a single named information item from the os-release file data source
of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
"""
Return a single named information item from the lsb_release command output
data source of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
"""
Return a single named information item from the distro release file
data source of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_attr(attribute)
class LinuxDistribution(object):
"""
Provides information about a Linux distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
returns data about the current Linux distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
However, in situations where control is needed over the exact data sources
that are used, instances of this class can be created with a specific
distro release file, or a specific os-release file, or without invoking the
lsb_release command.
"""
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file=''):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self._os_release_info = self._get_os_release_info()
self._lsb_release_info = self._get_lsb_release_info() \
if include_lsb else {}
self._distro_release_info = self._get_distro_release_info()
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={0!r}, " \
"distro_release_file={1!r}, " \
"_os_release_info={2!r}, " \
"_lsb_release_info={3!r}, " \
"_distro_release_info={4!r})".format(
self.os_release_file,
self.distro_release_file,
self._os_release_info,
self._lsb_release_info,
self._distro_release_info)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the Linux distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the Linux distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the Linux distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
def version(self, pretty=False, best=False):
"""
Return the version of the Linux distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', '')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version
def version_parts(self, best=False):
"""
Return the version of the Linux distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
def major_version(self, best=False):
"""
Return the major version number of the current distribution.
For details, see :func:`distro.major_version`.
"""
return self.version_parts(best)[0]
def minor_version(self, best=False):
"""
Return the minor version number of the Linux distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
"""
Return the build number of the Linux distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
"""
Return the IDs of distributions that are like the Linux distribution.
For details, see :func:`distro.like`.
"""
return self.os_release_attr('id_like') or ''
def codename(self):
"""
Return the codename of the Linux distribution.
For details, see :func:`distro.codename`.
"""
return self.os_release_attr('codename') \
or self.lsb_release_attr('codename') \
or self.distro_release_attr('codename') \
or ''
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the Linux
distribution.
For details, see :func:`distro.info`.
"""
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
)
def os_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the os-release file data source of the Linux distribution.
For details, see :func:`distro.os_release_info`.
"""
return self._os_release_info
def lsb_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the Linux
distribution.
For details, see :func:`distro.lsb_release_info`.
"""
return self._lsb_release_info
def distro_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the distro release file data source of the Linux
distribution.
For details, see :func:`distro.distro_release_info`.
"""
return self._distro_release_info
def os_release_attr(self, attribute):
"""
Return a single named information item from the os-release file data
source of the Linux distribution.
For details, see :func:`distro.os_release_attr`.
"""
return self._os_release_info.get(attribute, '')
def lsb_release_attr(self, attribute):
"""
Return a single named information item from the lsb_release command
output data source of the Linux distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
return self._lsb_release_info.get(attribute, '')
def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the Linux distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '')
def _get_os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {}
@staticmethod
def _parse_os_release_content(lines):
"""
Parse the lines of an os-release file.
Parameters:
* lines: Iterable through the lines in the os-release file.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
# The shlex module defines its `wordchars` variable using literals,
# making it dependent on the encoding of the Python source file.
# In Python 2.6 and 2.7, the shlex source file is encoded in
# 'iso-8859-1', and the `wordchars` variable is defined as a byte
# string. This causes a UnicodeDecodeError to be raised when the
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
tokens = list(lexer)
for token in tokens:
# At this point, all shell-like parsing has been done (i.e.
# comments processed, quotes and backslash escape sequences
# processed, multi-line values assembled, trailing newlines
# stripped, etc.), so the tokens are now either:
# * variable assignments: var=value
# * commands or their arguments (not allowed in os-release)
if '=' in token:
k, v = token.split('=', 1)
if isinstance(v, bytes):
v = v.decode('utf-8')
props[k.lower()] = v
if k == 'VERSION':
# this handles cases in which the codename is in
# the `(CODENAME)` (rhel, centos, fedora) format
# or in the `, CODENAME` format (Ubuntu).
codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
if codename:
codename = codename.group()
codename = codename.strip('()')
codename = codename.strip(',')
codename = codename.strip()
                        # The codename appears within parentheses.
props['codename'] = codename
else:
props['codename'] = ''
else:
# Ignore any tokens that are not variable assignments
pass
return props
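    # For illustration (hypothetical input): feeding the two lines
    #   NAME="CentOS Linux"
    #   VERSION="7 (Core)"
    # through _parse_os_release_content yields
    #   {'name': 'CentOS Linux', 'version': '7 (Core)', 'codename': 'Core'}.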
def _get_lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
cmd = 'lsb_release -a'
process = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout, stderr = stdout.decode('utf-8'), stderr.decode('utf-8')
code = process.returncode
if code == 0:
content = stdout.splitlines()
return self._parse_lsb_release_content(content)
elif code == 127: # Command not found
return {}
else:
if sys.version_info[:2] >= (3, 5):
raise subprocess.CalledProcessError(code, cmd, stdout, stderr)
elif sys.version_info[:2] >= (2, 7):
raise subprocess.CalledProcessError(code, cmd, stdout)
elif sys.version_info[:2] == (2, 6):
raise subprocess.CalledProcessError(code, cmd)
@staticmethod
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
"""
props = {}
for line in lines:
line = line.decode('utf-8') if isinstance(line, bytes) else line
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props
def _get_distro_release_info(self):
"""
Get the information items from the specified distro release file.
Returns:
A dictionary containing all information items.
"""
if self.distro_release_file:
# If it was specified, we use it and parse what we can, even if
# its file name or content does not match the expected pattern.
distro_info = self._parse_distro_release_file(
self.distro_release_file)
basename = os.path.basename(self.distro_release_file)
# The file name pattern for user-specified distro release files
# is somewhat more tolerant (compared to when searching for the
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
distro_info['id'] = match.group(1)
return distro_info
else:
basenames = os.listdir(_UNIXCONFDIR)
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
basenames.sort()
for basename in basenames:
if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
filepath = os.path.join(_UNIXCONFDIR, basename)
distro_info = self._parse_distro_release_file(filepath)
if 'name' in distro_info:
# The name is always present if the pattern matches
self.distro_release_file = filepath
distro_info['id'] = match.group(1)
return distro_info
return {}
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(filepath):
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
return {}
@staticmethod
def _parse_distro_release_content(line):
"""
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
"""
if isinstance(line, bytes):
line = line.decode('utf-8')
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info['name'] = matches.group(3)[::-1]
if matches.group(2):
distro_info['version_id'] = matches.group(2)[::-1]
if matches.group(1):
distro_info['codename'] = matches.group(1)[::-1]
elif line:
distro_info['name'] = line.strip()
return distro_info
_distro = LinuxDistribution()
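# A hypothetical usage sketch of the consolidated accessor functions backed by
# the module-global instance above (actual output depends on the host distro):
#
#   import distro
#   distro.id()                  # e.g. 'ubuntu'
#   distro.version(best=True)    # e.g. '16.04.2'
#   distro.linux_distribution()  # e.g. ('Ubuntu', '16.04', 'xenial')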
def main():
import argparse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
parser = argparse.ArgumentParser(description="Linux distro info tool")
parser.add_argument(
'--json',
'-j',
help="Output in machine readable format",
action="store_true")
args = parser.parse_args()
if args.json:
logger.info(json.dumps(info(), indent=4, sort_keys=True))
else:
logger.info('Name: %s', name(pretty=True))
distribution_version = version(pretty=True)
if distribution_version:
logger.info('Version: %s', distribution_version)
distribution_codename = codename()
if distribution_codename:
logger.info('Codename: %s', distribution_codename)
if __name__ == '__main__':
main()
|
vFense/vFenseAgent-nix
|
refs/heads/development
|
agent/deps/rpm6/Python-2.7.5/lib/python2.7/urllib2.py
|
74
|
"""An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
urlopen(url, data=None) -- Basic usage is the same as original
urllib. Pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of a URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the arguments is a subclass of a default
handler, that argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
exceptions:
URLError -- A subclass of IOError; individual protocols have their own
specific subclasses.
HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response.
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib2
# set up authentication info
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
# install it
urllib2.install_opener(opener)
f = urllib2.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# the hash algorithm that was requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import base64
import hashlib
import httplib
import mimetools
import os
import posixpath
import random
import re
import socket
import sys
import time
import urlparse
import bisect
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from urllib import (unwrap, unquote, splittype, splithost, quote,
addinfourl, splitport, splittag, toBytes,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies, proxy_bypass
# used in User-Agent header sent
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
global _opener
if _opener is None:
_opener = build_opener()
return _opener.open(url, data, timeout)
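# A minimal usage sketch for the module-level convenience function defined
# above; the URL and timeout values are illustrative only:
#
#   f = urlopen('http://www.example.com/', timeout=10)
#   try:
#       body = f.read()
#   finally:
#       f.close()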
def install_opener(opener):
global _opener
_opener = opener
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
# URLError is a sub-type of IOError, but it doesn't share any of
# the implementation. need to override __init__ and __str__.
# It sets self.args for compatibility with other EnvironmentError
# subclasses, but args doesn't have the typical format with errno in
# slot 0 and strerror in slot 1. This may be better than nothing.
def __init__(self, reason):
self.args = reason,
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
"""Raised when HTTP error occurs, but also acts like non-error return"""
__super_init = addinfourl.__init__
def __init__(self, url, code, msg, hdrs, fp):
self.code = code
self.msg = msg
self.hdrs = hdrs
self.fp = fp
self.filename = url
# The addinfourl classes depend on fp being a valid file
# object. In some cases, the HTTPError may not have a valid
# file object. If this happens, the simplest workaround is to
# not initialize the base classes.
if fp is not None:
self.__super_init(fp, hdrs, url, code)
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
# since URLError specifies a .reason attribute, HTTPError should also
# provide this attribute. See issue13211 for discussion.
@property
def reason(self):
return self.msg
def info(self):
return self.hdrs
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = _cut_port_re.sub("", host, 1)
return host.lower()
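# A hedged illustration of the normalization performed above (the URL is
# made up):
#
#   >>> request_host(Request('http://www.Example.com:8080/index.html'))
#   'www.example.com'
#
# The port is stripped and the host lowercased, per the RFC 2965 variation
# noted in the docstring.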
class Request:
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
self.__original = unwrap(url)
self.__original, self.__fragment = splittag(self.__original)
self.type = None
# self.__r_type is what's left after doing the splittype
self.host = None
self.port = None
self._tunnel_host = None
self.data = data
self.headers = {}
for key, value in headers.items():
self.add_header(key, value)
self.unredirected_hdrs = {}
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
def __getattr__(self, attr):
# XXX this is a fallback mechanism to guard against these
# methods getting called in a non-standard order. this may be
# too complicated and/or unnecessary.
# XXX should the __r_XXX attributes be public?
if attr[:12] == '_Request__r_':
name = attr[12:]
if hasattr(Request, 'get_' + name):
getattr(self, 'get_' + name)()
return getattr(self, attr)
raise AttributeError, attr
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
# XXX these helper methods are lame
def add_data(self, data):
self.data = data
def has_data(self):
return self.data is not None
def get_data(self):
return self.data
def get_full_url(self):
if self.__fragment:
return '%s#%s' % (self.__original, self.__fragment)
else:
return self.__original
def get_type(self):
if self.type is None:
self.type, self.__r_type = splittype(self.__original)
if self.type is None:
raise ValueError, "unknown url type: %s" % self.__original
return self.type
def get_host(self):
if self.host is None:
self.host, self.__r_host = splithost(self.__r_type)
if self.host:
self.host = unquote(self.host)
return self.host
def get_selector(self):
return self.__r_host
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
self._tunnel_host = self.host
else:
self.type = type
self.__r_host = self.__original
self.host = host
def has_proxy(self):
return self.__r_host == self.__original
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_header(self, key, val):
# useful for something like authentication
self.headers[key.capitalize()] = val
def add_unredirected_header(self, key, val):
# will not be added to a redirected request
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
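# A short sketch of the Request API defined above; the URL, data and header
# values are hypothetical:
#
#   req = Request('http://www.example.com/', data='q=1',
#                 headers={'X-Custom': 'yes'})
#   req.get_method()            # -> 'POST', because has_data() is true
#   req.get_header('X-custom')  # -> 'yes'; keys are stored .capitalize()d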
class OpenerDirector:
def __init__(self):
client_version = "Python-urllib/%s" % __version__
self.addheaders = [('User-agent', client_version)]
# self.handlers is retained only for backward compatibility
self.handlers = []
# manage the individual handlers
self.handle_open = {}
self.handle_error = {}
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
i = meth.find("_")
protocol = meth[:i]
condition = meth[i+1:]
if condition.startswith("error"):
j = condition.find("_") + i + 1
kind = meth[j+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif condition == "open":
kind = protocol
lookup = self.handle_open
elif condition == "response":
kind = protocol
lookup = self.process_response
elif condition == "request":
kind = protocol
lookup = self.process_request
else:
continue
handlers = lookup.setdefault(kind, [])
if handlers:
bisect.insort(handlers, handler)
else:
handlers.append(handler)
added = True
if added:
bisect.insort(self.handlers, handler)
handler.add_parent(self)
def close(self):
# Only exists for backwards compatibility.
pass
def _call_chain(self, chain, kind, meth_name, *args):
# Handlers raise an exception if no one else should try to handle
# the request, or return None if they can't but another handler
# could. Otherwise, they return the response.
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
result = func(*args)
if result is not None:
return result
def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
# accept a URL or a Request object
if isinstance(fullurl, basestring):
req = Request(fullurl, data)
else:
req = fullurl
if data is not None:
req.add_data(data)
req.timeout = timeout
protocol = req.get_type()
# pre-process request
meth_name = protocol+"_request"
for processor in self.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = self._open(req, data)
# post-process response
meth_name = protocol+"_response"
for processor in self.process_response.get(protocol, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def _open(self, req, data=None):
result = self._call_chain(self.handle_open, 'default',
'default_open', req)
if result:
return result
protocol = req.get_type()
result = self._call_chain(self.handle_open, protocol, protocol +
'_open', req)
if result:
return result
return self._call_chain(self.handle_open, 'unknown',
'unknown_open', req)
def error(self, proto, *args):
if proto in ('http', 'https'):
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is no different from http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP, FTP and when applicable, HTTPS.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
import types
def isclass(obj):
return isinstance(obj, (types.ClassType, type))
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
if hasattr(httplib, 'HTTPS'):
default_classes.append(HTTPSHandler)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
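# A hedged example of the subclass-replacement rule described in the
# docstring (the debuglevel value is arbitrary):
#
#   opener = build_opener(HTTPHandler(debuglevel=1))
#
# Since the instance passed in is an HTTPHandler, the stock HTTPHandler is
# skipped rather than installed alongside it.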
class BaseHandler:
handler_order = 500
def add_parent(self, parent):
self.parent = parent
def close(self):
# Only exists for backwards compatibility
pass
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses."""
handler_order = 1000 # after all other processing
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
# be lenient with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type")
)
return Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
# Implementation note: To avoid the server sending us into an
# infinite loop, the request object needs to track what URLs we
# have already seen. Do this by adding a handler-specific
# attribute to the Request object.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if 'location' in headers:
newurl = headers.getheaders('location')[0]
elif 'uri' in headers:
newurl = headers.getheaders('uri')[0]
else:
return
# fix a possible malformed URL
urlparts = urlparse.urlparse(newurl)
if not urlparts.path:
urlparts = list(urlparts)
urlparts[2] = "/"
newurl = urlparse.urlunparse(urlparts)
newurl = urlparse.urljoin(req.get_full_url(), newurl)
# For security reasons we do not allow redirects to protocols
# other than HTTP, HTTPS or FTP.
newurl_lower = newurl.lower()
if not (newurl_lower.startswith('http://') or
newurl_lower.startswith('https://') or
newurl_lower.startswith('ftp://')):
raise HTTPError(newurl, code,
msg + " - Redirection to url '%s' is not allowed" %
newurl,
headers, fp)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(req, fp, code, msg, headers, newurl)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new, timeout=req.timeout)
http_error_301 = http_error_303 = http_error_307 = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
def _parse_proxy(proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
# We have an authority, so for RFC 3986-compliant URLs (by ss 3.2
# and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
def __init__(self, proxies=None):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if req.host and proxy_bypass(req.host):
return None
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type or orig_type == 'https':
# let other handlers take care of it
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
# e.g. if we have a constructor arg proxies like so:
# {'http': 'ftp://proxy.example.com'}, we may end up turning
# a request for http://acme.example.com/a into one for
# ftp://proxy.example.com/a
return self.parent.open(req, timeout=req.timeout)
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, basestring):
uri = [uri]
if not realm in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
[self.reduce_uri(u, default_port) for u in uri])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.iteritems():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return None, None
def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlparse.urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
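# Illustrative reductions (hostnames are made up):
#
#   >>> HTTPPasswordMgr().reduce_uri('http://example.com/spam')
#   ('example.com:80', '/spam')
#   >>> HTTPPasswordMgr().reduce_uri('example.com')
#   ('example.com', '/')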
def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
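# A hedged illustration (hostnames are made up): credentials stored for
# ('example.com', '/foo') also match requests below that path:
#
#   >>> HTTPPasswordMgr().is_suburi(('example.com', '/foo'),
#   ...                             ('example.com', '/foo/bar'))
#   True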
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
# XXX this allows for multiple auth-schemes, but will stupidly pick
# the last one with a realm specified.
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
'realm=(["\']?)([^"\']*)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
# production).
def __init__(self, password_mgr=None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
self.retried = 0
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
# XXX could be multiple headers
authreq = headers.get(authreq, None)
if self.retried > 5:
# retry sending the username:password 5 times before failing.
raise HTTPError(req.get_full_url(), 401, "basic auth failed",
headers, None)
else:
self.retried += 1
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
if quote not in ['"', "'"]:
warnings.warn("Basic Auth Realm was unquoted",
UserWarning, 2)
if scheme.lower() == 'basic':
response = self.retry_http_basic_auth(host, req, realm)
if response and response.code != 401:
self.retried = 0
return response
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = "%s:%s" % (user, pw)
auth = 'Basic %s' % base64.b64encode(raw).strip()
if req.headers.get(self.auth_header, None) == auth:
return None
req.add_unredirected_header(self.auth_header, auth)
return self.parent.open(req, timeout=req.timeout)
else:
return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
self.reset_retry_count()
return response
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
# http_error_auth_reqed requires that there is no userinfo component in
# authority. Assume there isn't one, since urllib2 does not (and
# should not, RFC 3986 s. 3.2.1) support requests for URLs containing
# userinfo.
authority = req.get_host()
response = self.http_error_auth_reqed('proxy-authenticate',
authority, req, headers)
self.reset_retry_count()
return response
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
class AbstractDigestAuthHandler:
# Digest authentication is specified in RFC 2617.
# XXX The client does not inspect the Authentication-Info header
# in a successful response.
# XXX It should be possible to test this implementation against
# a mock server that just generates a static set of challenges.
# XXX qop="auth-int" support is shaky
def __init__(self, passwd=None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
self.last_nonce = None
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
# Don't fail endlessly - if we failed once, we'll probably
# fail a second time. Hm. Unless the Password Manager is
# prompting for the information. Crap. This isn't great
# but it's better than the current 'repeat until recursion
# depth exceeded' approach <wink>
raise HTTPError(req.get_full_url(), 401, "digest auth failed",
headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return None
req.add_unredirected_header(self.auth_header, auth_val)
resp = self.parent.open(req, timeout=req.timeout)
return resp
def get_cnonce(self, nonce):
# The cnonce-value is an opaque
# quoted string value provided by the client and used by both client
# and server to avoid chosen plaintext attacks, to provide mutual
# authentication, and to provide some message integrity protection.
# This isn't a fabulous effort, but it's probably Good Enough.
dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
randombytes(8))).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
# mod_digest doesn't send an opaque, even though it isn't
# supposed to be optional
opaque = chal.get('opaque', None)
except KeyError:
return None
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return None
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
if user is None:
return None
# XXX not implemented yet
if req.has_data():
entdig = self.get_entity_digest(req.get_data(), chal)
else:
entdig = None
A1 = "%s:%s:%s" % (user, realm, pw)
A2 = "%s:%s" % (req.get_method(),
# XXX selector: what about proxies and full urls
req.get_selector())
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
self.last_nonce = nonce
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
raise URLError("qop '%s' is not supported." % qop)
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (user, realm, nonce, req.get_selector(),
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
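# For reference, the qop == 'auth' branch above follows the RFC 2617 recipe
# (a sketch in terms of the H and KD helpers defined below):
#
#   HA1      = H(user ':' realm ':' password)
#   HA2      = H(method ':' selector)
#   response = KD(HA1, nonce ':' nc ':' cnonce ':' 'auth' ':' HA2)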
def get_algorithm_impls(self, algorithm):
# algorithm should be case-insensitive according to RFC2617
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x).hexdigest()
else:
# Unsupported algorithm: leave H as None so get_authorization()
# bails out instead of raising UnboundLocalError below.
H = None
# XXX MD5-sess
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
def get_entity_digest(self, data, chal):
# XXX not implemented yet
return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
"""An authentication protocol defined by RFC 2069
Digest authentication improves on basic authentication because it
does not transmit passwords in the clear.
"""
auth_header = 'Authorization'
handler_order = 490 # before Basic auth
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse.urlparse(req.get_full_url())[1]
retry = self.http_error_auth_reqed('www-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490 # before Basic auth
def http_error_407(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('proxy-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
if not request.has_header('Content-length'):
request.add_unredirected_header(
'Content-length', '%d' % len(data))
sel_host = host
if request.has_proxy():
scheme, sel = splittype(request.get_selector())
sel_host, sel_path = splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
h = http_class(host, timeout=req.timeout) # will parse host:port
h.set_debuglevel(self._debuglevel)
headers = dict(req.unredirected_hdrs)
headers.update(dict((k, v) for k, v in req.headers.items()
if k not in headers))
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
(name.title(), val) for name, val in headers.items())
if req._tunnel_host:
tunnel_headers = {}
proxy_auth_hdr = "Proxy-Authorization"
if proxy_auth_hdr in headers:
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
# Proxy-Authorization should not be sent to origin
# server.
del headers[proxy_auth_hdr]
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
except socket.error, err: # XXX what error?
h.close()
raise URLError(err)
else:
try:
r = h.getresponse(buffering=True)
except TypeError: # buffering kw not supported
r = h.getresponse()
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r, close=True)
resp = addinfourl(fp, r.msg, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSHandler(AbstractHTTPHandler):
def https_open(self, req):
return self.do_open(httplib.HTTPSConnection, req)
https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
def __init__(self, cookiejar=None):
import cookielib
if cookiejar is None:
cookiejar = cookielib.CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
class UnknownHandler(BaseHandler):
def unknown_open(self, req):
type = req.get_type()
raise URLError('unknown url type: %s' % type)
def parse_keqv_list(l):
"""Parse list of key=value strings where keys are not duplicated."""
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed
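# Illustrative call (the values are made up):
#
#   >>> parse_keqv_list(['realm="example"', 'qop=auth'])
#   {'qop': 'auth', 'realm': 'example'}
#
# (Dictionary key order may vary.)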
def parse_http_list(s):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Neither commas nor quotes count if they are escaped.
Only double-quotes count, not single-quotes.
"""
res = []
part = ''
escape = quote = False
for cur in s:
if escape:
part += cur
escape = False
continue
if quote:
if cur == '\\':
escape = True
continue
elif cur == '"':
quote = False
part += cur
continue
if cur == ',':
res.append(part)
part = ''
continue
if cur == '"':
quote = True
part += cur
# append last part
if part:
res.append(part)
return [part.strip() for part in res]
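# Illustrative call showing the quoted-string handling (input is made up):
#
#   >>> parse_http_list('a, "b, c", d')
#   ['a', '"b, c"', 'd']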
def _safe_gethostbyname(host):
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
class FileHandler(BaseHandler):
# Use local file or FTP depending on form of URL
def file_open(self, req):
url = req.get_selector()
if url[:2] == '//' and url[2:3] != '/' and (req.host and
req.host != 'localhost'):
req.type = 'ftp'
return self.parent.open(req)
else:
return self.open_local_file(req)
# names for the localhost
names = None
def get_names(self):
if FileHandler.names is None:
try:
FileHandler.names = tuple(
socket.gethostbyname_ex('localhost')[2] +
socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
# not entirely sure what the rules are here
def open_local_file(self, req):
import email.utils
import mimetypes
host = req.get_host()
filename = req.get_selector()
localfile = url2pathname(filename)
try:
stats = os.stat(localfile)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(filename)[0]
headers = mimetools.Message(StringIO(
'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if host:
host, port = splitport(host)
if not host or \
(not port and _safe_gethostbyname(host) in self.get_names()):
if host:
origurl = 'file://' + host + filename
else:
origurl = 'file://' + filename
return addinfourl(open(localfile, 'rb'), headers, origurl)
except OSError, msg:
# urllib2 users shouldn't expect OSErrors coming from urlopen()
raise URLError(msg)
raise URLError('file not on local host')
class FTPHandler(BaseHandler):
def ftp_open(self, req):
import ftplib
import mimetypes
host = req.get_host()
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
try:
host = socket.gethostbyname(host)
except socket.error, msg:
raise URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
fp, retrlen = fw.retrfile(file, type)
headers = ""
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += "Content-type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-length: %d\n" % retrlen
sf = StringIO(headers)
headers = mimetools.Message(sf)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors, msg:
raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
fw = ftpwrapper(user, passwd, host, port, dirs, timeout,
persistent=False)
## fw.ftp.set_debuglevel(1)
return fw
class CacheFTPHandler(FTPHandler):
# XXX would be nice to have pluggable cache strategies
# XXX this stuff is definitely not thread safe
def __init__(self):
self.cache = {}
self.timeout = {}
self.soonest = 0
self.delay = 60
self.max_conns = 16
def setTimeout(self, t):
self.delay = t
def setMaxConns(self, m):
self.max_conns = m
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
key = user, host, port, '/'.join(dirs), timeout
if key in self.cache:
self.timeout[key] = time.time() + self.delay
else:
self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
self.timeout[key] = time.time() + self.delay
self.check_cache()
return self.cache[key]
def check_cache(self):
# first check for old ones
t = time.time()
if self.soonest <= t:
for k, v in self.timeout.items():
if v < t:
self.cache[k].close()
del self.cache[k]
del self.timeout[k]
self.soonest = min(self.timeout.values())
# then check the size
if len(self.cache) == self.max_conns:
for k, v in self.timeout.items():
if v == self.soonest:
del self.cache[k]
del self.timeout[k]
break
self.soonest = min(self.timeout.values())
def clear_cache(self):
for conn in self.cache.values():
conn.close()
self.cache.clear()
self.timeout.clear()
|
sephii/django
|
refs/heads/master
|
django/contrib/sites/__init__.py
|
808
|
default_app_config = 'django.contrib.sites.apps.SitesConfig'
|
maginatics/swift
|
refs/heads/master
|
test/__init__.py
|
21
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import sys
import os
try:
from unittest.util import safe_repr
except ImportError:
# Probably py26
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
# make unittests pass on all locale
import swift
setattr(swift, 'gettext_', lambda x: x)
from swift.common.utils import readconf
# Work around what seems to be a Python bug.
# c.f. https://bugs.launchpad.net/swift/+bug/820185.
import logging
logging.raiseExceptions = False
def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config = {}
if defaults is not None:
config.update(defaults)
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except SystemExit:
if not os.path.exists(config_file):
print >>sys.stderr, \
'Unable to read test config %s - file not found' \
% config_file
elif not os.access(config_file, os.R_OK):
print >>sys.stderr, \
'Unable to read test config %s - permission denied' \
% config_file
else:
print >>sys.stderr, \
'Unable to read test config %s - section %s not found' \
% (config_file, section_name)
return config
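# A hedged usage sketch; the section name and default shown here are
# illustrative, not part of any required configuration:
#
#   conf = get_config('func_test', defaults={'auth_host': '127.0.0.1'})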
|
harvard-dce/dce_lti_py
|
refs/heads/master
|
setup.py
|
1
|
import os
import re
import sys
import codecs
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, because outside the test command the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
here = os.path.abspath(os.path.dirname(__file__))
def read(path):
return codecs.open(os.path.join(here, path), 'r', 'utf-8').read()
version_file = read('dce_lti_py/__init__.py')
version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M).group(1)
setup(
name='dce_lti_py',
version=version,
description='A python library for building and/or consuming LTI apps',
author='Jay Luker',
author_email='jay_luker@harvard.edu',
url='https://github.com/harvard-dce/dce_lti_py',
packages=find_packages(),
install_requires=['lxml', 'oauthlib', 'requests-oauthlib'],
license='MIT License',
keywords='lti',
zip_safe=True,
test_suite='tests',
tests_require=['pytest', 'mock==1.0.1', 'httmock'],
cmdclass={'test': PyTest}
)
|
luipugs/kamatis
|
refs/heads/master
|
kamatis/tests.py
|
1
|
from kamatis import util
import os
import shutil
import tempfile
import unittest
class TestMakedirs(unittest.TestCase):
def test_create(self):
tempdir = tempfile.gettempdir()
parent = os.tempnam(tempdir)
path = os.path.join(parent, 'testdir')
isdir = util.makedirs(path)
self.assertEqual(isdir, True)
self.assertEqual(os.path.isdir(path), True)
shutil.rmtree(parent)
def test_existing(self):
path = tempfile.mkdtemp()
isdir = util.makedirs(path)
self.assertEqual(isdir, True)
os.rmdir(path)
def test_existing_nondir(self):
_, path = tempfile.mkstemp()
isdir = util.makedirs(path)
self.assertEqual(isdir, False)
os.unlink(path)
def test_create_error(self):
parent = tempfile.mkdtemp()
os.chmod(parent, 0000)
path = os.path.join(parent, 'testdir')
isdir = util.makedirs(path)
self.assertEqual(isdir, False)
os.rmdir(parent)
if __name__ == '__main__':
unittest.main()
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/resources/types/feed_item.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import custom_parameter
from google.ads.googleads.v8.common.types import feed_common
from google.ads.googleads.v8.common.types import policy
from google.ads.googleads.v8.enums.types import feed_item_quality_approval_status
from google.ads.googleads.v8.enums.types import feed_item_quality_disapproval_reason
from google.ads.googleads.v8.enums.types import feed_item_status
from google.ads.googleads.v8.enums.types import feed_item_validation_status
from google.ads.googleads.v8.enums.types import geo_targeting_restriction as gage_geo_targeting_restriction
from google.ads.googleads.v8.enums.types import placeholder_type
from google.ads.googleads.v8.enums.types import policy_approval_status
from google.ads.googleads.v8.enums.types import policy_review_status
from google.ads.googleads.v8.errors.types import feed_item_validation_error
__protobuf__ = proto.module(
package='google.ads.googleads.v8.resources',
marshal='google.ads.googleads.v8',
manifest={
'FeedItem',
'FeedItemAttributeValue',
'FeedItemPlaceholderPolicyInfo',
'FeedItemValidationError',
},
)
class FeedItem(proto.Message):
r"""A feed item.
Attributes:
resource_name (str):
Immutable. The resource name of the feed item. Feed item
resource names have the form:
``customers/{customer_id}/feedItems/{feed_id}~{feed_item_id}``
feed (str):
Immutable. The feed to which this feed item
belongs.
id (int):
Output only. The ID of this feed item.
start_date_time (str):
Start time in which this feed item is
effective and can begin serving. The time is in
the customer's time zone. The format is
"YYYY-MM-DD HH:MM:SS".
Examples: "2018-03-05 09:15:00" or "2018-02-01
14:34:30".
end_date_time (str):
End time in which this feed item is no longer
effective and will stop serving. The time is in
the customer's time zone. The format is
"YYYY-MM-DD HH:MM:SS".
Examples: "2018-03-05 09:15:00" or "2018-02-01
14:34:30".
attribute_values (Sequence[google.ads.googleads.v8.resources.types.FeedItemAttributeValue]):
The feed item's attribute values.
geo_targeting_restriction (google.ads.googleads.v8.enums.types.GeoTargetingRestrictionEnum.GeoTargetingRestriction):
Geo targeting restriction specifies the type
of location that can be used for targeting.
url_custom_parameters (Sequence[google.ads.googleads.v8.common.types.CustomParameter]):
The list of mappings used to substitute custom parameter
tags in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
status (google.ads.googleads.v8.enums.types.FeedItemStatusEnum.FeedItemStatus):
Output only. Status of the feed item.
This field is read-only.
policy_infos (Sequence[google.ads.googleads.v8.resources.types.FeedItemPlaceholderPolicyInfo]):
Output only. List of info about a feed item's
validation and approval state for active feed
mappings. There will be an entry in the list for
each type of feed mapping associated with the
feed, e.g. a feed with a sitelink and a call
feed mapping would cause every feed item
associated with that feed to have an entry in
this list for both sitelink and call. This field
is read-only.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
feed = proto.Field(
proto.STRING,
number=11,
optional=True,
)
id = proto.Field(
proto.INT64,
number=12,
optional=True,
)
start_date_time = proto.Field(
proto.STRING,
number=13,
optional=True,
)
end_date_time = proto.Field(
proto.STRING,
number=14,
optional=True,
)
attribute_values = proto.RepeatedField(
proto.MESSAGE,
number=6,
message='FeedItemAttributeValue',
)
geo_targeting_restriction = proto.Field(
proto.ENUM,
number=7,
enum=gage_geo_targeting_restriction.GeoTargetingRestrictionEnum.GeoTargetingRestriction,
)
url_custom_parameters = proto.RepeatedField(
proto.MESSAGE,
number=8,
message=custom_parameter.CustomParameter,
)
status = proto.Field(
proto.ENUM,
number=9,
enum=feed_item_status.FeedItemStatusEnum.FeedItemStatus,
)
policy_infos = proto.RepeatedField(
proto.MESSAGE,
number=10,
message='FeedItemPlaceholderPolicyInfo',
)
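# A hedged construction sketch using the proto-plus keyword-argument style;
# the identifiers below are invented:
#
#   item = FeedItem(
#       resource_name='customers/123/feedItems/456~789',
#       start_date_time='2018-03-05 09:15:00',
#   )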
class FeedItemAttributeValue(proto.Message):
r"""A feed item attribute value.
Attributes:
feed_attribute_id (int):
ID of the feed attribute with which the value
is associated.
integer_value (int):
Int64 value. Should be set if feed_attribute_id refers to a
feed attribute of type INT64.
boolean_value (bool):
Bool value. Should be set if feed_attribute_id refers to a
feed attribute of type BOOLEAN.
string_value (str):
String value. Should be set if feed_attribute_id refers to a
feed attribute of type STRING, URL or DATE_TIME. For STRING
the maximum length is 1500 characters. For URL the maximum
length is 2076 characters. For DATE_TIME the string must be
in the format "YYYYMMDD HHMMSS".
double_value (float):
Double value. Should be set if feed_attribute_id refers to a
feed attribute of type DOUBLE.
price_value (google.ads.googleads.v8.common.types.Money):
Price value. Should be set if feed_attribute_id refers to a
feed attribute of type PRICE.
integer_values (Sequence[int]):
Repeated int64 value. Should be set if feed_attribute_id
refers to a feed attribute of type INT64_LIST.
boolean_values (Sequence[bool]):
Repeated bool value. Should be set if feed_attribute_id
refers to a feed attribute of type BOOLEAN_LIST.
string_values (Sequence[str]):
Repeated string value. Should be set if feed_attribute_id
refers to a feed attribute of type STRING_LIST, URL_LIST or
DATE_TIME_LIST. For STRING_LIST and URL_LIST the total size
of the list in bytes may not exceed 3000. For DATE_TIME_LIST
the number of elements may not exceed 200.
For STRING_LIST the maximum length of each string element is
1500 characters. For URL_LIST the maximum length is 2076
characters. For DATE_TIME the format of the string must be
the same as start and end time for the feed item.
double_values (Sequence[float]):
Repeated double value. Should be set if feed_attribute_id
refers to a feed attribute of type DOUBLE_LIST.
"""
feed_attribute_id = proto.Field(
proto.INT64,
number=11,
optional=True,
)
integer_value = proto.Field(
proto.INT64,
number=12,
optional=True,
)
boolean_value = proto.Field(
proto.BOOL,
number=13,
optional=True,
)
string_value = proto.Field(
proto.STRING,
number=14,
optional=True,
)
double_value = proto.Field(
proto.DOUBLE,
number=15,
optional=True,
)
price_value = proto.Field(
proto.MESSAGE,
number=6,
message=feed_common.Money,
)
integer_values = proto.RepeatedField(
proto.INT64,
number=16,
)
boolean_values = proto.RepeatedField(
proto.BOOL,
number=17,
)
string_values = proto.RepeatedField(
proto.STRING,
number=18,
)
double_values = proto.RepeatedField(
proto.DOUBLE,
number=19,
)
class FeedItemPlaceholderPolicyInfo(proto.Message):
r"""Policy, validation, and quality approval info for a feed item
for the specified placeholder type.
Attributes:
placeholder_type_enum (google.ads.googleads.v8.enums.types.PlaceholderTypeEnum.PlaceholderType):
Output only. The placeholder type.
feed_mapping_resource_name (str):
Output only. The FeedMapping that contains
the placeholder type.
review_status (google.ads.googleads.v8.enums.types.PolicyReviewStatusEnum.PolicyReviewStatus):
Output only. Where the placeholder type is in
the review process.
approval_status (google.ads.googleads.v8.enums.types.PolicyApprovalStatusEnum.PolicyApprovalStatus):
Output only. The overall approval status of
the placeholder type, calculated based on the
status of its individual policy topic entries.
policy_topic_entries (Sequence[google.ads.googleads.v8.common.types.PolicyTopicEntry]):
Output only. The list of policy findings for
the placeholder type.
validation_status (google.ads.googleads.v8.enums.types.FeedItemValidationStatusEnum.FeedItemValidationStatus):
Output only. The validation status of the
placeholder type.
validation_errors (Sequence[google.ads.googleads.v8.resources.types.FeedItemValidationError]):
Output only. List of placeholder type
validation errors.
quality_approval_status (google.ads.googleads.v8.enums.types.FeedItemQualityApprovalStatusEnum.FeedItemQualityApprovalStatus):
Output only. Placeholder type quality
evaluation approval status.
quality_disapproval_reasons (Sequence[google.ads.googleads.v8.enums.types.FeedItemQualityDisapprovalReasonEnum.FeedItemQualityDisapprovalReason]):
Output only. List of placeholder type quality
evaluation disapproval reasons.
"""
placeholder_type_enum = proto.Field(
proto.ENUM,
number=10,
enum=placeholder_type.PlaceholderTypeEnum.PlaceholderType,
)
feed_mapping_resource_name = proto.Field(
proto.STRING,
number=11,
optional=True,
)
review_status = proto.Field(
proto.ENUM,
number=3,
enum=policy_review_status.PolicyReviewStatusEnum.PolicyReviewStatus,
)
approval_status = proto.Field(
proto.ENUM,
number=4,
enum=policy_approval_status.PolicyApprovalStatusEnum.PolicyApprovalStatus,
)
policy_topic_entries = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=policy.PolicyTopicEntry,
)
validation_status = proto.Field(
proto.ENUM,
number=6,
enum=feed_item_validation_status.FeedItemValidationStatusEnum.FeedItemValidationStatus,
)
validation_errors = proto.RepeatedField(
proto.MESSAGE,
number=7,
message='FeedItemValidationError',
)
quality_approval_status = proto.Field(
proto.ENUM,
number=8,
enum=feed_item_quality_approval_status.FeedItemQualityApprovalStatusEnum.FeedItemQualityApprovalStatus,
)
quality_disapproval_reasons = proto.RepeatedField(
proto.ENUM,
number=9,
enum=feed_item_quality_disapproval_reason.FeedItemQualityDisapprovalReasonEnum.FeedItemQualityDisapprovalReason,
)
class FeedItemValidationError(proto.Message):
r"""Stores a validation error and the set of offending feed
attributes which together are responsible for causing a feed
item validation error.
Attributes:
validation_error (google.ads.googleads.v8.errors.types.FeedItemValidationErrorEnum.FeedItemValidationError):
Output only. Error code indicating what
validation error was triggered. The description
of the error can be found in the 'description'
field.
description (str):
Output only. The description of the
validation error.
feed_attribute_ids (Sequence[int]):
Output only. Set of feed attributes in the
feed item flagged during validation. If empty,
no specific feed attributes can be associated
with the error (e.g. error across the entire
feed item).
extra_info (str):
Output only. Any extra information related to this error
which is not captured by validation_error and
feed_attribute_id (e.g. placeholder field IDs when
feed_attribute_id is not mapped). Note that extra_info is
not localized.
"""
validation_error = proto.Field(
proto.ENUM,
number=1,
enum=feed_item_validation_error.FeedItemValidationErrorEnum.FeedItemValidationError,
)
description = proto.Field(
proto.STRING,
number=6,
optional=True,
)
feed_attribute_ids = proto.RepeatedField(
proto.INT64,
number=7,
)
extra_info = proto.Field(
proto.STRING,
number=8,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
lukauskas/dgw
|
refs/heads/master
|
dgw/cluster/analysis.py
|
2
|
from collections import defaultdict
from logging import debug
from math import floor
import scipy.cluster.hierarchy as hierarchy
import pandas as pd
import numpy as np
from ..data.containers import AlignmentsData
from ..dtw.distance import dtw_std, dtw_path_is_reversed, warping_conservation_vector
from ..dtw import transformations, dtw_projection, no_nans_len
from ..dtw.parallel import parallel_dtw_paths
import gzip
import scipy.stats
from scipy.cluster._hierarchy import get_max_dist_for_each_cluster
def compute_paths(data, dtw_nodes_list, n, n_processes=None, *dtw_args, **dtw_kwargs):
non_leaf_nodes = dtw_nodes_list[n:]
paths = parallel_dtw_paths(data, non_leaf_nodes, n_processes=n_processes, *dtw_args, **dtw_kwargs)
return paths
def _to_dtw_tree(linkage, hierarchical_clustering_object, prototypes, prototyping_function='mean'):
"""
Converts a hierarchical clustering linkage matrix `linkage` to a hierarchy of `DTWClusterNode`s.
This is a modification of the `scipy.cluster.hierarchy.to_tree` function, and the code is mostly taken from it.
:param linkage: linkage matrix to convert to the DTW tree
:param hierarchical_clustering_object: hierarchical clustering object to work with
:param prototypes: optional precomputed prototypes, keyed by node id
:param prototyping_function: "reduce" function for prototype calculation, or "mean" to simply use the data mean
# Validation
linkage = np.asarray(linkage, order='c')
hierarchy.is_valid_linkage(linkage, throw=True, name='Z')
data = hierarchical_clustering_object.data
labels = data.items
values = data.ix
n = linkage.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
index = labels[i]
d[i] = DTWClusterNode(id=index, hierarchical_clustering_object=hierarchical_clustering_object,
prototype=values[index])
nd = None
for i in xrange(0, n - 1):
fi = int(linkage[i, 0])
fj = int(linkage[i, 1])
assert(fi <= i + n)
assert(fj <= i + n)
id = i + n
left = d[fi]
right = d[fj]
dist = linkage[i, 2]
if prototypes:
prototype = prototypes[id]
nd = DTWClusterNode(id=id, hierarchical_clustering_object=hierarchical_clustering_object,
prototype=prototype,
left=left, right=right,
dist=linkage[i, 2])
elif callable(prototyping_function):
prototype = prototyping_function(left.prototype.values, right.prototype.values, left.count, right.count)
nd = DTWClusterNode(id=id, hierarchical_clustering_object=hierarchical_clustering_object,
prototype=prototype,
left=left, right=right,
dist=linkage[i, 2])
elif prototyping_function == 'mean':
nd = DTWClusterNode(id=id, hierarchical_clustering_object=hierarchical_clustering_object,
prototype=None,
left=left, right=right,
dist=linkage[i, 2])
# A bit hacky, but it does the job: set the prototype afterwards so that nd.data can be used
nd._prototype = nd.data.mean()
assert(linkage[i, 3] == nd.count)
d[n + i] = nd
return nd, d
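# A minimal usage sketch, with illustrative random data, of the scipy routine
# that `_to_dtw_tree` is modelled on: convert a linkage matrix into a binary
# tree of ClusterNode objects and inspect the root.
def _example_scipy_to_tree():
root = hierarchy.to_tree(hierarchy.linkage(np.random.rand(6, 2)))
assert root.get_count() == 6  # all six observations sit below the root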
def add_path_data(dtw_nodes, n, paths):
"""
Adds precomputed path data to dtw_nodes
:param data:
:param dtw_nodes:
:param n:
:param paths:
:return:
"""
# Loop through non-leaf nodes
for node in dtw_nodes[n:]:
node.warping_paths = paths[node.id]
# Inheriting from object explicitly, as hierarchy.ClusterNode does not do this
class DTWClusterNode(object, hierarchy.ClusterNode):
_prototype = None
_index = None
_hierarchical_clustering_object = None
_projected_data = None
_warping_paths = None
_tracked_points = None
_tracked_points_histogram = None
_points_of_interest_histogram = None
_warping_conservation_data = None
def __init__(self, hierarchical_clustering_object, id, prototype, left=None, right=None, dist=0, count=1):
hierarchy.ClusterNode.__init__(self, id, left=left, right=right, dist=dist, count=count)
if not isinstance(prototype, pd.DataFrame):
prototype = pd.DataFrame(prototype, columns=hierarchical_clustering_object.data.dataset_axis)
self._prototype = prototype
self._hierarchical_clustering_object = hierarchical_clustering_object
self._index = pd.Index(self.__get_item_ids())
# Assume no points of interest. TODO: Add way to specify those
self._points_of_interest = {}
def __get_item_ids(self):
"""
Gets the ids of all the leaf nodes that are in the tree below
:return:
"""
if self.is_leaf():
return pd.Index([self.id])
else:
return self.get_left().index | self.get_right().index
@property
def prototype(self):
return self._prototype
@property
def index(self):
return self._index
def reindex(self, new_index):
# Make sure they are the same elements just in different order
assert(len(new_index & self._index) == len(self._index) == len(new_index))
self._index = new_index
return self._index
@property
def data(self):
# Don't store the data inside
return self._hierarchical_clustering_object.data.ix[self.index]
@property
def projected_data(self):
# It is infeasible to calculate projected data for all the nodes beforehand
self.ensure_projections_are_calculated()
return self._projected_data
def ensure_projections_are_calculated(self):
if self._projected_data is None:
self._projected_data = self.__project_items_onto_prototype()
def _calculate_histogram(self, points_of_interest, number_of_bins, lengths=None):
histogram = defaultdict(lambda: np.zeros(number_of_bins))
for ix, poi in points_of_interest.iteritems():
if lengths is not None:
scaling_ratio = float(number_of_bins) / lengths[ix]
else:
scaling_ratio = 1
for poi_name, points in poi.iteritems():
current_histogram = histogram[poi_name]
for point in set(points):
min_rescaled = int(floor(point * scaling_ratio))
max_rescaled = int(floor((point + 1) * scaling_ratio))
for rescaled_point in xrange(min_rescaled, max_rescaled):
assert rescaled_point < number_of_bins  # must remain a valid bin index
current_histogram[rescaled_point] += 1
return pd.DataFrame(histogram)
@property
def tracked_points_histogram(self):
if self._tracked_points_histogram is None:
self.ensure_points_of_interest_are_tracked_down()
self._tracked_points_histogram = self._calculate_histogram(self._tracked_points, len(self.prototype))
return self._tracked_points_histogram
@property
def points_of_interest_histogram(self):
if self._points_of_interest_histogram is None:
self._points_of_interest_histogram = self._calculate_histogram(self.points_of_interest,
max(self.data.lengths),
self.data.lengths)
return self._points_of_interest_histogram
def ensure_points_of_interest_are_tracked_down(self):
if self._tracked_points is None:
self._tracked_points = self.__track_points_of_interest()
@property
def warping_paths(self):
if not self._warping_paths:
raise Exception('Warping paths not computed')
return self._warping_paths
@warping_paths.setter
def warping_paths(self, values):
self._warping_paths = values
@property
def regions(self):
"""
:rtype: Regions
"""
if self._hierarchical_clustering_object.regions is None:
return None
else:
return self._hierarchical_clustering_object.regions.ix[self.index]
def __compute_dtw_warping_paths(self):
data = self.data
dtw_function = self._hierarchical_clustering_object.dtw_function
prototype = self.prototype
paths = {}
for ix in data.items:
_, _, path = dtw_function(data.ix[ix], prototype, dist_only=False)
# Reduce the bit size of the path arrays to 16 bits
# DTW would be too slow to use anyway if we had more than 2**16-1 items in it
# Feel free to update this if it is not the case.
#path = (np.asarray(path[0], dtype=np.int16), np.asarray(path[1], dtype=np.int16))
paths[ix] = path
return paths
def save_as_encode_track(self, filename, track_name=None, track_description=None):
if self.regions is None:
raise Exception('Cannot save {0!r} as region locations are not specified'.format(self))
if track_name:
track_kwargs = {'name': track_name}
if track_description:
track_kwargs['description'] = track_description
else:
track_kwargs = {}
regions = self.regions
regions = regions.infer_strand_from_whether_the_region_was_reversed_or_not(self.reversal_dictionary)
regions.to_bed(filename, **track_kwargs)
def save_prototype_to_text(self, filename):
prototype = self.prototype
f = open(filename, 'w')
try:
f.write('#{0}'.format('bin'))
for col in prototype.columns:
f.write('\t{0}'.format(col))
f.write('\n')
for bin, row in prototype.iterrows():
f.write('{0}'.format(bin))
for col in prototype.columns:
f.write('\t{0}'.format(row[col]))
f.write('\n')
finally:
f.close()
def save_conservation_coefficient_as_text(self, filename):
conservation_vector = self.warping_conservation_vector()
f = open(filename, 'w')
try:
f.write('#{0}\t{1}\t{2}\n'.format('start_bin', 'end_bin', 'avg_conservation'))
for i in xrange(len(conservation_vector)):
f.write('{0}\t{1}\t{2}\n'.format(i, i+1, conservation_vector[i]))
finally:
f.close()
def save_as_list_of_indices(self, filename):
index = self.data.items
f = open(filename, 'w')
try:
for ix in index:
f.write('{0}\n'.format(ix))
finally:
f.close()
def save_poi_histograms_to_file(self, basename):
points_of_interest = self.points_of_interest
if not points_of_interest:
return
raw_filename = basename + '-raw.csv'
warped_filename = basename + '-warped.csv'
self.points_of_interest_histogram.to_csv(raw_filename)
self.tracked_points_histogram.to_csv(warped_filename)
def poi_entropies(self):
if not self.points_of_interest:
return None
untracked_hist = self.points_of_interest_histogram
tracked_hist = self.tracked_points_histogram
untracked_entropy = pd.Series(scipy.stats.entropy(untracked_hist),
index=untracked_hist.columns, name='raw')
tracked_entropy = pd.Series(scipy.stats.entropy(tracked_hist),
index=tracked_hist.columns, name='warped')
df = pd.DataFrame([untracked_entropy, tracked_entropy]).T
df.index.name = 'poi_file'
df['diff'] = df['raw'] - df['warped']
df['rel_diff'] = df['diff'] / df['raw']
return df
def save_pois_to_file(self, filename):
points_of_interest = self.points_of_interest
if not points_of_interest:
return
with gzip.GzipFile(filename, 'w') as f:
f.write('#region\tpoi_file\tbins\tprototype_bins\n')
points_of_interest = self.points_of_interest
warped_points_of_interest = self.tracked_points_of_interest
for region, poi_data in points_of_interest.iteritems():
warped_poi_data = warped_points_of_interest[region]
for poi_filename, pois in poi_data.iteritems():
warped_pois = warped_poi_data[poi_filename]
str_pois = ';'.join(map(str, pois))
str_warped_pois = ';'.join(map(str, warped_pois))
f.write('{}\t{}\t{}\t{}\n'.format(region, poi_filename, str_pois, str_warped_pois))
def save_warpings_to_file(self, filename):
data = self.data
index = data.items
regions = self.regions
if not self.is_leaf():
warping_paths = self.warping_paths
else:
# if we're at a leaf node, there are no warping paths
# so we just create sample warping paths that just map each point in data to itself
assert len(data.items) == 1 # assumption for sanity
first_ix = data.items[0]
data_len = len(data.ix[first_ix].dropna())
warping_paths = {first_ix: np.vstack([np.arange(data_len),
np.arange(data_len)])}
if regions is not None:
bin_intervals = regions.ix[index].bins_to_intervals(data.resolution)
chromosomes = regions.chromosome
f = gzip.open(filename, 'w')
f.write('#{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format('index', 'bin', 'chromosome', 'start', 'end', 'prototype_bin'))
try:
for ix, chromosome in chromosomes.iteritems():
path = warping_paths[ix]
bi = bin_intervals[ix]
for i in xrange(len(path[0])):
p_a = path[0][i]
p_b = path[1][i]
current_bin = bi[p_a]
f.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format(ix, p_a, chromosome,
current_bin[0], current_bin[1],
p_b))
finally:
f.close()
else:
f = gzip.open(filename, 'w')
try:
f.write('#{0}\t{1}\t{2}\n'.format('index', 'bin', 'prototype_bin'))
for ix, path in warping_paths.items():
for i in xrange(len(path[0])):
p_a = path[0][i]
p_b = path[1][i]
f.write('{0}\t{1}\t{2}\n'.format(ix, p_a, p_b))
finally:
f.close()
def save_warping_conservation_data_to_file(self, filename):
data = self.data
index = data.items
conservation_data = self.warping_conservation_data
f = gzip.open(filename, 'w')
f.write('#{0}\t{1}\t{2}\t{3}\n'.format('index', 'start_bin', 'end_bin', 'conservation_coefficient'))
try:
for ix in index:
conservation_vector = conservation_data.ix[ix]
start_i = None
current_value = None
for i in xrange(len(conservation_vector)):
value = conservation_vector[i]
if value < 1e-6:  # 1e-6 tolerance to deal with floating-point issues
if start_i is not None:
# If we're at zero, some conserved region finishes here
f.write('{0}\t{1}\t{2}\t{3}\n'.format(ix, start_i, i, current_value))
start_i = None
current_value = None
else:
pass # Nothing to do here
elif start_i is None:
start_i = i
current_value = value
else:
try:
assert(abs(value - current_value) < 1e-6)
except AssertionError:
debug('%s %s', value, current_value)
raise
finally:
f.close()
def __project_items_onto_prototype(self):
if self.is_leaf():
# there is no projection, really, return itself
return self.data
else:
data = self.data
prototype = self.prototype
warping_paths = self.warping_paths
columns = data.dataset_axis
projections = {}
for ix in data.items:
item = data.ix[ix]
projection = dtw_projection(item, prototype, path=warping_paths[ix])
df = pd.DataFrame(projection, index=range(len(prototype)), columns=columns)
projections[ix] = df
panel = pd.Panel(projections)
panel = panel.ix[self.data.items]
ad = AlignmentsData(panel, self.data.resolution)
return ad
def _compute_warping_conservation_data(self):
data = self.data
if self.is_leaf():
conservation_data = [np.ones(no_nans_len(self.prototype)-1)]
else:
warping_paths = self.warping_paths
conservation_data = []
for ix in data.items:
path = warping_paths[ix]
conservation_data.append(warping_conservation_vector(path))
return pd.DataFrame(conservation_data, index=self.index)
@property
def warping_conservation_data(self):
if self._warping_conservation_data is None:
self._warping_conservation_data = self._compute_warping_conservation_data()
return self._warping_conservation_data
def warping_conservation_vector(self):
conservation_vector = self.warping_conservation_data.mean()
return conservation_vector
def __track_points_of_interest(self):
if self.is_leaf():
# Nothing to track
return self.points_of_interest
else:
points_of_interest = self.points_of_interest
warping_paths = self.warping_paths
tracked_points = defaultdict(lambda: {})
for ix, pois in points_of_interest.iteritems():
for j, poi in pois.iteritems():
mapped_points = transformations.points_mapped_to(poi, warping_paths[ix])
tracked_points[ix][j] = mapped_points
return tracked_points
@property
def points_of_interest(self):
poi = self.data.points_of_interest
return poi
@property
def reversal_dictionary(self):
if self.is_leaf():
# If we are leaf node, there is no DTW projection onto prototype
# and therefore strand cannot be inferred.
# Just return None
return {ix: None for ix in self.index}
warping_paths = self.warping_paths
reversal_dict = {}
for item in self.index:
reversal_dict[item] = dtw_path_is_reversed(warping_paths[item])
return reversal_dict
@property
def tracked_points_of_interest(self):
self.ensure_points_of_interest_are_tracked_down()
tracked_points = self._tracked_points
return tracked_points
@property
def n_items(self):
"""
Returns number of leaves below the current node
"""
return len(self.index)
def __len__(self):
return self.n_items
def __repr__(self):
return "<{0} containing {1} items>".format(self.__class__.__name__, self.n_items)
def _reduce_tree(tree, reduce_func, map_function=lambda node: node.id,
is_value=lambda x: not isinstance(x, hierarchy.ClusterNode)):
"""
Performs reduce operation on the binary tree generated by hierarchical clustering.
Implementation does not use recursion in order not to overflow the stack
Equivalent to:
```
def reduce(tree, reduce_function, map_function):
if tree.is_leaf():
return map_function(tree)
else:
left = reduce(tree.get_left(), reduce_function, map_function)
right = reduce(tree.get_right(), reduce_function, map_function)
return reduce_function(tree, left, right)
```
:param tree: root node of the tree
:type tree: `scipy.cluster.hierarchy.ClusterNode`
:param reduce_func: function that will be performed on reduction. Takes three parameters: node, left branch, right branch
:param map_function: function that gets the value of a `ClusterNode` defaults to retrieving the node's id
:param is_value: function that determines whether an item is an already-reduced value or just another node in the cluster tree
:return: The result of reduction function
"""
def _add_children_to_stack(node):
stack.append(node) # Append self to stack so we can trace it back later
stack.append(node.get_left())
stack.append(node.get_right())
stack = [tree]
while stack:
node = stack.pop()
if not is_value(node):
if node.is_leaf():
try:
stack.append(map_function(node))
except Exception, e:
print "Got {0!r} while getting value of {1!r}".format(e, node)
raise
else:
_add_children_to_stack(node)
else:
try:
next_node = stack.pop()
except IndexError:
return node
if not is_value(next_node):
stack.append(node)
stack.append(next_node)
else:
parent_node = stack.pop()
assert(not is_value(parent_node))
try:
reduced_value = reduce_func(parent_node, node, next_node)
except Exception, e:
print "Got {0!r} on reduce of {1!r} {2!r}".format(e, node, next_node)
raise
stack.append(reduced_value)
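# A minimal usage sketch, with illustrative random data: count the leaves below
# the root of a small scipy clustering by reducing the tree bottom-up with
# `_reduce_tree`.
def _example_reduce_tree():
root = hierarchy.to_tree(hierarchy.linkage(np.random.rand(6, 2)))
n_leaves = _reduce_tree(root,
reduce_func=lambda node, left, right: left + right,
map_function=lambda leaf: 1)
assert n_leaves == 6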
class HierarchicalClustering(object):
_condensed_distance_matrix = None
_data = None
_linkage_matrix = None
_distance_threshold = None
__dtw_args = None
__dtw_kwargs = None
_regions = None
__tree = None
__tree_nodes_list = None
def __init__(self, data, regions, linkage_matrix, dtw_function=dtw_std, prototypes=None, prototyping_method='psa'):
"""
Initialises hierarchical clustering analyser.
Handles linkage calculation, dendrogram plotting and prototype generation.
Supports four prototyping methods:
Prioritised Shape Averaging (PSA) (prototyping_method='psa')
as described in [#Niennattrakul:2009ep], see also `dgw.transformations.sdtw_averaging`
Standard average of DTW path (prototyping_method='standard')
averages the DTW paths for each pair of nodes with the same parent.
The weights are determined by how many sequences were averaged into the node.
see `dgw.transformations.dtw_path_averaging`
Unweighted standard average of DTW path (prototyping_method='standard-unweighted')
similar to the standard method above, but does not bias sequences higher up the tree
Mean (prototyping_method='mean')
simply the mean of the data
.. [#Niennattrakul:2009ep] Vit Niennattrakul and Chotirat Ann Ratanamahatana "Shape averaging under Time Warping",
2009 6th International Conference on Electrical Engineering/Electronics, Computer,
Telecommunications and Information Technology (ECTI-CON)
:param data: the alignments data to cluster
:type data: AlignmentsData
:param linkage_matrix: linkage_matrix computed by fastcluster.linkage.
:param dtw_function: DTW calculation function
:param prototypes: cluster node prototypes (will be computed if None)
:param prototyping_method: Averaging method either 'psa', 'standard', 'standard-unweighted', 'mean'
:return:
"""
if not isinstance(data, AlignmentsData):
raise ValueError('Data should be instance of {0}'.format(AlignmentsData.__name__))
self._data = data
self._regions = regions
# small negative distances in linkage matrix are sometimes possible due to rounding errors. Change them to 0
linkage_matrix[:, 2][linkage_matrix[:, 2] < 0] = 0
self._linkage_matrix = linkage_matrix
self.__dtw_function = dtw_function
tree, tree_nodes = self.__dtw_tree_from_linkage(linkage_matrix, prototypes, prototyping_method)
self.__tree = tree
self.__tree_nodes_list = tree_nodes
def extract_prototypes(self):
prototypes = {}
for node in self.tree_nodes_list:
prototypes[node.id] = node.prototype
return prototypes
def __dtw_tree_from_linkage(self, linkage, prototypes, method):
"""
Computes a prototyped tree from linkage matrix
:param linkage: linkage matrix
:param prototypes: possibly precomputed prototypes
:param method: prototyping method
:return:
"""
if method == 'psa':
averaging_func = lambda x, y, wx, wy: \
transformations.sdtw_averaging(x, y, wx, wy, dtw_function=self.dtw_function)
elif method == 'standard':
averaging_func = lambda x, y, wx, wy: \
transformations.dtw_path_averaging(x, y, wx, wy, dtw_function=self.dtw_function)
elif method == 'standard-unweighted':
averaging_func = lambda x, y, wx, wy: \
transformations.dtw_path_averaging(x, y, 1, 1, dtw_function=self.dtw_function)
elif method == 'mean':
averaging_func = 'mean' # Not a function really, but the code will deal with it
else:
raise ValueError('Incorrect method supplied: '
'only \'psa\', \'standard\', \'standard-unweighted\' or \'mean\' supported')
return _to_dtw_tree(linkage, self, prototypes, averaging_func)
@property
def data(self):
"""
Returns the data that is internal to the object
:rtype: AlignmentsData
"""
return self._data
@property
def regions(self):
return self._regions
@property
def dtw_function(self):
return self.__dtw_function
@property
def condensed_distance_matrix(self):
return self._condensed_distance_matrix
@property
def linkage(self):
"""
Returns the linkage matrix of the clustering provided
:return: linkage matrix.
"""
return self._linkage_matrix
def distance_threshold_for_n_clusters(self, n_clusters):
"""
Returns the distance threshold that can be used to form `n_clusters`.
:param n_clusters: number of clusters to form
:return:
"""
n_clusters = int(n_clusters)
assert n_clusters > 0, 'Minimum number of clusters is 1, got {}'.format(n_clusters)
linkage = self.linkage
n = self.num_obs
assert n >= n_clusters, 'Specified number of clusters ' \
'{} is larger than number of data points {}'.format(n_clusters, n)
# Special case, otherwise it doesn't work
if n_clusters == 1:
return np.inf
else:
max_distances = np.empty(self.num_obs, dtype=np.double)
get_max_dist_for_each_cluster(linkage, max_distances, n)
threshold = max_distances[-n_clusters]
return threshold
@property
def num_obs(self):
return self.data.number_of_items
@property
def dataset_names(self):
return self.data.dataset_axis
def as_tree(self):
return self.__tree
@property
def tree_nodes_list(self):
return self.__tree_nodes_list
def dendrogram(self, ax=None, no_labels=True, *args, **kwargs):
"""
Plots the dendrogram for the hierarchical clustering
:return:
"""
from dgw.util.plotting import pyplot as plt
linkage = self.linkage
if ax is None:
ax = plt.gca()
color_threshold = kwargs.pop('color_threshold', self._distance_threshold)
ans = hierarchy.dendrogram(linkage, no_labels=no_labels,
above_threshold_color='k',
color_threshold=color_threshold,
*args, **kwargs)
ax.set_xlabel('Distance')
return ans
def _rename_nodes(self, tree):
"""
Renames the leaf nodes of the cluster tree generated by linkage to match the actual indices of the data.
:param tree:
:return:
"""
stack = [tree]
while stack:
node = stack.pop()
if node.is_leaf():
node.id = self.data.items[node.id]
else:
stack.append(node.get_left())
stack.append(node.get_right())
def cut(self, t):
"""
Cuts the dendrogram at specified threshold t.
:param t: threshold
:return:
"""
self._distance_threshold = t
root = self.as_tree()
queue = set([root])
clusters = set()
while queue:
current_node = queue.pop()
if current_node.dist >= t:
queue.add(current_node.get_right())
queue.add(current_node.get_left())
else:
clusters.add(current_node)
return ClusterAssignments(self, clusters, t)
def cut_and_resort(self, cut_threshold, index):
"""
Cuts the dendrogram at the specified threshold and reorders each resulting cluster to follow `index` (e.g. the leaf order returned by `scipy.cluster.hierarchy.dendrogram`)
:param cut_threshold: cut threshold to cut clusters at
:param index: index of nodes that is already in order
:return: clusters
"""
cluster_assignments = self.cut(cut_threshold)
already_assigned_indices = pd.Index([])
for cluster in cluster_assignments:
cluster_index = cluster.index
sub_index = pd.Index([i for i in index if i in cluster_index])
cluster.reindex(sub_index)
if len(already_assigned_indices & sub_index):
raise Exception("There is some overlap between cluster cuts. There shouldn't be")
already_assigned_indices = (already_assigned_indices | sub_index)
return cluster_assignments
class ClusterAssignments(object):
_hierarchical_clustering_object = None
_cluster_roots = None
_clusters = None
_cut_depth = None
def __init__(self, hierarchical_clustering_object, cluster_roots, cut_depth):
self._hierarchical_clustering_object = hierarchical_clustering_object
self._cluster_roots = cluster_roots
clusters = []
for cluster_root in self._cluster_roots:
clusters.append(cluster_root)
self._clusters = clusters
self._cut_depth = cut_depth
@property
def n(self):
return len(self._cluster_roots)
def __len__(self):
return self.n
@property
def dataset_names(self):
return self.hierarchical_clustering_object.dataset_names
@property
def cut_depth(self):
return self._cut_depth
@property
def hierarchical_clustering_object(self):
return self._hierarchical_clustering_object
@property
def cluster_sizes(self):
return pd.Series(map(len, self.clusters))
def __repr__(self):
if len(self.clusters) <= 10:
clusters_repr = '\n'.join(map(repr, self.clusters))
else:
clusters_repr = '\n'.join(map(repr, self.clusters[:3]))
clusters_repr += '\n...\n'
clusters_repr += '\n'.join(map(repr, self.clusters[-3:]))
return '<ClusterAssignments n={0}, cut depth: {1}\nClusters: \n{2}>'.format(self.n, self.cut_depth, clusters_repr)
def flatten(self):
"""
Flattens the data into a `pd.Series` object that gives a cluster number to every element in the original data.
:return:
"""
buffer = np.empty(self._hierarchical_clustering_object.num_obs)
for i, cluster in enumerate(self.clusters):
queue = set([cluster])
while queue:
node = queue.pop()
if node.is_leaf():
buffer[node.id] = i + 1
else:
queue.add(node.get_left())
queue.add(node.get_right())
return pd.Series(buffer, index=self.hierarchical_clustering_object.data.index)
@property
def clusters(self):
"""
Returns the data clusters (no particular order is guaranteed)
:return:
"""
return self._clusters
def __iter__(self):
return iter(self.clusters)
def __getitem__(self, key):
return self.clusters[key]
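# A minimal sketch, using scipy only on illustrative random data, of the idea
# behind `HierarchicalClustering.cut` and `distance_threshold_for_n_clusters`:
# choose a distance threshold and read off the flat clusters it induces.
def _example_cut_dendrogram():
linkage_matrix = hierarchy.linkage(np.random.rand(10, 2))
flat = hierarchy.fcluster(linkage_matrix, t=3, criterion='maxclust')
assert len(np.unique(flat)) <= 3  # at most the requested number of clusters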
|
yaroslav-tarasov/avango
|
refs/heads/master
|
avango-blender/blender-addon/nodes/float_node.py
|
3
|
import bpy
from bpy.types import Node
from .. import node_tree
from .. import export_utils
class FloatNode(Node, node_tree.AvangoCustomTreeNode):
bl_idname = "FloatInputNode"
bl_label = "Float"
def init(self, context):
self.inputs.new("NodeSocketFloat", "Value")
self.outputs.new("NodeSocketFloat", "Value")
def to_dict(self):
return {
'type': 'Float',
'name': self.name,
'values': export_utils.export_values(self),
'field_connections': export_utils.export_links(self)
}
def execute(self, x):
return x
def update(self):
pass
def register():
bpy.utils.register_class(FloatNode)
def unregister():
bpy.utils.unregister_class(FloatNode)
|
mhostetter/gnuradio
|
refs/heads/master
|
gr-wxgui/python/wxgui/waterfallsink_nongl.py
|
58
|
#!/usr/bin/env python
#
# Copyright 2003-2005,2007,2008,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, fft, filter
from gnuradio import blocks
from gnuradio import analog
from gnuradio.wxgui import stdgui2
from gnuradio.filter import window
import wx
import gnuradio.wxgui.plot as plot
import numpy
import os
import math
default_fftsink_size = (640,240)
default_fft_rate = gr.prefs().get_long('wxgui', 'fft_rate', 15)
class waterfall_sink_base(object):
def __init__(self, input_is_real=False, baseband_freq=0,
sample_rate=1, fft_size=512,
fft_rate=default_fft_rate,
average=False, avg_alpha=None, title=''):
# initialize common attributes
self.baseband_freq = baseband_freq
self.sample_rate = sample_rate
self.fft_size = fft_size
self.fft_rate = fft_rate
self.average = average
if avg_alpha is None:
self.avg_alpha = 2.0 / fft_rate
else:
self.avg_alpha = avg_alpha
self.title = title
self.input_is_real = input_is_real
self.msgq = gr.msg_queue(2) # queue up to 2 messages
def set_average(self, average):
self.average = average
if average:
self.avg.set_taps(self.avg_alpha)
else:
self.avg.set_taps(1.0)
def set_avg_alpha(self, avg_alpha):
self.avg_alpha = avg_alpha
def set_baseband_freq(self, baseband_freq):
self.baseband_freq = baseband_freq
def set_sample_rate(self, sample_rate):
self.sample_rate = sample_rate
self._set_n()
def _set_n(self):
self.one_in_n.set_n(max(1, int(self.sample_rate/self.fft_size/self.fft_rate)))
def set_callback(self, callb):
return
class waterfall_sink_f(gr.hier_block2, waterfall_sink_base):
def __init__(self, parent, baseband_freq=0,
y_per_div=10, ref_level=50, sample_rate=1, fft_size=512,
fft_rate=default_fft_rate, average=False, avg_alpha=None,
title='', size=default_fftsink_size, **kwargs):
gr.hier_block2.__init__(self, "waterfall_sink_f",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(0,0,0))
waterfall_sink_base.__init__(self, input_is_real=True, baseband_freq=baseband_freq,
sample_rate=sample_rate, fft_size=fft_size,
fft_rate=fft_rate,
average=average, avg_alpha=avg_alpha, title=title)
self.s2p = blocks.stream_to_vector(gr.sizeof_float, self.fft_size)
self.one_in_n = blocks.keep_one_in_n(gr.sizeof_float * self.fft_size,
max(1, int(self.sample_rate/self.fft_size/self.fft_rate)))
mywindow = window.blackmanharris(self.fft_size)
self.fft = fft.fft_vfc(self.fft_size, True, mywindow)
self.c2mag = blocks.complex_to_mag(self.fft_size)
self.avg = filter.single_pole_iir_filter_ff(1.0, self.fft_size)
self.log = blocks.nlog10_ff(20, self.fft_size, -20*math.log10(self.fft_size))
self.sink = blocks.message_sink(gr.sizeof_float * self.fft_size, self.msgq, True)
self.connect(self, self.s2p, self.one_in_n, self.fft, self.c2mag, self.avg, self.log, self.sink)
self.win = waterfall_window(self, parent, size=size)
self.set_average(self.average)
class waterfall_sink_c(gr.hier_block2, waterfall_sink_base):
def __init__(self, parent, baseband_freq=0,
y_per_div=10, ref_level=50, sample_rate=1, fft_size=512,
fft_rate=default_fft_rate, average=False, avg_alpha=None,
title='', size=default_fftsink_size, **kwargs):
gr.hier_block2.__init__(self, "waterfall_sink_c",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(0,0,0))
waterfall_sink_base.__init__(self, input_is_real=False, baseband_freq=baseband_freq,
sample_rate=sample_rate, fft_size=fft_size,
fft_rate=fft_rate,
average=average, avg_alpha=avg_alpha, title=title)
self.s2p = blocks.stream_to_vector(gr.sizeof_gr_complex, self.fft_size)
self.one_in_n = blocks.keep_one_in_n(gr.sizeof_gr_complex * self.fft_size,
max(1, int(self.sample_rate/self.fft_size/self.fft_rate)))
mywindow = window.blackmanharris(self.fft_size)
self.fft = fft.fft_vcc(self.fft_size, True, mywindow)
self.c2mag = blocks.complex_to_mag(self.fft_size)
self.avg = filter.single_pole_iir_filter_ff(1.0, self.fft_size)
self.log = blocks.nlog10_ff(20, self.fft_size, -20*math.log10(self.fft_size))
self.sink = blocks.message_sink(gr.sizeof_float * self.fft_size, self.msgq, True)
self.connect(self, self.s2p, self.one_in_n, self.fft, self.c2mag, self.avg, self.log, self.sink)
self.win = waterfall_window(self, parent, size=size)
self.set_average(self.average)
# ------------------------------------------------------------------------
myDATA_EVENT = wx.NewEventType()
EVT_DATA_EVENT = wx.PyEventBinder (myDATA_EVENT, 0)
class DataEvent(wx.PyEvent):
def __init__(self, data):
wx.PyEvent.__init__(self)
self.SetEventType (myDATA_EVENT)
self.data = data
def Clone (self):
return self.__class__ (self.GetId())
class input_watcher (gru.msgq_runner):
def __init__ (self, msgq, fft_size, event_receiver, **kwds):
self.fft_size = fft_size
self.event_receiver = event_receiver
gru.msgq_runner.__init__(self, msgq, self.handle_msg)
def handle_msg(self, msg):
itemsize = int(msg.arg1())
nitems = int(msg.arg2())
s = msg.to_string() # get the body of the msg as a string
# There may be more than one FFT frame in the message.
# If so, we take only the last one
if nitems > 1:
start = itemsize * (nitems - 1)
s = s[start:start+itemsize]
complex_data = numpy.fromstring (s, numpy.float32)
de = DataEvent (complex_data)
wx.PostEvent (self.event_receiver, de)
del de
class waterfall_window (wx.Panel):
def __init__ (self, fftsink, parent, id = -1,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = wx.DEFAULT_FRAME_STYLE, name = ""):
wx.Panel.__init__(self, parent, id, pos, size, style, name)
self.set_baseband_freq = fftsink.set_baseband_freq
self.fftsink = fftsink
self.bm = wx.EmptyBitmap(self.fftsink.fft_size, 300, -1)
self.scale_factor = 5.0 # FIXME should autoscale, or set this
dc1 = wx.MemoryDC()
dc1.SelectObject(self.bm)
dc1.Clear()
self.pens = self.make_pens()
wx.EVT_PAINT( self, self.OnPaint )
wx.EVT_CLOSE (self, self.on_close_window)
EVT_DATA_EVENT (self, self.set_data)
self.build_popup_menu()
self.Bind(wx.EVT_RIGHT_UP, self.on_right_click)
self.input_watcher = input_watcher(fftsink.msgq, fftsink.fft_size, self)
def on_close_window (self, event):
print "waterfall_window: on_close_window"
self.keep_running = False
def const_list(self,const,len):
return [const] * len
def make_colormap(self):
r = []
r.extend(self.const_list(0,96))
r.extend(range(0,255,4))
r.extend(self.const_list(255,64))
r.extend(range(255,128,-4))
g = []
g.extend(self.const_list(0,32))
g.extend(range(0,255,4))
g.extend(self.const_list(255,64))
g.extend(range(255,0,-4))
g.extend(self.const_list(0,32))
b = range(128,255,4)
b.extend(self.const_list(255,64))
b.extend(range(255,0,-4))
b.extend(self.const_list(0,96))
return (r,g,b)
def make_pens(self):
(r,g,b) = self.make_colormap()
pens = []
for i in range(0,256):
colour = wx.Colour(r[i], g[i], b[i])
pens.append( wx.Pen(colour, 2, wx.SOLID))
return pens
def OnPaint(self, event):
dc = wx.PaintDC(self)
self.DoDrawing(dc)
def DoDrawing(self, dc=None):
if dc is None:
dc = wx.ClientDC(self)
dc.DrawBitmap(self.bm, 0, 0, False )
def set_data (self, evt):
dB = evt.data
L = len (dB)
dc1 = wx.MemoryDC()
dc1.SelectObject(self.bm)
dc1.Blit(0,1,self.fftsink.fft_size,300,dc1,0,0,wx.COPY,False,-1,-1)
x = max(abs(self.fftsink.sample_rate), abs(self.fftsink.baseband_freq))
if x >= 1e9:
sf = 1e-9
units = "GHz"
elif x >= 1e6:
sf = 1e-6
units = "MHz"
else:
sf = 1e-3
units = "kHz"
if self.fftsink.input_is_real: # only plot 1/2 the points
d_max = L/2
p_width = 2
else:
d_max = L/2
p_width = 1
scale_factor = self.scale_factor
if self.fftsink.input_is_real: # real fft
for x_pos in range(0, d_max):
value = int(dB[x_pos] * scale_factor)
value = min(255, max(0, value))
dc1.SetPen(self.pens[value])
dc1.DrawRectangle(x_pos*p_width, 0, p_width, 2)
else: # complex fft
for x_pos in range(0, d_max): # positive freqs
value = int(dB[x_pos] * scale_factor)
value = min(255, max(0, value))
dc1.SetPen(self.pens[value])
dc1.DrawRectangle(x_pos*p_width + d_max, 0, p_width, 2)
for x_pos in range(0 , d_max): # negative freqs
value = int(dB[x_pos+d_max] * scale_factor)
value = min(255, max(0, value))
dc1.SetPen(self.pens[value])
dc1.DrawRectangle(x_pos*p_width, 0, p_width, 2)
del dc1
self.DoDrawing (None)
def on_average(self, evt):
# print "on_average"
self.fftsink.set_average(evt.IsChecked())
def on_right_click(self, event):
menu = self.popup_menu
for id, pred in self.checkmarks.items():
item = menu.FindItemById(id)
item.Check(pred())
self.PopupMenu(menu, event.GetPosition())
def build_popup_menu(self):
self.id_incr_ref_level = wx.NewId()
self.id_decr_ref_level = wx.NewId()
self.id_incr_y_per_div = wx.NewId()
self.id_decr_y_per_div = wx.NewId()
self.id_y_per_div_1 = wx.NewId()
self.id_y_per_div_2 = wx.NewId()
self.id_y_per_div_5 = wx.NewId()
self.id_y_per_div_10 = wx.NewId()
self.id_y_per_div_20 = wx.NewId()
self.id_average = wx.NewId()
self.Bind(wx.EVT_MENU, self.on_average, id=self.id_average)
#self.Bind(wx.EVT_MENU, self.on_incr_ref_level, id=self.id_incr_ref_level)
#self.Bind(wx.EVT_MENU, self.on_decr_ref_level, id=self.id_decr_ref_level)
#self.Bind(wx.EVT_MENU, self.on_incr_y_per_div, id=self.id_incr_y_per_div)
#self.Bind(wx.EVT_MENU, self.on_decr_y_per_div, id=self.id_decr_y_per_div)
#self.Bind(wx.EVT_MENU, self.on_y_per_div, id=self.id_y_per_div_1)
#self.Bind(wx.EVT_MENU, self.on_y_per_div, id=self.id_y_per_div_2)
#self.Bind(wx.EVT_MENU, self.on_y_per_div, id=self.id_y_per_div_5)
#self.Bind(wx.EVT_MENU, self.on_y_per_div, id=self.id_y_per_div_10)
#self.Bind(wx.EVT_MENU, self.on_y_per_div, id=self.id_y_per_div_20)
# make a menu
menu = wx.Menu()
self.popup_menu = menu
menu.AppendCheckItem(self.id_average, "Average")
# menu.Append(self.id_incr_ref_level, "Incr Ref Level")
# menu.Append(self.id_decr_ref_level, "Decr Ref Level")
# menu.Append(self.id_incr_y_per_div, "Incr dB/div")
# menu.Append(self.id_decr_y_per_div, "Decr dB/div")
# menu.AppendSeparator()
# we'd use RadioItems for these, but they're not supported on Mac
#menu.AppendCheckItem(self.id_y_per_div_1, "1 dB/div")
#menu.AppendCheckItem(self.id_y_per_div_2, "2 dB/div")
#menu.AppendCheckItem(self.id_y_per_div_5, "5 dB/div")
#menu.AppendCheckItem(self.id_y_per_div_10, "10 dB/div")
#menu.AppendCheckItem(self.id_y_per_div_20, "20 dB/div")
self.checkmarks = {
self.id_average : lambda : self.fftsink.average
#self.id_y_per_div_1 : lambda : self.fftsink.y_per_div == 1,
#self.id_y_per_div_2 : lambda : self.fftsink.y_per_div == 2,
#self.id_y_per_div_5 : lambda : self.fftsink.y_per_div == 5,
#self.id_y_per_div_10 : lambda : self.fftsink.y_per_div == 10,
#self.id_y_per_div_20 : lambda : self.fftsink.y_per_div == 20,
}
def next_up(v, seq):
"""
Return the first item in seq that is > v.
"""
for s in seq:
if s > v:
return s
return v
def next_down(v, seq):
"""
Return the last item in seq that is < v.
"""
rseq = list(seq[:])
rseq.reverse()
for s in rseq:
if s < v:
return s
return v
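# A minimal usage sketch for next_up/next_down, with an illustrative sequence:
# step through a sorted list of candidate values.
def _example_next_up_down():
seq = [1, 2, 5, 10, 20]
assert next_up(5, seq) == 10
assert next_down(5, seq) == 2
assert next_up(20, seq) == 20  # nothing larger: value is returned unchanged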
# ----------------------------------------------------------------
# Standalone test app
# ----------------------------------------------------------------
class test_top_block (stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
fft_size = 512
# build our flow graph
input_rate = 20.000e3
# Generate a complex sinusoid
self.src1 = analog.sig_source_c(input_rate, analog.GR_SIN_WAVE, 5.75e3, 1000)
#src1 = analog.sig_source_c(input_rate, analog.GR_CONST_WAVE, 5.75e3, 1000)
# We add these throttle blocks so that this demo doesn't
# suck down all the CPU available. Normally you wouldn't use these.
self.thr1 = blocks.throttle(gr.sizeof_gr_complex, input_rate)
sink1 = waterfall_sink_c(panel, title="Complex Data", fft_size=fft_size,
sample_rate=input_rate, baseband_freq=100e3)
self.connect(self.src1, self.thr1, sink1)
vbox.Add(sink1.win, 1, wx.EXPAND)
# generate a real sinusoid
self.src2 = analog.sig_source_f(input_rate, analog.GR_SIN_WAVE, 5.75e3, 1000)
self.thr2 = blocks.throttle(gr.sizeof_float, input_rate)
sink2 = waterfall_sink_f(panel, title="Real Data", fft_size=fft_size,
sample_rate=input_rate, baseband_freq=100e3)
self.connect(self.src2, self.thr2, sink2)
vbox.Add(sink2.win, 1, wx.EXPAND)
def main ():
app = stdgui2.stdapp(test_top_block, "Waterfall Sink Test App")
app.MainLoop()
if __name__ == '__main__':
main()
|
atchariya/django-angular
|
refs/heads/master
|
djangular/forms/widgets.py
|
9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.forms import widgets
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.forms.util import flatatt
class ChoiceFieldRenderer(widgets.ChoiceFieldRenderer):
def render(self):
"""
Outputs a <ul ng-form="name"> for this set of choice fields to nest an ngForm.
"""
start_tag = format_html('<ul {0}>', mark_safe(' '.join(self.field_attrs)))
output = [start_tag]
for widget in self:
output.append(format_html('<li>{0}</li>', force_text(widget)))
output.append('</ul>')
return mark_safe('\n'.join(output))
class CheckboxChoiceInput(widgets.CheckboxChoiceInput):
def tag(self, attrs=None):
attrs = attrs or self.attrs
name = '{0}.{1}'.format(self.name, self.choice_value)
tag_attrs = dict(attrs, type=self.input_type, name=name, value=self.choice_value)
if 'id' in attrs:
tag_attrs['id'] = '{0}_{1}'.format(attrs['id'], self.index)
if 'ng-model' in attrs:
tag_attrs['ng-model'] = '{0}.{1}'.format(attrs['ng-model'], self.choice_value)
if self.is_checked():
tag_attrs['checked'] = 'checked'
return format_html('<input{0} />', flatatt(tag_attrs))
class CheckboxFieldRendererMixin(object):
def __init__(self, name, value, attrs, choices):
attrs.pop('djng-error', None)
self.field_attrs = [format_html('ng-form="{0}"', name)]
if attrs.pop('multiple_checkbox_required', False):
field_names = [format_html('{0}.{1}', name, choice) for choice, dummy in choices]
self.field_attrs.append(format_html('validate-multiple-fields="{0}"', json.dumps(field_names)))
super(CheckboxFieldRendererMixin, self).__init__(name, value, attrs, choices)
class CheckboxFieldRenderer(CheckboxFieldRendererMixin, ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class CheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
"""
Form fields of type 'MultipleChoiceField' using the widget 'CheckboxSelectMultiple' must behave
slightly differently from the original. This widget overrides the default functionality.
"""
renderer = CheckboxFieldRenderer
def implode_multi_values(self, name, data):
"""
Due to the way Angular organizes its model, when Form data is sent via a POST request,
the posted data for this kind of widget must be converted into a format suitable
for Django's Form validation.
"""
mkeys = [k for k in data.keys() if k.startswith(name + '.')]
mvls = [data.pop(k)[0] for k in mkeys]
if mvls:
data.setlist(name, mvls)
def convert_ajax_data(self, field_data):
"""
Due to the way Angular organizes its model, when this Form data is sent using Ajax,
the sent data for this kind of widget has to be converted into a format suitable
for Django's Form validation.
"""
return [key for key, val in field_data.items() if val]
def get_field_attrs(self, field):
return {'multiple_checkbox_required': field.required}
class RadioFieldRendererMixin(object):
def __init__(self, name, value, attrs, choices):
attrs.pop('djng-error', None)
self.field_attrs = []
if attrs.pop('radio_select_required', False):
self.field_attrs.append(format_html('validate-multiple-fields="{0}"', name))
super(RadioFieldRendererMixin, self).__init__(name, value, attrs, choices)
class RadioFieldRenderer(RadioFieldRendererMixin, ChoiceFieldRenderer):
choice_input_class = widgets.RadioChoiceInput
class RadioSelect(widgets.RadioSelect):
"""
Form fields of type 'ChoiceField' using the widget 'RadioSelect' must behave
slightly differently from the original. This widget overrides the default functionality.
"""
renderer = RadioFieldRenderer
def get_field_attrs(self, field):
return {'radio_select_required': field.required}
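# A minimal usage sketch of implode_multi_values; the field name 'colors', its
# choices and the bare settings.configure() call are illustrative assumptions.
# Per-choice keys such as 'colors.red' are folded back into one list under
# 'colors', as Django's form validation expects.
def _example_implode_multi_values():
from django.conf import settings
if not settings.configured:
settings.configure()
from django.http import QueryDict
data = QueryDict('colors.red=red&colors.blue=blue').copy()  # mutable copy
CheckboxSelectMultiple().implode_multi_values('colors', data)
assert set(data.getlist('colors')) == set(['red', 'blue'])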
|
fparrel/vigicrues_tools
|
refs/heads/master
|
arpal_scrap.py
|
1
|
#!/usr/bin/env python
from arpal_get_stations import scrap
def main():
scrap()
if __name__=='__main__':
main()
|
schets/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/tests/test_dict_learning.py
|
40
|
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10*len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
|
iRGBit/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/DeleteDuplicateGeometries.py
|
10
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
DeleteDuplicateGeometries.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsGeometry, QgsFeatureRequest
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class DeleteDuplicateGeometries(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Delete duplicate geometries')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Cleaned')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
fields = layer.pendingFields()
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
layer.wkbType(), layer.crs())
features = vector.features(layer)
count = len(features)
total = 100.0 / float(count)
geoms = dict()
for count, f in enumerate(features):
geoms[f.id()] = QgsGeometry(f.geometry())
progress.setPercentage(int(count * total))
cleaned = dict(geoms)
for i, g in geoms.iteritems():
for j in cleaned.keys():
if i == j or i not in cleaned:
continue
if g.isGeosEqual(cleaned[j]):
del cleaned[j]
count = len(cleaned)
total = 100.0 / float(count)
request = QgsFeatureRequest().setFilterFids(cleaned.keys())
for count, f in enumerate(layer.getFeatures(request)):
writer.addFeature(f)
progress.setPercentage(int(count * total))
del writer
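# A minimal sketch, with illustrative data, of the pairwise deduplication
# performed in processAlgorithm above, using plain string equality in place
# of QgsGeometry.isGeosEqual.
def _example_pairwise_dedup():
geoms = {1: 'a', 2: 'b', 3: 'a'}
cleaned = dict(geoms)
for i, g in geoms.items():
for j in list(cleaned.keys()):
if i == j or i not in cleaned:
continue
if g == cleaned[j]:
del cleaned[j]
assert sorted(cleaned.values()) == ['a', 'b']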
|
akshayka/projection-methods
|
refs/heads/master
|
projection_methods/oracles/utils.py
|
1
|
import numpy as np
from projection_methods.projectables.halfspace import Halfspace
def containing_halfspace(x_0, x_star, x):
"""Returns a halfspace containing a convex set
Args:
x_0 (array-like): query point
x_star (array-like): projection of x_0 onto a convex set
x (CVXPY Variable): variable to constrain
Returns:
Halfspace: a halfspace of the form a.T(x) <= b,
where a = x_0 - x_star, b = a.T(x_star)
"""
# a = x_0 - x_star
# a.T(y - x_star) <= 0 for all y in C
# <==> a.T(y) <= a.T(x_star) := b
a = x_0 - x_star
b = a.dot(x_star)
# TODO(akshayka): Return None if a is too close to 0
return Halfspace(x=x, a=a, b=b)
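# A minimal numeric sketch, with illustrative data, of the supporting-halfspace
# construction used by containing_halfspace, written in plain numpy to avoid
# the Halfspace/CVXPY dependencies: project x_0 onto the unit ball and check
# that a.T(y) <= b keeps x_star but cuts off x_0.
def _example_containing_halfspace():
x_0 = np.array([2.0, 0.0])
x_star = x_0 / np.linalg.norm(x_0)  # projection onto the unit ball
a = x_0 - x_star
b = a.dot(x_star)
assert a.dot(x_star) <= b + 1e-9  # x_star satisfies the halfspace
assert a.dot(x_0) > b  # x_0 is strictly cut off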
|
AriZuu/micropython
|
refs/heads/master
|
tests/import/pkg3/subpkg1/__init__.py
|
118
|
print("subpkg1 __name__:", __name__)
|
shft117/SteckerApp
|
refs/heads/master
|
erpnext/patches/v6_4/repost_gle_for_journal_entries_where_reference_name_missing.py
|
50
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
je_list = frappe.db.sql_list("""select distinct parent from `tabJournal Entry Account` je
where docstatus=1 and ifnull(reference_name, '') !='' and creation > '2015-03-01'
and not exists(select name from `tabGL Entry`
where voucher_type='Journal Entry' and voucher_no=je.parent
and against_voucher_type=je.reference_type
and against_voucher=je.reference_name)""")
for d in je_list:
print d
# delete existing gle
frappe.db.sql("delete from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s", d)
# repost gl entries
je = frappe.get_doc("Journal Entry", d)
je.make_gl_entries()
|
goodwillcoding/RIDE
|
refs/heads/master
|
src/robotide/validators/__init__.py
|
1
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from robotide import robotapi, utils
class _AbstractValidator(wx.PyValidator):
"""Implements methods to keep wxPython happy and some helper methods."""
def Clone(self):
return self.__class__()
def TransferFromWindow(self):
return True
def TransferToWindow(self):
return True
def Validate(self, win):
value = self.Window.Value
error = self._validate(value)
if error:
self._show_error(error)
return False
return True
def _show_error(self, message, title='Validation Error'):
ret = wx.MessageBox(message, title, style=wx.ICON_ERROR)
self._set_focus_to_text_control(self.Window)
return ret
def _set_focus_to_text_control(self, ctrl):
ctrl.SetFocus()
ctrl.SelectAll()
class TimeoutValidator(_AbstractValidator):
def _validate(self, value):
time_tokens = utils.split_value(value)
if not time_tokens:
return None
timestr = time_tokens[0]
try:
secs = utils.timestr_to_secs(timestr)
if secs <= 0:
raise ValueError("Time string must be greater than zero")
time_tokens[0] = utils.secs_to_timestr(secs)
except ValueError, err:
if '${' not in timestr:
return str(err)
self._set_window_value(utils.join_value(time_tokens))
return None
def _set_window_value(self, value):
self.Window.SetValue(value)
class ArgumentsValidator(_AbstractValidator):
def _validate(self, args_str):
try:
types = [self._get_type(arg)
for arg in utils.split_value(args_str)]
except ValueError:
return "Invalid argument syntax '%s'" % arg
return self._validate_list_args_in_correct_place(
types) or self._validate_req_args_in_correct_place(types) or None
def _get_type(self, arg):
if robotapi.is_scalar_var(arg):
return 1
elif robotapi.is_scalar_var(arg.split("=")[0]):
return 2
elif robotapi.is_list_var(arg):
return 3
else:
raise ValueError
def _validate_list_args_in_correct_place(self, types):
if 3 in types and types.index(3) != len(types) - 1:
return "List variable allowed only as the last argument"
return None
def _validate_req_args_in_correct_place(self, types):
prev = 0
for t in types:
if t < prev:
return ("Required arguments not allowed after arguments "
"with default values.")
prev = t
return None
class NonEmptyValidator(_AbstractValidator):
def __init__(self, field_name):
_AbstractValidator.__init__(self)
self._field_name = field_name
def Clone(self):
return self.__class__(self._field_name)
def _validate(self, value):
if not value:
return '%s cannot be empty' % self._field_name
return None
class SuiteFileNameValidator(NonEmptyValidator):
def __init__(self, field_name, is_dir_type):
NonEmptyValidator.__init__(self, field_name)
self._is_dir_type = is_dir_type
def Clone(self):
return self.__class__(self._field_name, self._is_dir_type)
def _validate(self, value):
validity = NonEmptyValidator._validate(self, value)
if not self._is_dir_type() and not validity:
if value.lower() == '__init__':
return 'Invalid suite file name "%s"' % value
return validity
class DirectoryExistsValidator(_AbstractValidator):
def _validate(self, value):
if not os.path.isdir(value):
return 'Chosen directory must exist'
return None
class NewSuitePathValidator(_AbstractValidator):
def _validate(self, value):
path = os.path.normpath(value)
if os.path.exists(path):
return 'Target file or directory must not exist'
parentdir, filename = os.path.split(path)
if '__init__' in filename:
parentdir = os.path.dirname(parentdir)
if not os.path.exists(parentdir):
return 'Parent directory must exist'
return None
class _NameValidator(_AbstractValidator):
def __init__(self, controller, orig_name=None):
_AbstractValidator.__init__(self)
self._controller = controller
self._orig_name = orig_name
def Clone(self):
return self.__class__(self._controller, self._orig_name)
def _validate(self, name):
if self._orig_name is not None and utils.eq(
name, self._orig_name, ignore=['_']):
return ''
return self._validation_method(name).error_message
class TestCaseNameValidator(_NameValidator):
@property
def _validation_method(self):
return self._controller.validate_test_name
class UserKeywordNameValidator(_NameValidator):
@property
def _validation_method(self):
return self._controller.validate_keyword_name
class ScalarVariableNameValidator(_NameValidator):
@property
def _validation_method(self):
return self._controller.validate_scalar_variable_name
class ListVariableNameValidator(_NameValidator):
@property
def _validation_method(self):
return self._controller.validate_list_variable_name
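# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch, assuming a display is available for wx: the widget
# clones and owns the validator passed to it, and validation can then be
# triggered explicitly (dialogs normally do this when OK is pressed).
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None)
    ctrl = wx.TextCtrl(frame, validator=NonEmptyValidator('Name'))
    # Pops up "Name cannot be empty" because the control's value is empty.
    ctrl.GetValidator().Validate(frame)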
|
Endika/odoomrp-utils
|
refs/heads/8.0
|
l10n_eu_product_adr_report/__init__.py
|
379
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import models
|
jaumebonet/pynion
|
refs/heads/master
|
pynion/abstractclass/__init__.py
|
2
|
"""Abstract classes in **pynion** are designed in order to help users develop
their classes by adding a set of default functions and properties.
.. moduleauthor:: Jaume Bonet <jaume.bonet@gmail.com>
"""
from .jsoner import JSONer
__all__ = ["JSONer"]
|
Kittima/kittima.github.io
|
refs/heads/master
|
code/python/IPython-notebook-extensions/nbextensions/usability/latex_envs/conversion/toc_and_cln.py
|
3
|
"""
Created on Thu Nov 18 15:34:38 2014
@author: JF
"""
import glob
import os
import sys
import time
from stat import *
def texheaders_filtering(input_file):
import re
st = os.stat(input_file)
atime = st[ST_ATIME] #access time
mtime = st[ST_MTIME] #modification time
with open(input_file,'rt') as f:
text=f.read()
#print(text)
my_texfile = input_file #file.split('.html')[0] + 'b.html'
if sys.version_info >= (3,0,0):
my_texfile_desc = open(my_texfile, 'wt', newline='')
else:
        my_texfile_desc = open(my_texfile, 'wt')
def remp(intext):
#out=re.findall('\\\\[sub]?section',intext.group(0))
out=re.findall('(\\\\[sub]?section|\\\\chapter)',intext.group(0))
return out[-1]
# Remove Table of Contents section
newtext=re.sub(r'\\section{Table of Contents}([\s\S]*?)(?=(?:\\[sub]?section|\\chapter))','',text,flags=re.M)
# Remove References section
newtext=re.sub(r'\\section{References}[\S\s]*?(?=(?:\\[sub]*section|\\chapter|\\end{document}|\Z))','',newtext,flags=re.M)
newtext=re.sub('\\\\begin{verbatim}[\s]*?<matplotlib\.[\S ]*?>[\s]*?\\\\end{verbatim}','',newtext,flags=re.M)
newtext=re.sub('\\\\begin{verbatim}[\s]*?<IPython\.core\.display[\S ]*?>[\s]*?\\\\end{verbatim}','',newtext,flags=re.M)
#bottom page with links to Index/back/next (suppress this)
#'----[\s]*?<div align=right> [Index](toc.ipynb)[\S ]*?.ipynb\)</div>'
newtext=re.sub('\\\\begin{center}\\\\rule{3in}{0.4pt}\\\\end{center}[\s]*?\\\\href{toc.ipynb}{Index}[\S\s ]*?.ipynb}{Next}','',newtext,flags=re.M)
# Looks for figcaption in the text. Then for the included image with \adjustimage...Then extracts caption and label from the figcaption
# and redraws the figure using a figure environment and an \includegraphics
# figcaption(text,label=)
tofind="figcaption\(([\s\S]*?)\)\n([\s\S]*?)\\\\begin{center}\s*\\\\adjustimage[\s\S]*?}}{([\S]*?)}\s*\\\\end{center}"
def replacement(text):
cap=re.match("\"([\S\s]*?)\",[\S\s]*?label=\"([\S]*?)\"",text.group(1))
if cap==None:
cap=re.match("\"([\S\s]*?)\"",text.group(1))
caption=cap.group(1)
label=""
rep="\n%s\n\\begin{figure}[H]\n\\centering\n\\includegraphics[width=0.6\\linewidth]{%s}\n\\caption{%s}\n\\end{figure}" % (text.group(2),text.group(3),caption)
else:
caption=cap.group(1)
label=cap.group(2)
rep="\n%s\n\\begin{figure}[H]\n\\centering\n\\includegraphics[width=0.6\\linewidth]{%s}\n\\caption{%s}\n\\label{%s}\n\\end{figure}" % (text.group(2),text.group(3),caption,label)
return rep
code="Init"
while (code!=None):
code=re.search(tofind,newtext)
newtext=re.sub(tofind,replacement,newtext,flags=re.M)
my_texfile_desc.write(newtext)
#modify the file timestamp
my_texfile_desc.close()
os.utime(my_texfile,(atime,mtime))
verbose=True
if __name__ == '__main__':
import argparse
whatitdoes="""This program filters a LaTeX file \n
- in order to remove the first 'table of contents section',
to the next section ;
- it replaces figcaption structures to "\\caption{\\label{}}" LaTeX constructs.
- Finally, it also filters out various 'spurious' outputs"""
myself="(c) JFB 2014"
parser = argparse.ArgumentParser(description=whatitdoes, epilog=myself)
# mandatory argument
parser.add_argument(
help = 'List of files to filter (accepts regular expressions)',
dest = 'argfiles', default = '*.tex', type = str, nargs = '*')
# verbosity flag
parser.add_argument('-v','--verbose', help = 'Prints information',
dest = 'verbose', default = False, #action='store_true'
action='count')
arguments = parser.parse_args()
verbose=arguments.verbose
if verbose==2: print("script arg: ", arguments.argfiles)
if isinstance(arguments.argfiles,list):
tex_files=[]
for x in arguments.argfiles:
tex_files = tex_files+glob.glob(x)
else:
tex_files = glob.glob(arguments.argfiles)
if verbose==2: print("glog.glog expansion: ", tex_files, '\n')
if len(tex_files ) == 0:
raise RuntimeError('No TeX files to convert.')
for file in tex_files:
if verbose:
print("Filtering {}".format(file))
texheaders_filtering(file)
|
Architektor/PySnip
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twisted/words/xish/domish.py
|
48
|
# -*- test-case-name: twisted.words.test.test_domish -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DOM-like XML processing support.
This module provides support for parsing XML into DOM-like object structures
and serializing such structures to an XML string representation, optimized
for use in streaming XML applications.
"""
import types
from zope.interface import implements, Interface, Attribute
def _splitPrefix(name):
""" Internal method for splitting a prefixed Element name into its
respective parts """
ntok = name.split(":", 1)
if len(ntok) == 2:
return ntok
else:
return (None, ntok[0])
# Global map of prefixes that always get injected
# into the serializers prefix map (note, that doesn't
# mean they're always _USED_)
G_PREFIXES = { "http://www.w3.org/XML/1998/namespace":"xml" }
class _ListSerializer:
""" Internal class which serializes an Element tree into a buffer """
def __init__(self, prefixes=None, prefixesInScope=None):
self.writelist = []
self.prefixes = {}
if prefixes:
self.prefixes.update(prefixes)
self.prefixes.update(G_PREFIXES)
self.prefixStack = [G_PREFIXES.values()] + (prefixesInScope or [])
self.prefixCounter = 0
def getValue(self):
return u"".join(self.writelist)
def getPrefix(self, uri):
if uri not in self.prefixes:
self.prefixes[uri] = "xn%d" % (self.prefixCounter)
self.prefixCounter = self.prefixCounter + 1
return self.prefixes[uri]
def prefixInScope(self, prefix):
stack = self.prefixStack
for i in range(-1, (len(self.prefixStack)+1) * -1, -1):
if prefix in stack[i]:
return True
return False
def serialize(self, elem, closeElement=1, defaultUri=''):
# Optimization shortcuts
write = self.writelist.append
# Shortcut, check to see if elem is actually a chunk o' serialized XML
if isinstance(elem, SerializedXML):
write(elem)
return
# Shortcut, check to see if elem is actually a string (aka Cdata)
if isinstance(elem, types.StringTypes):
write(escapeToXml(elem))
return
# Further optimizations
name = elem.name
uri = elem.uri
defaultUri, currentDefaultUri = elem.defaultUri, defaultUri
for p, u in elem.localPrefixes.iteritems():
self.prefixes[u] = p
self.prefixStack.append(elem.localPrefixes.keys())
# Inherit the default namespace
if defaultUri is None:
defaultUri = currentDefaultUri
if uri is None:
uri = defaultUri
prefix = None
if uri != defaultUri or uri in self.prefixes:
prefix = self.getPrefix(uri)
inScope = self.prefixInScope(prefix)
# Create the starttag
if not prefix:
write("<%s" % (name))
else:
write("<%s:%s" % (prefix, name))
if not inScope:
write(" xmlns:%s='%s'" % (prefix, uri))
self.prefixStack[-1].append(prefix)
inScope = True
if defaultUri != currentDefaultUri and \
(uri != defaultUri or not prefix or not inScope):
write(" xmlns='%s'" % (defaultUri))
for p, u in elem.localPrefixes.iteritems():
write(" xmlns:%s='%s'" % (p, u))
# Serialize attributes
for k,v in elem.attributes.items():
# If the attribute name is a tuple, it's a qualified attribute
if isinstance(k, types.TupleType):
attr_uri, attr_name = k
attr_prefix = self.getPrefix(attr_uri)
if not self.prefixInScope(attr_prefix):
write(" xmlns:%s='%s'" % (attr_prefix, attr_uri))
self.prefixStack[-1].append(attr_prefix)
write(" %s:%s='%s'" % (attr_prefix, attr_name,
escapeToXml(v, 1)))
else:
write((" %s='%s'" % ( k, escapeToXml(v, 1))))
# Shortcut out if this is only going to return
# the element (i.e. no children)
if closeElement == 0:
write(">")
return
# Serialize children
if len(elem.children) > 0:
write(">")
for c in elem.children:
self.serialize(c, defaultUri=defaultUri)
# Add closing tag
if not prefix:
write("</%s>" % (name))
else:
write("</%s:%s>" % (prefix, name))
else:
write("/>")
self.prefixStack.pop()
SerializerClass = _ListSerializer
def escapeToXml(text, isattrib = 0):
""" Escape text to proper XML form, per section 2.3 in the XML specification.
@type text: C{str}
@param text: Text to escape
@type isattrib: C{bool}
@param isattrib: Triggers escaping of characters necessary for use as
attribute values
"""
text = text.replace("&", "&")
text = text.replace("<", "<")
text = text.replace(">", ">")
if isattrib == 1:
text = text.replace("'", "'")
text = text.replace("\"", """)
return text
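# For example (illustrative): escapeToXml("1 < 2 & 'ok'", isattrib=1)
# returns "1 &lt; 2 &amp; &apos;ok&apos;".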
def unescapeFromXml(text):
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("'", "'")
text = text.replace(""", "\"")
text = text.replace("&", "&")
return text
def generateOnlyInterface(list, int):
""" Filters items in a list by class
"""
for n in list:
if int.providedBy(n):
yield n
def generateElementsQNamed(list, name, uri):
""" Filters Element items in a list with matching name and URI. """
for n in list:
if IElement.providedBy(n) and n.name == name and n.uri == uri:
yield n
def generateElementsNamed(list, name):
""" Filters Element items in a list with matching name, regardless of URI.
"""
for n in list:
if IElement.providedBy(n) and n.name == name:
yield n
class SerializedXML(unicode):
""" Marker class for pre-serialized XML in the DOM. """
pass
class Namespace:
""" Convenience object for tracking namespace declarations. """
def __init__(self, uri):
self._uri = uri
def __getattr__(self, n):
return (self._uri, n)
def __getitem__(self, n):
return (self._uri, n)
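# Illustrative: CLIENT = Namespace('jabber:client'); then CLIENT.message and
# CLIENT['message'] both yield the qname tuple ('jabber:client', 'message').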
class IElement(Interface):
"""
Interface to XML element nodes.
See L{Element} for a detailed example of its general use.
Warning: this Interface is not yet complete!
"""
uri = Attribute(""" Element's namespace URI """)
name = Attribute(""" Element's local name """)
defaultUri = Attribute(""" Default namespace URI of child elements """)
attributes = Attribute(""" Dictionary of element attributes """)
children = Attribute(""" List of child nodes """)
parent = Attribute(""" Reference to element's parent element """)
localPrefixes = Attribute(""" Dictionary of local prefixes """)
def toXml(prefixes=None, closeElement=1, defaultUri='',
prefixesInScope=None):
""" Serializes object to a (partial) XML document
@param prefixes: dictionary that maps namespace URIs to suggested
prefix names.
@type prefixes: L{dict}
@param closeElement: flag that determines whether to include the
closing tag of the element in the serialized
string. A value of C{0} only generates the
element's start tag. A value of C{1} yields a
complete serialization.
@type closeElement: C{int}
@param defaultUri: Initial default namespace URI. This is most useful
for partial rendering, where the logical parent
element (of which the starttag was already
serialized) declares a default namespace that should
be inherited.
@type defaultUri: C{str}
@param prefixesInScope: list of prefixes that are assumed to be
declared by ancestors.
@type prefixesInScope: C{list}
@return: (partial) serialized XML
@rtype: C{unicode}
"""
def addElement(name, defaultUri = None, content = None):
""" Create an element and add as child.
The new element is added to this element as a child, and will have
this element as its parent.
@param name: element name. This can be either a C{unicode} object that
contains the local name, or a tuple of (uri, local_name)
for a fully qualified name. In the former case,
the namespace URI is inherited from this element.
@type name: C{unicode} or C{tuple} of (C{unicode}, C{unicode})
@param defaultUri: default namespace URI for child elements. If
C{None}, this is inherited from this element.
@type defaultUri: C{unicode}
@param content: text contained by the new element.
@type content: C{unicode}
@return: the created element
@rtype: object providing L{IElement}
"""
def addChild(node):
""" Adds a node as child of this element.
        The C{node} will be added to the list of children of this element, and
will have this element set as its parent when C{node} provides
L{IElement}.
@param node: the child node.
@type node: C{unicode} or object implementing L{IElement}
"""
class Element(object):
""" Represents an XML element node.
An Element contains a series of attributes (name/value pairs), content
(character data), and other child Element objects. When building a document
with markup (such as HTML or XML), use this object as the starting point.
Element objects fully support XML Namespaces. The fully qualified name of
the XML Element it represents is stored in the C{uri} and C{name}
attributes, where C{uri} holds the namespace URI. There is also a default
namespace, for child elements. This is stored in the C{defaultUri}
attribute. Note that C{''} means the empty namespace.
Serialization of Elements through C{toXml()} will use these attributes
for generating proper serialized XML. When both C{uri} and C{defaultUri}
    are not None in the Element and all of its descendants, serialization
proceeds as expected:
>>> from twisted.words.xish import domish
>>> root = domish.Element(('myns', 'root'))
>>> root.addElement('child', content='test')
<twisted.words.xish.domish.Element object at 0x83002ac>
>>> root.toXml()
u"<root xmlns='myns'><child>test</child></root>"
For partial serialization, needed for streaming XML, a special value for
namespace URIs can be used: C{None}.
Using C{None} as the value for C{uri} means: this element is in whatever
namespace inherited by the closest logical ancestor when the complete XML
document has been serialized. The serialized start tag will have a
non-prefixed name, and no xmlns declaration will be generated.
Similarly, C{None} for C{defaultUri} means: the default namespace for my
child elements is inherited from the logical ancestors of this element,
when the complete XML document has been serialized.
To illustrate, an example from a Jabber stream. Assume the start tag of the
root element of the stream has already been serialized, along with several
complete child elements, and sent off, looking like this::
<stream:stream xmlns:stream='http://etherx.jabber.org/streams'
xmlns='jabber:client' to='example.com'>
...
Now suppose we want to send a complete element represented by an
object C{message} created like:
>>> message = domish.Element((None, 'message'))
>>> message['to'] = 'user@example.com'
>>> message.addElement('body', content='Hi!')
<twisted.words.xish.domish.Element object at 0x8276e8c>
>>> message.toXml()
u"<message to='user@example.com'><body>Hi!</body></message>"
    As you can see, this XML snippet has no xmlns declaration. When sent
off, it inherits the C{jabber:client} namespace from the root element.
Note that this renders the same as using C{''} instead of C{None}:
>>> presence = domish.Element(('', 'presence'))
>>> presence.toXml()
u"<presence/>"
However, if this object has a parent defined, the difference becomes
clear:
>>> child = message.addElement(('http://example.com/', 'envelope'))
>>> child.addChild(presence)
<twisted.words.xish.domish.Element object at 0x8276fac>
>>> message.toXml()
u"<message to='user@example.com'><body>Hi!</body><envelope xmlns='http://example.com/'><presence xmlns=''/></envelope></message>"
    As you can see, the <presence/> element is now in the empty namespace, not
    in the default namespace of the parent or the stream's.
@type uri: C{unicode} or None
@ivar uri: URI of this Element's name
@type name: C{unicode}
@ivar name: Name of this Element
@type defaultUri: C{unicode} or None
@ivar defaultUri: URI this Element exists within
@type children: C{list}
@ivar children: List of child Elements and content
@type parent: L{Element}
@ivar parent: Reference to the parent Element, if any.
@type attributes: L{dict}
@ivar attributes: Dictionary of attributes associated with this Element.
@type localPrefixes: L{dict}
@ivar localPrefixes: Dictionary of namespace declarations on this
element. The key is the prefix to bind the
namespace uri to.
"""
implements(IElement)
_idCounter = 0
def __init__(self, qname, defaultUri=None, attribs=None,
localPrefixes=None):
"""
@param qname: Tuple of (uri, name)
@param defaultUri: The default URI of the element; defaults to the URI
specified in C{qname}
@param attribs: Dictionary of attributes
@param localPrefixes: Dictionary of namespace declarations on this
element. The key is the prefix to bind the
namespace uri to.
"""
self.localPrefixes = localPrefixes or {}
self.uri, self.name = qname
if defaultUri is None and \
self.uri not in self.localPrefixes.itervalues():
self.defaultUri = self.uri
else:
self.defaultUri = defaultUri
self.attributes = attribs or {}
self.children = []
self.parent = None
def __getattr__(self, key):
# Check child list for first Element with a name matching the key
for n in self.children:
if IElement.providedBy(n) and n.name == key:
return n
# Tweak the behaviour so that it's more friendly about not
# finding elements -- we need to document this somewhere :)
if key.startswith('_'):
raise AttributeError(key)
else:
return None
def __getitem__(self, key):
return self.attributes[self._dqa(key)]
def __delitem__(self, key):
del self.attributes[self._dqa(key)];
def __setitem__(self, key, value):
self.attributes[self._dqa(key)] = value
def __str__(self):
""" Retrieve the first CData (content) node
"""
for n in self.children:
if isinstance(n, types.StringTypes): return n
return ""
def _dqa(self, attr):
""" Dequalify an attribute key as needed """
if isinstance(attr, types.TupleType) and not attr[0]:
return attr[1]
else:
return attr
def getAttribute(self, attribname, default = None):
""" Retrieve the value of attribname, if it exists """
return self.attributes.get(attribname, default)
def hasAttribute(self, attrib):
""" Determine if the specified attribute exists """
return self._dqa(attrib) in self.attributes
def compareAttribute(self, attrib, value):
""" Safely compare the value of an attribute against a provided value.
C{None}-safe.
"""
return self.attributes.get(self._dqa(attrib), None) == value
def swapAttributeValues(self, left, right):
""" Swap the values of two attribute. """
d = self.attributes
l = d[left]
d[left] = d[right]
d[right] = l
def addChild(self, node):
""" Add a child to this Element. """
if IElement.providedBy(node):
node.parent = self
self.children.append(node)
return self.children[-1]
def addContent(self, text):
""" Add some text data to this Element. """
c = self.children
if len(c) > 0 and isinstance(c[-1], types.StringTypes):
c[-1] = c[-1] + text
else:
c.append(text)
return c[-1]
def addElement(self, name, defaultUri = None, content = None):
result = None
if isinstance(name, type(())):
if defaultUri is None:
defaultUri = name[0]
self.children.append(Element(name, defaultUri))
else:
if defaultUri is None:
defaultUri = self.defaultUri
self.children.append(Element((defaultUri, name), defaultUri))
result = self.children[-1]
result.parent = self
if content:
result.children.append(content)
return result
def addRawXml(self, rawxmlstring):
""" Add a pre-serialized chunk o' XML as a child of this Element. """
self.children.append(SerializedXML(rawxmlstring))
def addUniqueId(self):
""" Add a unique (across a given Python session) id attribute to this
Element.
"""
self.attributes["id"] = "H_%d" % Element._idCounter
Element._idCounter = Element._idCounter + 1
def elements(self, uri=None, name=None):
"""
Iterate across all children of this Element that are Elements.
Returns a generator over the child elements. If both the C{uri} and
C{name} parameters are set, the returned generator will only yield
on elements matching the qualified name.
@param uri: Optional element URI.
@type uri: C{unicode}
@param name: Optional element name.
@type name: C{unicode}
@return: Iterator that yields objects implementing L{IElement}.
"""
if name is None:
return generateOnlyInterface(self.children, IElement)
else:
return generateElementsQNamed(self.children, name, uri)
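    # Illustrative: elem.elements() yields every child element, while
    # elem.elements('jabber:client', 'body') yields only qualified matches.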
def toXml(self, prefixes=None, closeElement=1, defaultUri='',
prefixesInScope=None):
""" Serialize this Element and all children to a string. """
s = SerializerClass(prefixes=prefixes, prefixesInScope=prefixesInScope)
s.serialize(self, closeElement=closeElement, defaultUri=defaultUri)
return s.getValue()
def firstChildElement(self):
for c in self.children:
if IElement.providedBy(c):
return c
return None
class ParserError(Exception):
""" Exception thrown when a parsing error occurs """
pass
def elementStream():
""" Preferred method to construct an ElementStream
Uses Expat-based stream if available, and falls back to Sux if necessary.
"""
try:
es = ExpatElementStream()
return es
except ImportError:
if SuxElementStream is None:
raise Exception("No parsers available :(")
es = SuxElementStream()
return es
try:
from twisted.web import sux
except:
SuxElementStream = None
else:
class SuxElementStream(sux.XMLParser):
def __init__(self):
self.connectionMade()
self.DocumentStartEvent = None
self.ElementEvent = None
self.DocumentEndEvent = None
self.currElem = None
self.rootElem = None
self.documentStarted = False
self.defaultNsStack = []
self.prefixStack = []
def parse(self, buffer):
try:
self.dataReceived(buffer)
except sux.ParseError, e:
raise ParserError, str(e)
def findUri(self, prefix):
# Walk prefix stack backwards, looking for the uri
# matching the specified prefix
stack = self.prefixStack
for i in range(-1, (len(self.prefixStack)+1) * -1, -1):
if prefix in stack[i]:
return stack[i][prefix]
return None
def gotTagStart(self, name, attributes):
defaultUri = None
localPrefixes = {}
attribs = {}
uri = None
# Pass 1 - Identify namespace decls
for k, v in attributes.items():
if k.startswith("xmlns"):
x, p = _splitPrefix(k)
if (x is None): # I.e. default declaration
defaultUri = v
else:
localPrefixes[p] = v
del attributes[k]
# Push namespace decls onto prefix stack
self.prefixStack.append(localPrefixes)
# Determine default namespace for this element; if there
# is one
if defaultUri is None:
if len(self.defaultNsStack) > 0:
defaultUri = self.defaultNsStack[-1]
else:
defaultUri = ''
# Fix up name
prefix, name = _splitPrefix(name)
if prefix is None: # This element is in the default namespace
uri = defaultUri
else:
# Find the URI for the prefix
uri = self.findUri(prefix)
# Pass 2 - Fix up and escape attributes
for k, v in attributes.items():
p, n = _splitPrefix(k)
if p is None:
attribs[n] = v
else:
attribs[(self.findUri(p)), n] = unescapeFromXml(v)
# Construct the actual Element object
e = Element((uri, name), defaultUri, attribs, localPrefixes)
# Save current default namespace
self.defaultNsStack.append(defaultUri)
# Document already started
if self.documentStarted:
# Starting a new packet
if self.currElem is None:
self.currElem = e
# Adding to existing element
else:
self.currElem = self.currElem.addChild(e)
# New document
else:
self.rootElem = e
self.documentStarted = True
self.DocumentStartEvent(e)
def gotText(self, data):
if self.currElem != None:
self.currElem.addContent(data)
def gotCData(self, data):
if self.currElem != None:
self.currElem.addContent(data)
def gotComment(self, data):
# Ignore comments for the moment
pass
entities = { "amp" : "&",
"lt" : "<",
"gt" : ">",
"apos": "'",
"quot": "\"" }
def gotEntityReference(self, entityRef):
# If this is an entity we know about, add it as content
# to the current element
if entityRef in SuxElementStream.entities:
self.currElem.addContent(SuxElementStream.entities[entityRef])
def gotTagEnd(self, name):
# Ensure the document hasn't already ended
if self.rootElem is None:
# XXX: Write more legible explanation
raise ParserError, "Element closed after end of document."
# Fix up name
prefix, name = _splitPrefix(name)
if prefix is None:
uri = self.defaultNsStack[-1]
else:
uri = self.findUri(prefix)
# End of document
if self.currElem is None:
# Ensure element name and uri matches
if self.rootElem.name != name or self.rootElem.uri != uri:
raise ParserError, "Mismatched root elements"
self.DocumentEndEvent()
self.rootElem = None
# Other elements
else:
# Ensure the tag being closed matches the name of the current
# element
if self.currElem.name != name or self.currElem.uri != uri:
# XXX: Write more legible explanation
raise ParserError, "Malformed element close"
# Pop prefix and default NS stack
self.prefixStack.pop()
self.defaultNsStack.pop()
# Check for parent null parent of current elem;
# that's the top of the stack
if self.currElem.parent is None:
self.currElem.parent = self.rootElem
self.ElementEvent(self.currElem)
self.currElem = None
# Anything else is just some element wrapping up
else:
self.currElem = self.currElem.parent
class ExpatElementStream:
def __init__(self):
import pyexpat
self.DocumentStartEvent = None
self.ElementEvent = None
self.DocumentEndEvent = None
self.error = pyexpat.error
self.parser = pyexpat.ParserCreate("UTF-8", " ")
self.parser.StartElementHandler = self._onStartElement
self.parser.EndElementHandler = self._onEndElement
self.parser.CharacterDataHandler = self._onCdata
self.parser.StartNamespaceDeclHandler = self._onStartNamespace
self.parser.EndNamespaceDeclHandler = self._onEndNamespace
self.currElem = None
self.defaultNsStack = ['']
self.documentStarted = 0
self.localPrefixes = {}
def parse(self, buffer):
try:
self.parser.Parse(buffer)
except self.error, e:
raise ParserError, str(e)
def _onStartElement(self, name, attrs):
# Generate a qname tuple from the provided name. See
# http://docs.python.org/library/pyexpat.html#xml.parsers.expat.ParserCreate
# for an explanation of the formatting of name.
qname = name.rsplit(" ", 1)
if len(qname) == 1:
qname = ('', name)
# Process attributes
for k, v in attrs.items():
if " " in k:
aqname = k.rsplit(" ", 1)
attrs[(aqname[0], aqname[1])] = v
del attrs[k]
# Construct the new element
e = Element(qname, self.defaultNsStack[-1], attrs, self.localPrefixes)
self.localPrefixes = {}
# Document already started
if self.documentStarted == 1:
if self.currElem != None:
self.currElem.children.append(e)
e.parent = self.currElem
self.currElem = e
# New document
else:
self.documentStarted = 1
self.DocumentStartEvent(e)
def _onEndElement(self, _):
# Check for null current elem; end of doc
if self.currElem is None:
self.DocumentEndEvent()
# Check for parent that is None; that's
# the top of the stack
elif self.currElem.parent is None:
self.ElementEvent(self.currElem)
self.currElem = None
# Anything else is just some element in the current
# packet wrapping up
else:
self.currElem = self.currElem.parent
def _onCdata(self, data):
if self.currElem != None:
self.currElem.addContent(data)
def _onStartNamespace(self, prefix, uri):
# If this is the default namespace, put
# it on the stack
if prefix is None:
self.defaultNsStack.append(uri)
else:
self.localPrefixes[prefix] = uri
def _onEndNamespace(self, prefix):
# Remove last element on the stack
if prefix is None:
self.defaultNsStack.pop()
## class FileParser(ElementStream):
## def __init__(self):
## ElementStream.__init__(self)
## self.DocumentStartEvent = self.docStart
## self.ElementEvent = self.elem
## self.DocumentEndEvent = self.docEnd
## self.done = 0
## def docStart(self, elem):
## self.document = elem
## def elem(self, elem):
## self.document.addChild(elem)
## def docEnd(self):
## self.done = 1
## def parse(self, filename):
## for l in open(filename).readlines():
## self.parser.Parse(l)
## assert self.done == 1
## return self.document
## def parseFile(filename):
## return FileParser().parse(filename)
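if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: wire the three
    # stream callbacks and feed a complete document through the parser.
    def onStart(root):
        print "stream started: %s" % root.name
    def onElement(elem):
        print "got element: %s" % elem.toXml()
    def onEnd():
        print "stream ended"
    es = elementStream()
    es.DocumentStartEvent = onStart
    es.ElementEvent = onElement
    es.DocumentEndEvent = onEnd
    es.parse("<stream><message>hi</message></stream>")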
|
yencarnacion/jaikuengine
|
refs/heads/master
|
.google_appengine/lib/cherrypy/cherrypy/test/logtest.py
|
36
|
"""logtest, a unittest.TestCase helper for testing log output."""
import sys
import time
import cherrypy
from cherrypy._cpcompat import basestring, ntob, unicodestr
try:
# On Windows, msvcrt.getch reads a single char without output.
import msvcrt
def getchar():
return msvcrt.getch()
except ImportError:
    # Unix getchar
import tty, termios
def getchar():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class LogCase(object):
"""unittest.TestCase mixin for testing log messages.
logfile: a filename for the desired log. Yes, I know modes are evil,
but it makes the test functions so much cleaner to set this once.
lastmarker: the last marker in the log. This can be used to search for
messages since the last marker.
markerPrefix: a string with which to prefix log markers. This should be
unique enough from normal log output to use for marker identification.
"""
logfile = None
lastmarker = None
markerPrefix = ntob("test suite marker: ")
def _handleLogError(self, msg, data, marker, pattern):
print("")
print(" ERROR: %s" % msg)
if not self.interactive:
raise self.failureException(msg)
p = " Show: [L]og [M]arker [P]attern; [I]gnore, [R]aise, or sys.e[X]it >> "
sys.stdout.write(p + ' ')
# ARGH
sys.stdout.flush()
while True:
i = getchar().upper()
if i not in "MPLIRX":
continue
print(i.upper()) # Also prints new line
if i == "L":
for x, line in enumerate(data):
if (x + 1) % self.console_height == 0:
# The \r and comma should make the next line overwrite
sys.stdout.write("<-- More -->\r ")
m = getchar().lower()
# Erase our "More" prompt
sys.stdout.write(" \r ")
if m == "q":
break
print(line.rstrip())
elif i == "M":
print(repr(marker or self.lastmarker))
elif i == "P":
print(repr(pattern))
elif i == "I":
# return without raising the normal exception
return
elif i == "R":
raise self.failureException(msg)
elif i == "X":
self.exit()
sys.stdout.write(p + ' ')
def exit(self):
sys.exit()
def emptyLog(self):
"""Overwrite self.logfile with 0 bytes."""
open(self.logfile, 'wb').write("")
def markLog(self, key=None):
"""Insert a marker line into the log and set self.lastmarker."""
if key is None:
key = str(time.time())
self.lastmarker = key
open(self.logfile, 'ab+').write(ntob("%s%s\n" % (self.markerPrefix, key),"utf-8"))
def _read_marked_region(self, marker=None):
"""Return lines from self.logfile in the marked region.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be returned.
"""
## # Give the logger time to finish writing?
## time.sleep(0.5)
logfile = self.logfile
marker = marker or self.lastmarker
if marker is None:
return open(logfile, 'rb').readlines()
if isinstance(marker, unicodestr):
marker = marker.encode('utf-8')
data = []
in_region = False
for line in open(logfile, 'rb'):
if in_region:
if (line.startswith(self.markerPrefix) and not marker in line):
break
else:
data.append(line)
elif marker in line:
in_region = True
return data
def assertInLog(self, line, marker=None):
"""Fail if the given (partial) line is not in the log.
The log will be searched from the given marker to the next marker.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be searched.
"""
data = self._read_marked_region(marker)
for logline in data:
if line in logline:
return
msg = "%r not found in log" % line
self._handleLogError(msg, data, marker, line)
def assertNotInLog(self, line, marker=None):
"""Fail if the given (partial) line is in the log.
The log will be searched from the given marker to the next marker.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be searched.
"""
data = self._read_marked_region(marker)
for logline in data:
if line in logline:
msg = "%r found in log" % line
self._handleLogError(msg, data, marker, line)
def assertLog(self, sliceargs, lines, marker=None):
"""Fail if log.readlines()[sliceargs] is not contained in 'lines'.
The log will be searched from the given marker to the next marker.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be searched.
"""
data = self._read_marked_region(marker)
if isinstance(sliceargs, int):
# Single arg. Use __getitem__ and allow lines to be str or list.
if isinstance(lines, (tuple, list)):
lines = lines[0]
if isinstance(lines, unicodestr):
lines = lines.encode('utf-8')
if lines not in data[sliceargs]:
msg = "%r not found on log line %r" % (lines, sliceargs)
self._handleLogError(msg, [data[sliceargs],"--EXTRA CONTEXT--"] + data[sliceargs+1:sliceargs+6], marker, lines)
else:
# Multiple args. Use __getslice__ and require lines to be list.
if isinstance(lines, tuple):
lines = list(lines)
elif isinstance(lines, basestring):
raise TypeError("The 'lines' arg must be a list when "
"'sliceargs' is a tuple.")
start, stop = sliceargs
for line, logline in zip(lines, data[start:stop]):
if isinstance(line, unicodestr):
line = line.encode('utf-8')
if line not in logline:
msg = "%r not found in log" % line
self._handleLogError(msg, data[start:stop], marker, line)
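if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: mix LogCase into
    # a unittest.TestCase, mark the log, emit a line, then search the marked
    # region. The logfile name here is an assumption made for the demo.
    import unittest
    class DemoLogTest(LogCase, unittest.TestCase):
        logfile = 'demo.log'
        interactive = False
        def test_marker_search(self):
            self.emptyLog()
            self.markLog()
            open(self.logfile, 'ab+').write(ntob('something WARNING-ish\n'))
            self.assertInLog('WARNING')
    unittest.main()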
|
lavish205/olympia
|
refs/heads/master
|
src/olympia/lib/es/tests/test_models.py
|
13
|
import mock
from olympia.amo.tests import TestCase
from olympia.lib.es.models import Reindexing
class TestReindexManager(TestCase):
def test_flag_reindexing(self):
assert Reindexing.objects.filter(site='foo').count() == 0
# Flagging for the first time.
res = Reindexing.objects._flag_reindexing('foo', 'bar', 'baz', 'quux')
assert Reindexing.objects.filter(site='foo').count() == 1
assert res.site == 'foo'
assert res.new_index == 'bar'
assert res.old_index == 'baz'
assert res.alias == 'quux'
# Flagging for the second time.
res = Reindexing.objects._flag_reindexing('foo', 'bar', 'baz', 'quux')
assert Reindexing.objects.filter(site='foo').count() == 1
assert res is None
@mock.patch('olympia.lib.es.models.ReindexingManager._flag_reindexing')
def test_flag_reindexing_amo(self, flag_reindexing_mock):
Reindexing.objects.flag_reindexing_amo('bar', 'baz', 'quux')
        flag_reindexing_mock.assert_called_with('amo', 'bar', 'baz', 'quux')
def test_unflag_reindexing(self):
assert Reindexing.objects.filter(site='foo').count() == 0
# Unflagging unflagged database does nothing.
Reindexing.objects._unflag_reindexing('foo')
assert Reindexing.objects.filter(site='foo').count() == 0
# Flag, then unflag.
Reindexing.objects.create(site='foo', new_index='bar', old_index='baz',
alias='quux')
assert Reindexing.objects.filter(site='foo').count() == 1
Reindexing.objects._unflag_reindexing('foo')
assert Reindexing.objects.filter(site='foo').count() == 0
# Unflagging another site doesn't clash.
Reindexing.objects.create(site='bar', new_index='bar', old_index='baz',
alias='quux')
Reindexing.objects._unflag_reindexing('foo')
assert Reindexing.objects.filter(site='bar').count() == 1
@mock.patch('olympia.lib.es.models.ReindexingManager._unflag_reindexing')
def test_unflag_reindexing_amo(self, unflag_reindexing_mock):
Reindexing.objects.unflag_reindexing_amo()
        unflag_reindexing_mock.assert_called_with('amo')
def test_is_reindexing(self):
assert Reindexing.objects.filter(site='foo').count() == 0
assert not Reindexing.objects._is_reindexing('foo')
Reindexing.objects.create(site='foo', new_index='bar', old_index='baz',
alias='quux')
assert Reindexing.objects._is_reindexing('foo')
# Reindexing on another site doesn't clash.
assert not Reindexing.objects._is_reindexing('bar')
@mock.patch('olympia.lib.es.models.ReindexingManager._is_reindexing')
def test_is_reindexing_amo(self, is_reindexing_mock):
Reindexing.objects.is_reindexing_amo()
        is_reindexing_mock.assert_called_with('amo')
def test_get_indices(self):
# Not reindexing.
assert Reindexing.objects.filter(alias='foo').count() == 0
assert Reindexing.objects.get_indices('foo') == ['foo']
# Reindexing on 'foo'.
Reindexing.objects.create(site='foo', new_index='bar', old_index='baz',
alias='quux')
assert Reindexing.objects.get_indices('quux') == ['bar', 'baz']
# Doesn't clash on other sites.
assert Reindexing.objects.get_indices('other') == ['other']
|
izgzhen/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/__init__.py
|
1447
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
BenignBeppe/common-desktop
|
refs/heads/master
|
common_desktop.py
|
1
|
#! /usr/bin/env python3
import urllib
import json
import sqlite3
import random
import os
import logging
import datetime
import argparse
import subprocess
import webbrowser
import requests
LOGS_PATH = "logs"
IMAGES_PATH = "images"
DB_PATH = "images.db"
URL = "https://commons.wikimedia.org/w/api.php"
GNOME_SET_BACKGROUND_COMMAND = "gsettings set org.gnome.desktop.background picture-uri file://{image_path}"
MATE_SET_BACKGROUND_COMMAND = "gsettings set org.mate.background picture-filename {image_path}"
COMMONS_PAGE_BY_ID = "https://commons.wikimedia.org/w/?curid={page_id}"
DEFAULT_FETCH_AMOUNT = 100
# Statuses for an image.
FAVORITE = 1
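# Typical invocations (illustrative; the category name is an assumption,
# the flags match the argparse definitions at the bottom of this script):
#   python3 common_desktop.py --fetch-ids "Featured pictures" 50
#   python3 common_desktop.py --new-image --print-log
#   python3 common_desktop.py --favorite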
def setup_logging(print_log):
ensure_path_exists(LOGS_PATH)
log_path = "{}/common-desktop.log".format(LOGS_PATH)
logging.basicConfig(
filename=log_path,
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s"
)
if print_log:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(
logging.Formatter("%(asctime)s %(message)s")
)
logging.getLogger().addHandler(stream_handler)
def ensure_path_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def populate_table(connection, category, number_of_pages_to_fetch):
ensure_images_table_exists(connection)
number_of_added_pages = 0
for page_id in get_page_ids(category):
page = connection.execute(
"SELECT * FROM images WHERE id={}".format(page_id)
).fetchone()
if page is not None:
logging.info("Id {} already exists, skipping.".format(page_id))
else:
connection.execute(
"INSERT INTO images VALUES({id}, 0, 0, 0)".format(id=page_id)
)
connection.commit()
logging.info("Added page id to database: {}".format(page_id))
number_of_added_pages += 1
if number_of_added_pages == number_of_pages_to_fetch:
break
logging.info(
"Added {} new pages to database.".format(number_of_added_pages)
)
def ensure_images_table_exists(connection):
connection.execute("CREATE TABLE IF NOT EXISTS images (id int UNIQUE, current boolean, status int, last_shown int)")
connection.commit()
def get_page_ids(category):
parameters = {
"action": "query",
"format": "json",
"list": "categorymembers",
"cmtitle": category,
"cmtype": "file|subcat",
"cmlimit": "500",
"continue": ""
}
while True:
response = send_request(parameters)
pages = response["query"]["categorymembers"]
for page in pages:
if page["ns"] == 6:
# Page is in the File namespace.
yield page["pageid"]
elif page["ns"] == 14:
# Page is in the Category namespace.
catergory = page["title"]
for id in get_page_ids(catergory):
yield id
if "continue" not in response:
# Keep fetching until there is no continue paramter.
break
parameters["continue"] = response["continue"]["continue"]
parameters["cmcontinue"] = response["continue"]["cmcontinue"]
def send_request(parameters):
parameters_string = urllib.parse.urlencode(parameters)
logging.debug("REQUEST: {}?{}".format(URL, parameters_string))
response = requests.get(URL, params=parameters)
logging.debug("RESPONSE: {}".format(response.json()))
return response.json()
def change_image(connection):
old_id = get_current_id(connection)
if old_id is not None:
status = connection.execute(
"SELECT status FROM images WHERE id={}".format(old_id)
).fetchone()[0]
if status != FAVORITE:
old_path = get_path_for_page_id(old_id)
if old_path is not None:
logging.info("Removing image: {}.".format(old_path))
os.remove(old_path)
connection.execute(
"UPDATE images SET current=0, last_shown=strftime('%s','now') WHERE id={}".format(old_id)
)
page_id = pick_page(connection)
if page_id is None:
logging.info("No new image to switch to, keeping current one.");
else:
if not image_exists(page_id):
image_url = get_image_url(page_id)
download_image(image_url, page_id)
image_path = get_path_for_page_id(page_id)
set_desktop_image(image_path)
connection.execute(
"UPDATE images SET current=1 WHERE id={}".format(page_id)
)
connection.commit()
def pick_page(connection):
    # Pick from favorites and images that haven't been shown yet.
page_ids = connection.execute(
"SELECT id FROM images WHERE status=1 or last_shown=0"
).fetchall()
if page_ids:
return random.choice(page_ids)[0]
def image_exists(page_id):
return get_path_for_page_id(page_id)
def get_path_for_page_id(page_id):
files = os.listdir(IMAGES_PATH)
for file_ in files:
if os.path.splitext(file_)[0] == str(page_id):
return "{}/{}".format(IMAGES_PATH, file_)
def get_image_url(page_id):
parameters = {
"action": "query",
"format": "json",
"pageids": page_id,
"prop": "imageinfo",
"iiprop": "url"
}
response = send_request(parameters)
pages = response["query"]["pages"]
url = pages[list(pages)[0]]["imageinfo"][0]["url"]
return url
def download_image(image_url, page_id):
file_ending = os.path.splitext(image_url)[1]
path = "{}/{}{}".format(IMAGES_PATH, page_id, file_ending)
logging.info("Downloading {} to {}.".format(image_url, path))
urllib.request.urlretrieve(image_url, path)
def set_desktop_image(image_path):
command = get_set_background_command().format(
image_path=os.path.abspath(image_path)
)
logging.info("Running: {}".format(command))
subprocess.call(command.split())
def get_set_background_command():
if process_is_running("gnome-session"):
return GNOME_SET_BACKGROUND_COMMAND
elif process_is_running("mate-session"):
return MATE_SET_BACKGROUND_COMMAND
def process_is_running(process_name):
user_name = subprocess.check_output(["whoami"]).decode()
command = "pgrep -u {} {}".format(user_name, process_name).split()
try:
return subprocess.check_output(command) != ""
except subprocess.CalledProcessError:
# This seems to happen when the program isn't installed.
return False
def show_image_page(connection):
page_id = get_current_id(connection)
webbrowser.open(COMMONS_PAGE_BY_ID.format(page_id=page_id))
def get_current_id(connection):
current_id = connection.execute(
"SELECT id FROM images WHERE current=1"
).fetchone()
if current_id is not None:
return current_id[0]
def set_current_status(connection, new_status):
current_id = get_current_id(connection)
logging.info(
"Setting status of current image (id={}) to: {}."
.format(current_id, new_status)
)
connection.execute(
"UPDATE images SET status={} WHERE id={}".format(
new_status, current_id
)
)
connection.commit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--fetch-ids",
"-f",
nargs="+",
metavar=("CATEGORY", "AMOUNT"),
help="Update the page ids in the database by getting the page ids from CATEGORY and its subcategories. AMOUNT is the maximum number of ids that will be fetched, default {}.".format(DEFAULT_FETCH_AMOUNT)
)
parser.add_argument(
"--print-log",
"-l",
action="store_true",
help="Write log messages to stderr as well as log file."
)
parser.add_argument(
"--new-image",
"-n",
action="store_true",
help="Change to a new desktop image."
)
parser.add_argument(
"--information",
"-i",
action="store_true",
help="Open the page for the current image on Wikimedia Commons in the default web browser."
)
parser.add_argument(
"--favorite",
"-a",
action="store_true",
help="Marks the current image as a favorite. Favorites won't be deleted when swithing to a new image."
)
args = parser.parse_args()
    setup_logging(args.print_log)
ensure_path_exists(IMAGES_PATH)
connection = sqlite3.connect(DB_PATH)
if args.fetch_ids:
if len(args.fetch_ids) > 1:
amount = int(args.fetch_ids[1])
else:
amount = DEFAULT_FETCH_AMOUNT
populate_table(
connection,
"Category:{}".format(args.fetch_ids[0]),
amount
)
if args.new_image:
change_image(connection)
if args.information:
show_image_page(connection)
if args.favorite:
set_current_status(connection, FAVORITE)
connection.close()
|
marshall/titanium
|
refs/heads/master
|
site_scons/simplejson/tests/test_unicode.py
|
123
|
from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"%s"' % (u,))
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["%s"]' % (u,))
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
s = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(s), u)
def test_default_encoding(self):
self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEquals(type(json.loads(u'""')), unicode)
self.assertEquals(type(json.loads(u'"a"')), unicode)
self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
|
b-jesch/service.fritzbox.callmonitor
|
refs/heads/master
|
resources/lib/PhoneBooks/pyicloud/vendorlibs/click/globals.py
|
234
|
from threading import local
_local = local()
def get_current_context(silent=False):
"""Returns the current click context. This can be used as a way to
access the current context object from anywhere. This is a more implicit
alternative to the :func:`pass_context` decorator. This function is
primarily useful for helpers such as :func:`echo` which might be
    interested in changing its behavior based on the current context.
To push the current context, :meth:`Context.scope` can be used.
.. versionadded:: 5.0
    :param silent: if set to `True` the return value is `None` if no context
is available. The default behavior is to raise a
:exc:`RuntimeError`.
"""
try:
return getattr(_local, 'stack')[-1]
except (AttributeError, IndexError):
if not silent:
raise RuntimeError('There is no active click context.')
def push_context(ctx):
"""Pushes a new context to the current stack."""
_local.__dict__.setdefault('stack', []).append(ctx)
def pop_context():
"""Removes the top level from the stack."""
_local.stack.pop()
def resolve_color_default(color=None):
""""Internal helper to get the default value of the color flag. If a
value is passed it's returned unchanged, otherwise it's looked up from
the current context.
"""
if color is not None:
return color
ctx = get_current_context(silent=True)
if ctx is not None:
return ctx.color
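if __name__ == '__main__':
    # Illustrative sketch, not part of the original module; assumes the full
    # `click` package is installed. Inside a running command, the context
    # click pushed is reachable from any helper without passing it around.
    import click
    def helper():
        ctx = click.get_current_context()
        click.echo('running command: %s' % ctx.info_name)
    @click.command()
    def cli():
        helper()
    cli()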
|
carolinux/QGIS
|
refs/heads/master
|
python/ext-libs/pygments/styles/emacs.py
|
364
|
# -*- coding: utf-8 -*-
"""
pygments.styles.emacs
~~~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by Emacs.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class EmacsStyle(Style):
"""
The default style (inspired by Emacs 22).
"""
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #008800",
Comment.Preproc: "noitalic",
Comment.Special: "noitalic bold",
Keyword: "bold #AA22FF",
Keyword.Pseudo: "nobold",
Keyword.Type: "bold #00BB00",
Operator: "#666666",
Operator.Word: "bold #AA22FF",
Name.Builtin: "#AA22FF",
Name.Function: "#00A000",
Name.Class: "#0000FF",
Name.Namespace: "bold #0000FF",
Name.Exception: "bold #D2413A",
Name.Variable: "#B8860B",
Name.Constant: "#880000",
Name.Label: "#A0A000",
Name.Entity: "bold #999999",
Name.Attribute: "#BB4444",
Name.Tag: "bold #008000",
Name.Decorator: "#AA22FF",
String: "#BB4444",
String.Doc: "italic",
String.Interpol: "bold #BB6688",
String.Escape: "bold #BB6622",
String.Regex: "#BB6688",
String.Symbol: "#B8860B",
String.Other: "#008000",
Number: "#666666",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
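if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: render a snippet
    # to HTML with this style through the standard Pygments API.
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    print(highlight('print 42', PythonLexer(), HtmlFormatter(style=EmacsStyle)))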
|
swarna-k/MyDiary
|
refs/heads/master
|
flask/lib/python2.7/site-packages/babel/localedata.py
|
136
|
# -*- coding: utf-8 -*-
"""
babel.localedata
~~~~~~~~~~~~~~~~
Low-level locale data access.
:note: The `Locale` class, which uses this module under the hood, provides a
more convenient interface for accessing the locale data.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import os
import threading
from collections import MutableMapping
from babel._compat import pickle
_cache = {}
_cache_lock = threading.RLock()
_dirname = os.path.join(os.path.dirname(__file__), 'localedata')
def exists(name):
"""Check whether locale data is available for the given locale. Ther
return value is `True` if it exists, `False` otherwise.
:param name: the locale identifier string
"""
if name in _cache:
return True
return os.path.exists(os.path.join(_dirname, '%s.dat' % name))
def locale_identifiers():
"""Return a list of all locale identifiers for which locale data is
available.
.. versionadded:: 0.8.1
:return: a list of locale identifiers (strings)
"""
return [stem for stem, extension in [
os.path.splitext(filename) for filename in os.listdir(_dirname)
] if extension == '.dat' and stem != 'root']
def load(name, merge_inherited=True):
"""Load the locale data for the given locale.
The locale data is a dictionary that contains much of the data defined by
the Common Locale Data Repository (CLDR). This data is stored as a
collection of pickle files inside the ``babel`` package.
>>> d = load('en_US')
>>> d['languages']['sv']
u'Swedish'
Note that the results are cached, and subsequent requests for the same
locale return the same dictionary:
>>> d1 = load('en_US')
>>> d2 = load('en_US')
>>> d1 is d2
True
:param name: the locale identifier string (or "root")
:param merge_inherited: whether the inherited data should be merged into
the data of the requested locale
:raise `IOError`: if no locale data file is found for the given locale
                      identifier, or one of the locales it inherits from
"""
_cache_lock.acquire()
try:
data = _cache.get(name)
if not data:
# Load inherited data
if name == 'root' or not merge_inherited:
data = {}
else:
parts = name.split('_')
if len(parts) == 1:
parent = 'root'
else:
parent = '_'.join(parts[:-1])
data = load(parent).copy()
filename = os.path.join(_dirname, '%s.dat' % name)
fileobj = open(filename, 'rb')
try:
if name != 'root' and merge_inherited:
merge(data, pickle.load(fileobj))
else:
data = pickle.load(fileobj)
_cache[name] = data
finally:
fileobj.close()
return data
finally:
_cache_lock.release()
def merge(dict1, dict2):
"""Merge the data from `dict2` into the `dict1` dictionary, making copies
of nested dictionaries.
>>> d = {1: 'foo', 3: 'baz'}
>>> merge(d, {1: 'Foo', 2: 'Bar'})
>>> items = d.items(); items.sort(); items
[(1, 'Foo'), (2, 'Bar'), (3, 'baz')]
:param dict1: the dictionary to merge into
:param dict2: the dictionary containing the data that should be merged
"""
for key, val2 in dict2.items():
if val2 is not None:
val1 = dict1.get(key)
if isinstance(val2, dict):
if val1 is None:
val1 = {}
if isinstance(val1, Alias):
val1 = (val1, val2)
elif isinstance(val1, tuple):
alias, others = val1
others = others.copy()
merge(others, val2)
val1 = (alias, others)
else:
val1 = val1.copy()
merge(val1, val2)
else:
val1 = val2
dict1[key] = val1
class Alias(object):
"""Representation of an alias in the locale data.
An alias is a value that refers to some other part of the locale data,
as specified by the `keys`.
"""
def __init__(self, keys):
self.keys = tuple(keys)
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.keys)
def resolve(self, data):
"""Resolve the alias based on the given data.
This is done recursively, so if one alias resolves to a second alias,
that second alias will also be resolved.
:param data: the locale data
:type data: `dict`
"""
base = data
for key in self.keys:
data = data[key]
if isinstance(data, Alias):
data = data.resolve(base)
elif isinstance(data, tuple):
alias, others = data
data = alias.resolve(base)
return data
class LocaleDataDict(MutableMapping):
"""Dictionary wrapper that automatically resolves aliases to the actual
values.
"""
def __init__(self, data, base=None):
self._data = data
if base is None:
base = data
self.base = base
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
orig = val = self._data[key]
if isinstance(val, Alias): # resolve an alias
val = val.resolve(self.base)
if isinstance(val, tuple): # Merge a partial dict with an alias
alias, others = val
val = alias.resolve(self.base).copy()
merge(val, others)
if type(val) is dict: # Return a nested alias-resolving dict
val = LocaleDataDict(val, base=self.base)
if val is not orig:
self._data[key] = val
return val
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
del self._data[key]
def copy(self):
return LocaleDataDict(self._data.copy(), base=self.base)
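if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: aliases resolve
    # transparently when raw locale data is read through a LocaleDataDict.
    raw = {'weekdays': {'mon': u'Monday'},
           'short_weekdays': Alias(('weekdays',))}
    wrapped = LocaleDataDict(raw)
    print(wrapped['short_weekdays']['mon'])  # u'Monday'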
|
MatthewWilkes/django-oscar
|
refs/heads/master
|
src/oscar/apps/basket/managers.py
|
57
|
from django.db import models
class OpenBasketManager(models.Manager):
"""For searching/creating OPEN baskets only."""
status_filter = "Open"
def get_queryset(self):
return super(OpenBasketManager, self).get_queryset().filter(
status=self.status_filter)
def get_or_create(self, **kwargs):
return self.get_queryset().get_or_create(
status=self.status_filter, **kwargs)
class SavedBasketManager(models.Manager):
"""For searching/creating SAVED baskets only."""
status_filter = "Saved"
def get_queryset(self):
return super(SavedBasketManager, self).get_queryset().filter(
status=self.status_filter)
def create(self, **kwargs):
return self.get_queryset().create(status=self.status_filter, **kwargs)
def get_or_create(self, **kwargs):
return self.get_queryset().get_or_create(
status=self.status_filter, **kwargs)
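# Usage sketch (illustrative; assumes a ``Basket`` model with a ``status``
# field, as in Oscar's abstract basket model):
#
#     class Basket(models.Model):
#         status = models.CharField(max_length=128, default="Open")
#
#         objects = models.Manager()
#         open = OpenBasketManager()
#         saved = SavedBasketManager()
#
# ``Basket.open.get_or_create(owner=user)`` then only ever matches or
# creates baskets whose status is "Open".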
|
nan86150/ImageFusion
|
refs/heads/master
|
ENV2.7/lib/python2.7/site-packages/setuptools/py27compat.py
|
958
|
"""
Compatibility Support for Python 2.7 and earlier
"""
import sys
def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
if sys.version_info < (3,):
def get_all_headers(message, key):
return message.getheaders(key)
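# Usage sketch (illustrative; ``response`` is a hypothetical urllib response
# object): on Python 3 an HTTPMessage is an ``email.message.Message``, so
# ``get_all`` applies; on Python 2 the equivalent ``mimetools.Message`` API
# spells it ``getheaders``.
#
#     content_types = get_all_headers(response.info(), 'Content-Type')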
|
eroicaleo/LearningPython
|
refs/heads/master
|
interview/leet/37_Sudoku_Solver.py
|
1
|
#!/usr/bin/env python3
import itertools
from collections import Counter
class Solution:
def solveSudoku(self, board):
self.d = {}
self.empty_space = len([(i,j) for i, j in itertools.product(range(9), range(9)) if board[i][j] == '.'])
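        # Added commentary: self.d maps each cell (i, j) to a Counter of
        # digit -> how many of that cell's constraints (row, column, 3x3 box)
        # currently contain the digit. update() shifts those counts when a
        # digit is placed (inc=+1) or retracted (inc=-1), and max_entry()
        # picks the empty cell with the most forbidden digits, i.e. the most
        # constrained cell, to branch on first.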
def update(i, j, n, inc):
for k in range(9):
self.d.setdefault((i, k), Counter())[n] += inc
for k in range(9):
self.d.setdefault((k, j), Counter())[n] += inc
for k, l in itertools.product(range(3), range(3)):
x, y = (i//3)*3+k, (j//3)*3+l
self.d.setdefault((x, y), Counter())[n] += inc
def max_entry():
return max([(i,j) for i, j in itertools.product(range(9), range(9)) if board[i][j] == '.'], key=lambda k: len([v for v in self.d[k] if self.d[k][v] > 0]))
for i in range(9):
for j in range(9):
if board[i][j] != '.':
update(i, j, board[i][j], 1)
print(f'max_entry : {self.d[max_entry()]}')
def debug(title):
print('#'*80)
print(title)
print('#'*80)
for row in board:
print(row)
for k in sorted(self.d):
print(k, self.d[k])
def dfs():
if self.empty_space == 0:
return True
x, y = max_entry()
self.empty_space -= 1
a = set(k for k in self.d[(x, y)] if self.d[(x,y)][k] > 0)
for i in set(list("123456789")) - a:
update(x,y,i,1)
board[x][y] = i
debug(f'After updating, {self.empty_space}, x = {x}, y = {y}')
if dfs():
return True
update(x,y,i,-1)
debug('After removing')
board[x][y] = '.'
self.empty_space += 1
return False
dfs()
board = [
['5','3','.','.','7','.','.','.','.'],
['6','.','.','1','9','5','.','.','.'],
['.','9','8','.','.','.','.','6','.'],
['8','.','.','.','6','.','.','.','3'],
['4','.','.','8','.','3','.','.','1'],
['7','.','.','.','2','.','.','.','6'],
['.','6','.','.','.','.','2','8','.'],
['.','.','.','4','1','9','.','.','5'],
['.','.','.','.','8','.','.','7','9'],
]
board = [
[".",".","9","7","4","8",".",".","."],
["7",".",".",".",".",".",".",".","."],
[".","2",".","1",".","9",".",".","."],
[".",".","7",".",".",".","2","4","."],
[".","6","4",".","1",".","5","9","."],
[".","9","8",".",".",".","3",".","."],
[".",".",".","8",".","3",".","2","."],
[".",".",".",".",".",".",".",".","6"],
[".",".",".","2","7","5","9",".","."]]
final_board = [
["5","1","9","7","4","8","6","3","2"],
["7","8","3","6","5","2","4","1","9"],
["4","2","6","1","3","9","8","7","5"],
["3","5","7","9","8","6","2","4","1"],
["2","6","4","3","1","7","5","9","8"],
["1","9","8","5","2","4","3","6","7"],
["9","7","5","8","6","3","1","2","4"],
["8","3","2","4","9","1","7","5","6"],
["6","4","1","2","7","5","9","8","3"]]
sol = Solution()
print(sol.solveSudoku(board))
print(list(itertools.product(range(3), range(3))))
# print(sum([set([1,2]), set([2,3])]))
|
ndp-systemes/odoo-addons
|
refs/heads/8.0
|
report_aeroo_improved/__openerp__.py
|
1
|
# -*- coding: utf8 -*-
#
# Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
{
'name': 'Report Aeroo Report improved',
'version': '0.1',
'author': 'NDP Systèmes',
'maintainer': 'NDP Systèmes',
'category': 'Report',
'depends': ['web_report_improved', 'report_aeroo'],
'description': """
Add the feature of web_report_improved to the aeroo report
""",
'website': 'http://www.ndp-systemes.fr',
'data': [
'ir_report_aeroo.xml'
],
'demo': [],
'test': [],
'installable': True,
'auto_install': True,
'license': 'AGPL-3',
'application': False,
'sequence': 999,
}
|
osvalr/odoo
|
refs/heads/8.0
|
addons/account/project/__init__.py
|
427
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mattvenn/Arduino
|
refs/heads/esp8266
|
arduino-core/src/processing/app/i18n/python/requests/api.py
|
637
|
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request.
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
    >>> req = requests.request('GET', 'http://httpbin.org/get')
    >>> req
    <Response [200]>
"""
session = sessions.Session()
return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
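# Usage sketch (illustrative):
#
#     import requests
#     r = requests.get('http://httpbin.org/get', params={'key': 'value'})
#     r = requests.post('http://httpbin.org/post', data={'key': 'value'})
#     r.status_code, r.headers['content-type'], r.text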
|
cryptobanana/ansible
|
refs/heads/devel
|
lib/ansible/modules/files/iso_extract.py
|
101
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Jeroen Hoekx (@jhoekx)
- Matt Robinson (@ribbons)
- Dag Wieers (@dagwieers)
module: iso_extract
short_description: Extract files from an ISO image
description:
  - This module has two possible modes of operation.
- If 7zip is installed on the system, this module extracts files from an ISO
into a temporary directory and copies files to a given destination,
if needed.
- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
mounts the ISO image to a temporary location, and copies files to a given
destination, if needed.
version_added: '2.3'
requirements:
- Either 7z (from I(7zip) or I(p7zip) package)
- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
options:
image:
description:
- The ISO image to extract files from.
required: yes
aliases: [ path, src ]
dest:
description:
- The destination directory to extract files to.
required: yes
files:
description:
- A list of files to extract from the image.
- Extracting directories does not work.
required: yes
force:
description:
      - If C(yes), the remote file will be replaced when its contents differ from the source.
- If C(no), the file will only be extracted and copied if the destination does not already exist.
type: bool
default: 'yes'
aliases: [ thirsty ]
version_added: '2.4'
executable:
description:
- The path to the C(7z) executable to use for extracting files from the ISO.
default: '7z'
version_added: '2.4'
notes:
- Only the file checksum (content) is taken into account when extracting files
  from the ISO image. If C(force=no), only the presence of the file is checked.
- In Ansible v2.3 this module was using C(mount) and C(umount) commands only,
requiring root access. This is no longer needed with the introduction of 7zip
for extraction.
'''
EXAMPLES = r'''
- name: Extract kernel and ramdisk from a LiveCD
iso_extract:
image: /tmp/rear-test.iso
dest: /tmp/virt-rear/
files:
- isolinux/kernel
- isolinux/initrd.cgz
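# Illustrative variant (not in the original examples): exercises the
# documented 'executable' and 'force' options.
- name: Extract kernel with an explicit 7z binary, replacing changed copies
  iso_extract:
    image: /tmp/rear-test.iso
    dest: /tmp/virt-rear/
    files:
      - isolinux/kernel
    executable: /usr/local/bin/7z
    force: yes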
'''
RETURN = r'''
#
'''
import os.path
import shutil
import tempfile
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
image=dict(type='path', required=True, aliases=['path', 'src']),
dest=dict(type='path', required=True),
files=dict(type='list', required=True),
force=dict(type='bool', default=True, aliases=['thirsty']),
executable=dict(type='path'), # No default on purpose
),
supports_check_mode=True,
)
image = module.params['image']
dest = module.params['dest']
files = module.params['files']
force = module.params['force']
executable = module.params['executable']
result = dict(
changed=False,
dest=dest,
image=image,
)
# We want to know if the user provided it or not, so we set default here
if executable is None:
executable = '7z'
binary = module.get_bin_path(executable, None)
# When executable was provided and binary not found, warn user !
if module.params['executable'] is not None and not binary:
module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)
if not os.path.exists(dest):
module.fail_json(msg="Directory '%s' does not exist" % dest)
if not os.path.exists(os.path.dirname(image)):
module.fail_json(msg="ISO image '%s' does not exist" % image)
result['files'] = []
extract_files = list(files)
if not force:
# Check if we have to process any files based on existence
for f in files:
dest_file = os.path.join(dest, os.path.basename(f))
if os.path.exists(dest_file):
result['files'].append(dict(
checksum=None,
dest=dest_file,
src=f,
))
extract_files.remove(f)
if not extract_files:
module.exit_json(**result)
tmp_dir = tempfile.mkdtemp()
# Use 7zip when we have a binary, otherwise try to mount
if binary:
cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
else:
cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)
rc, out, err = module.run_command(cmd)
if rc != 0:
result.update(dict(
cmd=cmd,
rc=rc,
stderr=err,
stdout=out,
))
shutil.rmtree(tmp_dir)
if binary:
module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
else:
module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)
try:
for f in extract_files:
tmp_src = os.path.join(tmp_dir, f)
if not os.path.exists(tmp_src):
module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)
src_checksum = module.sha1(tmp_src)
dest_file = os.path.join(dest, os.path.basename(f))
if os.path.exists(dest_file):
dest_checksum = module.sha1(dest_file)
else:
dest_checksum = None
result['files'].append(dict(
checksum=src_checksum,
dest=dest_file,
src=f,
))
if src_checksum != dest_checksum:
if not module.check_mode:
shutil.copy(tmp_src, dest_file)
result['changed'] = True
finally:
if not binary:
module.run_command('umount "%s"' % tmp_dir)
shutil.rmtree(tmp_dir)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
Max00355/HTTPLang
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
import sys
setup(name='HTTPLang',
version='2.0.0',
author='Frankie Primerano',
author_email='max00355@gmail.com',
packages=['httplang'],
entry_points={
'console_scripts': ['httplang=httplang:console_main'],
},
url='https://github.com/Max00355/HTTPLang',
description='A scripting language to do HTTP routines.',
classifiers=[
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities'
],
)
|
ownport/jira-reports
|
refs/heads/master
|
jirareports/vendor/requests/packages/chardet/mbcharsetprober.py
|
2923
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
simbha/mAngE-Gin
|
refs/heads/master
|
lib/django/core/files/uploadedfile.py
|
91
|
"""
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
self.content_type_extra = content_type_extra
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None, None)
@classmethod
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
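# Usage sketch (illustrative):
#
#     f = SimpleUploadedFile("hello.txt", b"file content")
#     f.name            # 'hello.txt'
#     f.read()          # b'file content'
#     g = SimpleUploadedFile.from_dict(
#         {'filename': 'a.txt', 'content': b'hi', 'content-type': 'text/plain'})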
|
bdh1011/wau
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twisted/trial/_dist/test/test_workerreporter.py
|
8
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.workerreporter}.
"""
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase, Todo
from twisted.trial._dist.workerreporter import WorkerReporter
from twisted.trial._dist import managercommands
class FakeAMProtocol(object):
"""
    A fake C{AMP} implementation for tracking C{callRemote} calls.
"""
id = 0
lastCall = None
def callRemote(self, command, **kwargs):
self.lastCall = command
class WorkerReporterTests(TestCase):
"""
Tests for L{WorkerReporter}.
"""
def setUp(self):
self.fakeAMProtocol = FakeAMProtocol()
self.workerReporter = WorkerReporter(self.fakeAMProtocol)
self.test = TestCase()
def test_addSuccess(self):
"""
L{WorkerReporter.addSuccess} sends a L{managercommands.AddSuccess}
command.
"""
self.workerReporter.addSuccess(self.test)
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddSuccess)
def test_addError(self):
"""
L{WorkerReporter.addError} sends a L{managercommands.AddError} command.
"""
self.workerReporter.addError(self.test, Failure(RuntimeError('error')))
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddError)
def test_addErrorTuple(self):
"""
Adding an error using L{WorkerReporter.addError} as a
C{sys.exc_info}-style tuple sends an L{managercommands.AddError}
command.
"""
self.workerReporter.addError(
self.test, (RuntimeError, RuntimeError('error'), None))
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddError)
def test_addFailure(self):
"""
L{WorkerReporter.addFailure} sends a L{managercommands.AddFailure}
command.
"""
self.workerReporter.addFailure(self.test,
Failure(RuntimeError('fail')))
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddFailure)
def test_addFailureTuple(self):
"""
Adding a failure using L{WorkerReporter.addFailure} as a
C{sys.exc_info}-style tuple sends an L{managercommands.AddFailure}
message.
"""
self.workerReporter.addFailure(
self.test, (RuntimeError, RuntimeError('fail'), None))
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddFailure)
def test_addSkip(self):
"""
L{WorkerReporter.addSkip} sends a L{managercommands.AddSkip} command.
"""
self.workerReporter.addSkip(self.test, 'reason')
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddSkip)
def test_addExpectedFailure(self):
"""
L{WorkerReporter.addExpectedFailure} sends a
L{managercommands.AddExpectedFailure} command.
"""
self.workerReporter.addExpectedFailure(
self.test, Failure(RuntimeError('error')), Todo('todo'))
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddExpectedFailure)
def test_addUnexpectedSuccess(self):
"""
L{WorkerReporter.addUnexpectedSuccess} sends a
L{managercommands.AddUnexpectedSuccess} command.
"""
self.workerReporter.addUnexpectedSuccess(self.test, Todo('todo'))
self.assertEqual(self.fakeAMProtocol.lastCall,
managercommands.AddUnexpectedSuccess)
|
rsalmaso/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/box/provider.py
|
2
|
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class BoxOAuth2Account(ProviderAccount):
pass
class BoxOAuth2Provider(OAuth2Provider):
id = "box"
name = "Box"
account_class = BoxOAuth2Account
def extract_uid(self, data):
return data["id"]
def extract_common_fields(self, data):
return dict(name=data.get("display_name"), email=data.get("email"))
provider_classes = [BoxOAuth2Provider]
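# Usage sketch (illustrative): given a Box user payload such as
# {"id": "11446498", "display_name": "Aaron Levie", "email": "ceo@box.com"},
# extract_uid() returns "11446498" and extract_common_fields() maps the
# display name and email onto the local account fields.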
|
wolfelee/zkdash
|
refs/heads/master
|
lib/utils/pyshell.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013, 掌阅科技
All rights reserved.
File Name: pyshell.py
Author: WangLichao
Created on: 2014-03-21
'''
import subprocess
import time
def wait_process_end(process, timeout):
    '''Wait for a process to terminate.
    Args:
        process: the process handle
        timeout: timeout in seconds
    Returns:
        Mirrors shell exit semantics:
        0: success
        1: timeout
        2: error
'''
if timeout <= 0:
process.wait()
return 0
start_time = time.time()
end_time = start_time + timeout
while 1:
ret = process.poll()
if ret == 0:
return 0
elif ret is None:
cur_time = time.time()
if cur_time >= end_time:
return 1
time.sleep(0.1)
else:
return 2
class ShellResult(object):
    '''Wrapper for the result of a shell execution.
    Attributes:
        return_code: the process return code
        stdout: captured standard output
        stderr: captured standard error
'''
def __init__(self, return_code, stdout, stderr):
self.return_code = return_code
self.stdout = stdout
self.stderr = stderr
def shell(command, timeout=0, capture=False, debug=False):
    '''Run a command in a local shell.
    Args:
        command: the bash command to run
        timeout: timeout for the command, in seconds (0 means wait indefinitely)
        capture: whether to capture the command's output
        debug: whether to print debug information
    Returns:
        A ShellResult object.
'''
if debug:
print('=' * 35)
print('[local]' + command)
print('=' * 35)
if capture:
process = subprocess.Popen(command, stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
else:
process = subprocess.Popen(command, shell=True)
ret = wait_process_end(process, timeout)
if ret == 1:
process.terminate()
raise Exception("terminated_for_timout")
if capture:
stdout = ''.join(process.stdout.readlines())
stderr = ''.join(process.stderr.readlines())
return ShellResult(process.returncode, stdout, stderr)
else:
return ShellResult(process.returncode, None, None)
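# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    result = shell('echo hello', timeout=5, capture=True)
    print(result.return_code, result.stdout)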
|
derDavidT/sympy
|
refs/heads/master
|
sympy/plotting/plot.py
|
55
|
"""Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
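# A minimal usage sketch (illustrative; ``plot`` is the convenience wrapper
# defined later in this module and requires matplotlib):
#
#     >>> from sympy import symbols
#     >>> from sympy.plotting import plot
#     >>> x = symbols('x')
#     >>> p = plot(x**2, title='Parabola', xlabel='x', ylabel='f(x)')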
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
    # Different from is_contour, as the colormap in the backend will be
    # different.
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
                    (p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
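# A small illustration (not from the original module): for np.array([0., 1., 2.]),
# centers_of_segments returns array([0.5, 1.5]), the midpoints of consecutive
# samples.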
def centers_of_faces(array):
    np = import_module('numpy')
    # average the four corner vertices of each mesh face
    return np.average(np.dstack((array[:-1, :-1],
                                 array[1:, :-1],
                                 array[:-1, 1:],
                                 array[1:, 1:],
                                 )), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
    # Workaround for plotting Piecewise (#8577):
    # `lambdify` in `.experimental_lambdify` fails to return
    # numerical values in some cases; a lower-level fix in
    # `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
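    # cos_theta is close to -1 exactly when the angle at y is close to 180
    # degrees, i.e. when x, y and z are almost collinear with y in the middle.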
return abs(cos_theta + 1) < eps
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
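# A minimal usage sketch (assumed, not part of the original module): the
# None-separated vertex lists let a single matplotlib ``fill`` call draw all
# the disjoint rectangles at once, as done in MatplotlibBackend above:
#
#     xlist, ylist = _matplotlib_list(interval_list)
#     ax.fill(xlist, ylist, facecolor='b', edgecolor='None')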
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point
    near the midpoint of two points that have to be further sampled. Hence,
    repeated plots of the same expression can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point
    near the midpoint of two points that have to be further sampled. Hence,
    repeated plots of the same expression can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
    Multiple parametric plots with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle expressions of length 3: it is not possible to
        # differentiate between expressions and ranges.
        # Series of plots with the same range.
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
|
mhbu50/erpnext
|
refs/heads/develop
|
erpnext/education/doctype/student_group/test_student_group.py
|
17
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
import erpnext.education
def get_random_group():
doc = frappe.get_doc({
"doctype": "Student Group",
"student_group_name": "_Test Student Group-" + frappe.generate_hash(length=5),
"group_based_on": "Activity"
}).insert()
student_list = frappe.get_all('Student', limit=5)
doc.extend("students", [{"student":d.name, "active": 1} for d in student_list])
doc.save()
return doc
class TestStudentGroup(unittest.TestCase):
def test_student_roll_no(self):
doc = get_random_group()
self.assertEqual(max([d.group_roll_number for d in doc.students]), len(doc.students))
def test_in_group(self):
doc = get_random_group()
last_student = doc.students[-1].student
# remove last student
doc.students = doc.students[:-1]
doc.save()
self.assertRaises(erpnext.education.StudentNotInGroupError,
erpnext.education.validate_student_belongs_to_group, last_student, doc.name)
# safe, don't throw validation
erpnext.education.validate_student_belongs_to_group(doc.students[0].student, doc.name)
|
anirudhSK/chromium
|
refs/heads/master
|
third_party/tlslite/tlslite/integration/HTTPTLSConnection.py
|
87
|
"""TLS Lite + httplib."""
import socket
import httplib
from tlslite.TLSConnection import TLSConnection
from tlslite.integration.ClientHelper import ClientHelper
class HTTPBaseTLSConnection(httplib.HTTPConnection):
"""This abstract class provides a framework for adding TLS support
to httplib."""
default_port = 443
def __init__(self, host, port=None, strict=None):
if strict == None:
#Python 2.2 doesn't support strict
httplib.HTTPConnection.__init__(self, host, port)
else:
httplib.HTTPConnection.__init__(self, host, port, strict)
def connect(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(sock, 'settimeout'):
sock.settimeout(10)
sock.connect((self.host, self.port))
#Use a TLSConnection to emulate a socket
self.sock = TLSConnection(sock)
#When httplib closes this, close the socket
self.sock.closeSocket = True
self._handshake(self.sock)
def _handshake(self, tlsConnection):
"""Called to perform some sort of handshake.
This method must be overridden in a subclass to do some type of
handshake. This method will be called after the socket has
been connected but before any data has been sent. If this
method does not raise an exception, the TLS connection will be
considered valid.
This method may (or may not) be called every time an HTTP
request is performed, depending on whether the underlying HTTP
connection is persistent.
@type tlsConnection: L{tlslite.TLSConnection.TLSConnection}
@param tlsConnection: The connection to perform the handshake
on.
"""
raise NotImplementedError()
class HTTPTLSConnection(HTTPBaseTLSConnection, ClientHelper):
"""This class extends L{HTTPBaseTLSConnection} to support the
common types of handshaking."""
def __init__(self, host, port=None,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""Create a new HTTPTLSConnection.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Thus you should be prepared to handle TLS-specific
exceptions when calling methods inherited from
L{httplib.HTTPConnection} such as request(), connect(), and
send(). See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type host: str
@param host: Server to connect to.
@type port: int
@param port: Port to connect to.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
HTTPBaseTLSConnection.__init__(self, host, port)
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
def _handshake(self, tlsConnection):
ClientHelper._handshake(self, tlsConnection)
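# A minimal usage sketch (not part of the original module; the host and
# fingerprint below are placeholders, not real values):
#
#     conn = HTTPTLSConnection('www.example.com', 443,
#                              x509Fingerprint='<hex-encoded fingerprint>')
#     conn.request('GET', '/')
#     response = conn.getresponse()
#     print response.status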
|
prakash-alpine/chorus
|
refs/heads/master
|
packaging/setup/config_lib/configure_ldap.py
|
4
|
import os
import sys
sys.path.append("..")
def configure_ldap(options):
os.system("${EDITOR:-vi} " + os.path.join(options.chorus_path, "shared/ldap.properties"))
|
JshWright/home-assistant
|
refs/heads/dev
|
tests/components/recorder/test_util.py
|
26
|
"""Test util methods."""
from unittest.mock import patch, MagicMock
import pytest
from homeassistant.components.recorder import util
from homeassistant.components.recorder.const import DATA_INSTANCE
from tests.common import get_test_home_assistant, init_recorder_component
@pytest.fixture
def hass_recorder():
"""HASS fixture with in-memory recorder."""
hass = get_test_home_assistant()
def setup_recorder(config=None):
"""Setup with params."""
init_recorder_component(hass, config)
hass.start()
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
return hass
yield setup_recorder
hass.stop()
def test_recorder_bad_commit(hass_recorder):
"""Bad _commit should retry 3 times."""
hass = hass_recorder()
def work(session):
"""Bad work."""
session.execute('select * from notthere')
with patch('homeassistant.components.recorder.time.sleep') as e_mock, \
util.session_scope(hass=hass) as session:
res = util.commit(session, work)
assert res is False
assert e_mock.call_count == 3
def test_recorder_bad_execute(hass_recorder):
"""Bad execute, retry 3 times."""
from sqlalchemy.exc import SQLAlchemyError
hass_recorder()
def to_native():
"""Rasie exception."""
raise SQLAlchemyError()
mck1 = MagicMock()
mck1.to_native = to_native
with pytest.raises(SQLAlchemyError), \
patch('homeassistant.components.recorder.time.sleep') as e_mock:
util.execute((mck1,))
assert e_mock.call_count == 2
|
weiting-chen/manila
|
refs/heads/master
|
manila/tests/api/openstack/test_wsgi.py
|
1
|
import ddt
import inspect
import webob
from manila.api.openstack import wsgi
from manila import exception
from manila import test
from manila.tests.api import fakes
@ddt.ddt
class RequestTest(test.TestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertEqual(None, request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept(self):
content_type = 'application/json'
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(result, content_type)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_cache_and_retrieve_resources(self):
request = wsgi.Request.blank('/foo')
# Test that trying to retrieve a cached object on
# an empty cache fails gracefully
self.assertIsNone(request.cached_resource())
self.assertIsNone(request.cached_resource_by_id('r-0'))
resources = [{'id': 'r-%s' % x} for x in range(3)]
# Cache an empty list of resources using the default name
request.cache_resource([])
self.assertEqual({}, request.cached_resource())
self.assertIsNone(request.cached_resource('r-0'))
# Cache some resources
request.cache_resource(resources[:2])
# Cache one resource
request.cache_resource(resources[2])
# Cache a different resource name
other_resource = {'id': 'o-0'}
request.cache_resource(other_resource, name='other-resource')
self.assertEqual(resources[0], request.cached_resource_by_id('r-0'))
self.assertEqual(resources[1], request.cached_resource_by_id('r-1'))
self.assertEqual(resources[2], request.cached_resource_by_id('r-2'))
self.assertIsNone(request.cached_resource_by_id('r-3'))
self.assertEqual(
{'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]},
request.cached_resource())
self.assertEqual(
other_resource,
request.cached_resource_by_id('o-0', name='other-resource'))
@ddt.data(
'share_type',
)
def test_cache_and_retrieve_resources_by_resource(self, resource_name):
cache_all_func = 'cache_db_%ss' % resource_name
cache_one_func = 'cache_db_%s' % resource_name
get_db_all_func = 'get_db_%ss' % resource_name
get_db_one_func = 'get_db_%s' % resource_name
r = wsgi.Request.blank('/foo')
amount = 5
res_range = range(amount)
resources = [{'id': 'id%s' % x} for x in res_range]
# Store 2
getattr(r, cache_all_func)(resources[:amount - 1])
# Store 1
getattr(r, cache_one_func)(resources[amount - 1])
for i in res_range:
self.assertEqual(
resources[i],
getattr(r, get_db_one_func)('id%s' % i),
)
self.assertIsNone(getattr(r, get_db_one_func)('id%s' % amount))
self.assertEqual(
{'id%s' % i: resources[i] for i in res_range},
getattr(r, get_db_all_func)())
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
def test_dispatch_action_None(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class DictSerializerTest(test.TestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual(serializer.serialize({}, 'update'), '')
class JSONDictSerializerTest(test.TestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
class TextDeserializerTest(test.TestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual(deserializer.deserialize({}, 'update'), {})
class JSONDeserializerTest(test.TestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
class ResourceTest(test.TestCase):
def test_resource_call(self):
class Controller(object):
def index(self, req):
return 'off'
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.body, 'off')
self.assertEqual(response.status_int, 200)
def test_resource_not_authorized(self):
class Controller(object):
def index(self, req):
raise exception.NotAuthorized()
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.status_int, 403)
def test_dispatch(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'index', None, '')
actual = resource.dispatch(method, None, {'pants': 'off'})
expected = 'off'
self.assertEqual(actual, expected)
def test_get_method_undefined_controller_action(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(AttributeError, resource.get_method,
None, 'create', None, '')
def test_get_method_action_json(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_bad_body(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.MalformedRequestBody, resource.get_method,
None, 'action', 'application/json', '{}')
def test_get_method_unknown_controller_action(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(KeyError, resource.get_method,
None, 'action', 'application/json',
'{"barAction": true}')
def test_get_method_action_method(self):
class Controller():
def action(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, extensions = resource.get_method(None, 'action',
'application/xml',
'<fooAction>true</fooAction')
self.assertEqual(controller.action, method)
def test_get_action_args(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12,
}],
}
expected = {'action': 'update', 'id': 12}
self.assertEqual(resource.get_action_args(env), expected)
def test_get_body_bad_content(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/none'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, None)
self.assertEqual(body, '')
def test_get_body_no_content_type(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, None)
self.assertEqual(body, '')
def test_get_body_no_content_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = ''
content_type, body = resource.get_body(request)
self.assertEqual(content_type, None)
self.assertEqual(body, '')
def test_get_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, 'application/json')
self.assertEqual(body, 'foo')
def test_deserialize_badtype(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.InvalidContentType,
resource.deserialize,
controller.index, 'application/none', 'foo')
def test_deserialize_default(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class XMLDeserializer(object):
def deserialize(self, body):
return 'xml'
class Controller(object):
@wsgi.deserializers(xml=XMLDeserializer)
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/json', 'foo')
self.assertEqual(obj, 'json')
def test_deserialize_decorator(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/json', 'foo')
self.assertEqual(obj, 'json')
def test_register_actions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
@wsgi.action('barAction')
def _action_bar(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_actions)
extended = ControllerExtended()
resource.register_actions(extended)
self.assertEqual({'fooAction': extended._action_foo,
'barAction': extended._action_bar, },
resource.wsgi_actions)
def test_register_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp, id, body):
return None
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_extensions)
self.assertEqual({}, resource.wsgi_action_extensions)
extended = ControllerExtended()
resource.register_extensions(extended)
self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
self.assertEqual({'fooAction': [extended._action_foo]},
resource.wsgi_action_extensions)
def test_get_method_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'index', None, '')
self.assertEqual(method, controller.index)
self.assertEqual(extensions, [extended.index])
def test_get_method_action_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
class ControllerExtended(wsgi.Controller):
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp_obj, id, body):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(method, controller._action_foo)
self.assertEqual(extensions, [extended._action_foo])
def test_get_method_action_whitelist_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('create')
def _create(self, req, body):
pass
@wsgi.action('delete')
def _delete(self, req, id):
pass
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_actions(extended)
method, extensions = resource.get_method(None, 'create',
'application/json',
'{"create": true}')
self.assertEqual(method, extended._create)
self.assertEqual(extensions, [])
method, extensions = resource.get_method(None, 'delete', None, None)
self.assertEqual(method, extended._delete)
self.assertEqual(extensions, [])
def test_pre_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(called, [])
self.assertEqual(response, None)
self.assertEqual(list(post), [extension2, extension1])
def test_pre_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
resp_obj = yield
called.append('post1')
def extension2(req):
called.append('pre2')
resp_obj = yield
called.append('post2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
post = list(post)
self.assertEqual(called, ['pre1', 'pre2'])
self.assertEqual(response, None)
self.assertEqual(len(post), 2)
self.assertTrue(inspect.isgenerator(post[0]))
self.assertTrue(inspect.isgenerator(post[1]))
for gen in post:
try:
gen.send(None)
except StopIteration:
continue
self.assertEqual(called, ['pre1', 'pre2', 'post2', 'post1'])
def test_pre_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
yield 'foo'
def extension2(req):
called.append('pre2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(called, ['pre1'])
self.assertEqual(response, 'foo')
self.assertEqual(post, [])
def test_post_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual(called, [2, 1])
self.assertEqual(response, None)
def test_post_process_extensions_regular_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return 'foo'
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual(called, [2])
self.assertEqual(response, 'foo')
def test_post_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
resp_obj = yield
called.append(1)
def extension2(req):
resp_obj = yield
called.append(2)
ext1 = extension1(None)
next(ext1)
ext2 = extension2(None)
next(ext2)
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual(called, [2, 1])
self.assertEqual(response, None)
def test_post_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
resp_obj = yield
called.append(1)
def extension2(req):
resp_obj = yield
called.append(2)
yield 'foo'
ext1 = extension1(None)
next(ext1)
ext2 = extension2(None)
next(ext2)
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual(called, [2])
self.assertEqual(response, 'foo')
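# The tests above exercise the two extension styles the wsgi layer supports:
# a plain extension is a callable(req, resp_obj) run after the controller
# method, while a generator extension runs its pre-phase up to ``yield``
# before the method and resumes for post-processing afterwards. A sketch
# (names are illustrative):
#
#     def my_extension(req):
#         # pre-processing; yielding a value here short-circuits the request
#         resp_obj = yield
#         # post-processing; yielding a value here replaces the response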
class ResponseObjectTest(test.TestCase):
def test_default_code(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.code, 200)
def test_modified_code(self):
robj = wsgi.ResponseObject({})
robj._default_code = 202
self.assertEqual(robj.code, 202)
def test_override_default_code(self):
robj = wsgi.ResponseObject({}, code=404)
self.assertEqual(robj.code, 404)
def test_override_modified_code(self):
robj = wsgi.ResponseObject({}, code=404)
robj._default_code = 202
self.assertEqual(robj.code, 404)
def test_set_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj.headers, {'header': 'foo'})
def test_get_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj['hEADER'], 'foo')
def test_del_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
del robj['hEADER']
self.assertFalse('header' in robj.headers)
def test_header_isolation(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
hdrs = robj.headers
hdrs['hEADER'] = 'bar'
self.assertEqual(robj['hEADER'], 'foo')
def test_default_serializers(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.serializers, {})
def test_bind_serializers(self):
robj = wsgi.ResponseObject({}, json='foo')
robj._bind_method_serializers(dict(xml='bar', json='baz'))
self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
def test_get_serializer(self):
robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
_mtype, serializer = robj.get_serializer(content_type)
self.assertEqual(serializer, mtype)
def test_get_serializer_defaults(self):
robj = wsgi.ResponseObject({})
default_serializers = dict(json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
self.assertRaises(exception.InvalidContentType,
robj.get_serializer, content_type)
_mtype, serializer = robj.get_serializer(content_type,
default_serializers)
self.assertEqual(serializer, mtype)
def test_serialize(self):
class JSONSerializer(object):
def serialize(self, obj):
return 'json'
class XMLSerializer(object):
def serialize(self, obj):
return 'xml'
class AtomSerializer(object):
def serialize(self, obj):
return 'atom'
robj = wsgi.ResponseObject({}, code=202,
json=JSONSerializer,
xml=XMLSerializer,
atom=AtomSerializer)
robj['X-header1'] = 'header1'
robj['X-header2'] = 'header2'
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
request = wsgi.Request.blank('/tests/123')
response = robj.serialize(request, content_type)
self.assertEqual(response.headers['Content-Type'], content_type)
self.assertEqual(response.headers['X-header1'], 'header1')
self.assertEqual(response.headers['X-header2'], 'header2')
self.assertEqual(response.status_int, 202)
self.assertEqual(response.body, mtype)
class ValidBodyTest(test.TestCase):
def setUp(self):
super(ValidBodyTest, self).setUp()
self.controller = wsgi.Controller()
def test_is_valid_body(self):
body = {'foo': {}}
self.assertTrue(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_none(self):
resource = wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body(None, 'foo'))
def test_is_valid_body_empty(self):
resource = wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body({}, 'foo'))
def test_is_valid_body_no_entity(self):
resource = wsgi.Resource(controller=None)
body = {'bar': {}}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_malformed_entity(self):
resource = wsgi.Resource(controller=None)
body = {'foo': 'bar'}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
|
futurepr0n/Books-solutions
|
refs/heads/master
|
Python-For-Everyone-Horstmann/Chapter6-Lists/R6.1E.py
|
1
|
# Given the list values = [], write code that fills the list with each set of numbers below.
# e.1 4 9 16 9 7 4 9 11
values = []
# there's no arithmetic pattern, so append each value in turn
values.append(1)
values.append(4)
values.append(9)
values.append(16)
values.append(9)
values.append(7)
values.append(4)
values.append(9)
values.append(11)
print(values)
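# Equivalent one-liner: since the values follow no arithmetic pattern, a plain
# literal is the simplest alternative:
# values = [1, 4, 9, 16, 9, 7, 4, 9, 11]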
|
priyanshsaxena/techmeet
|
refs/heads/master
|
backup_stuff/text_classification/test.py
|
2
|
# -*- coding: utf-8 -*-
import pysentiment as ps
def sentiment(text):
hiv4 = ps.HIV4()
tokens = hiv4.tokenize(text.decode('utf-8'))
score = hiv4.get_score(tokens)
print (score)
if __name__ == '__main__':
string = ""
sentiment(string)
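# Example (assumed behaviour of pysentiment's HIV4 scorer): passing a short
# text such as "a good and happy day" prints a score dict with keys along the
# lines of 'Positive', 'Negative', 'Polarity' and 'Subjectivity'.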
|
osigaud/ArmModelPython
|
refs/heads/master
|
Cython/M2/ArmModel/ArmParameters.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher
Module: ArmParameters
Description: all the arm parameters are defined here;
we use an arm model with two joints and six muscles
'''
import numpy as np
from GlobalVariables import pathWorkingDirectory
class ArmParameters:
'''
class ArmParameters
'''
def __init__(self):
'''
        Initializes the class
'''
self.pathSetupFile = pathWorkingDirectory + "/ArmModel/Setup/Arm.params"
self.readSetupFile()
self.massMatrix()
self.AMatrix()
self.BMatrix()
self.readStops()
def readSetupFile(self):
'''
Reads the setup file
'''
with open(self.pathSetupFile, "r") as file:
alls = file.read()
#Split to read line by line
allsByLign = alls.split("\n")
#line 1, Arm length
self.l1 = float((allsByLign[0].split(":"))[1])
#line 2, ForeArm length
self.l2 = float((allsByLign[1].split(":"))[1])
#line 3, Arm mass
self.m1 = float((allsByLign[2].split(":"))[1])
#line 4, ForeArm mass
self.m2 = float((allsByLign[3].split(":"))[1])
#line 5, Arm inertia
self.I1 = float((allsByLign[4].split(":"))[1])
#line 6, ForeArm inertia
self.I2 = float((allsByLign[5].split(":"))[1])
#line 7, Distance from the center of segment 1 to its center of mass
self.s1 = float((allsByLign[6].split(":"))[1])
#line 8, Distance from the center of segment 2 to its center of mass
self.s2 = float((allsByLign[7].split(":"))[1])
def massMatrix(self):
'''
Initialization of parameters used for the inertia matrix
'''
self.k1 = self.I1 + self.I2 + self.m2*(self.l1**2)
self.k2 = self.m2*self.l1*self.s2
self.k3 = self.I2
def BMatrix(self):
'''
Defines the damping matrix B
'''
with open(self.pathSetupFile, "r") as file:
alls = file.read()
allsByLign = alls.split("\n")
#line 9, Damping term k6
b1 = float((allsByLign[8].split(":"))[1])
#line 10, Damping term k7
b2 = float((allsByLign[9].split(":"))[1])
#line 11, Damping term k8
b3 = float((allsByLign[10].split(":"))[1])
#line 12, Damping term k9
b4 = float((allsByLign[11].split(":"))[1])
#matrix definition
self.B = np.array([[b1,b2],[b3,b4]])
def AMatrix(self):
'''
Defines the moment arm matrix A
'''
with open(self.pathSetupFile, "r") as file:
alls = file.read()
allsByLign = alls.split("\n")
#line 13, Moment arm matrix, a1
a1 = float((allsByLign[12].split(":"))[1])
#line 14, Moment arm matrix, a2
a2 = float((allsByLign[13].split(":"))[1])
#line 15, Moment arm matrix, a3
a3 = float((allsByLign[14].split(":"))[1])
#line 16, Moment arm matrix, a4
a4 = float((allsByLign[15].split(":"))[1])
#line 17, Moment arm matrix, a5
a5 = float((allsByLign[16].split(":"))[1])
#line 18, Moment arm matrix, a6
a6 = float((allsByLign[17].split(":"))[1])
#line 19, Moment arm matrix, a7
a7 = float((allsByLign[18].split(":"))[1])
#line 20, Moment arm matrix, a8
a8 = float((allsByLign[19].split(":"))[1])
#line 21, Moment arm matrix, a9
a9 = float((allsByLign[20].split(":"))[1])
#line 22, Moment arm matrix, a10
a10 = float((allsByLign[21].split(":"))[1])
#line 23, Moment arm matrix, a11
a11 = float((allsByLign[22].split(":"))[1])
#line 24, Moment arm matrix, a12
a12 = float((allsByLign[23].split(":"))[1])
#matrix definition
self.At = np.array([[a1,a2,a3,a4,a5,a6], [a7,a8,a9,a10,a11,a12]])
def readStops(self):
with open(self.pathSetupFile, "r") as file:
alls = file.read()
allsByLign = alls.split("\n")
#line 25, Shoulder upper bound
self.sub = float((allsByLign[24].split(":"))[1])
#line 26, Shoulder lower bound
self.slb = float((allsByLign[25].split(":"))[1])
#line 27, Elbow upper bound
self.eub = float((allsByLign[26].split(":"))[1])
#line 28, Elbow lower bound
self.elb = float((allsByLign[27].split(":"))[1])
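# Illustrative excerpt of the Setup/Arm.params file this class expects: one
# "label:value" pair per line, read strictly by line number as documented in
# the comments above. The labels and values below are examples, not real data:
# ArmLength:0.3
# ForeArmLength:0.35
# ArmMass:1.4
# ...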
|
shengqh/ngsperl
|
refs/heads/master
|
lib/SmallRNA/updateShortReadParentCount.py
|
1
|
import argparse
import sys
import logging
import os
import csv
class ReadItem:
def __init__(self, sequence, totalCount):
self.Sequence = sequence
self.TotalCount = totalCount
self.SampleMap = {}
class AnnotationItem:
def __init__(self, sequence, totalCount, category, counts):
self.Sequence = sequence
self.TotalCount = totalCount
self.Categories = [category]
self.Counts = counts
def getValue(value):
return value.TotalCount
def getFilename(value):
return value[1]
def match(logger, input, names, annotated, maxMapped, maxNumber, minReadCount, minSampleCount, outputPrefix):
logger.info("Reading short reads:" + input + " ...")
shortReadMap = {}
shortReadFiles = []
shortFileList = []
with open(input, 'r') as sr:
for line in sr:
parts = line.rstrip().split('\t')
shortFileList.append(parts)
shortFileList = sorted(shortFileList, key=getFilename)
for parts in shortFileList:
sampleFile = parts[0]
sample = parts[1]
shortReadFiles.append(sample)
logger.info(" Reading " + sampleFile + " ...")
with open(sampleFile, 'r') as fin:
fin.readline()
for line in fin:
reads = line.rstrip().split('\t')
count = int(reads[1])
seq = reads[2].rstrip()
if not seq in shortReadMap:
ri = ReadItem(seq, count)
shortReadMap[seq] = ri
else:
ri = shortReadMap[seq]
ri.TotalCount += count
ri.SampleMap[sample] = count
if minSampleCount > 1 or minReadCount > 1:
shortReads = []
for read in shortReadMap.values():
validSampleCount = len([v for v in read.SampleMap.values() if v >= minReadCount])
if validSampleCount >= minSampleCount:
shortReads.append(read)
else:
shortReads = shortReadMap.values()
shortReads = sorted(shortReads, key=getValue, reverse=True)
if len(shortReads) > maxNumber:
shortReads = shortReads[0:maxNumber]
logger.info("Reading max mapped reads:" + maxMapped + " ...")
maxmappedReads = {}
with open(maxMapped, 'r') as sr:
for line in sr:
parts = line.split('\t')
logger.info(" Reading " + parts[0] + " ...")
with open(parts[0], 'r') as fin:
while True:
qname = fin.readline().rstrip()
if not qname:
break
seq = fin.readline()
fin.readline()
fin.readline()
if qname.endswith("_"):
maxmappedReads[seq.rstrip()] = 1
cnames = names.split(",")
logger.info("Reading annotated reads:" + annotated + " ...")
annotatedReadMap = {}
annotatedFiles = []
with open(annotated, 'r') as annolist:
iIndex = -1
for row in annolist:
parts = row.split('\t')
annofile = parts[0]
iIndex = iIndex + 1
category = cnames[iIndex]
logger.info(" Reading " + annofile + " ...")
with open(annofile, 'r') as sr:
annotatedFiles = sr.readline().rstrip().split('\t')[1:]
for line in sr:
parts = line.rstrip().split('\t')
seq = parts[0]
if seq not in annotatedReadMap:
totalCount = sum(int(p) for p in parts[1:])
annotatedReadMap[seq] = AnnotationItem(seq, totalCount, category, parts[1:])
else:
annotatedReadMap[seq].Categories.append(category)
annotatedReads = sorted(annotatedReadMap.values(), key=getValue, reverse=True)
output = outputPrefix + ".tsv"
logger.info("Writing explain result:" + output + " ...")
with open(output, "w") as sw:
sw.write("ShortRead\tShortReadCount\tShortReadLength\t" + "\t".join(["SRS_" + f for f in shortReadFiles]) + "\tIsMaxMapped\tParentRead\tParentReadCount\tParentReadCategory\t" + "\t".join(["PRS_" + f for f in annotatedFiles]) + "\n")
emptyAnnotation = "\t\t\t\t" + "\t".join(["" for af in annotatedFiles]) + "\n"
for shortRead in shortReads:
shortSeq = shortRead.Sequence
shortSeqCount = shortRead.TotalCount
seqMap = shortRead.SampleMap
sw.write("%s\t%s\t%d" % (shortSeq, shortSeqCount, len(shortSeq)))
for fname in shortReadFiles:
if fname in seqMap:
sw.write("\t%s" % seqMap[fname])
else:
sw.write("\t0")
sw.write("\t" + str(shortSeq in maxmappedReads))
bFound = False
for annotatedRead in annotatedReads:
annoSeq = annotatedRead.Sequence
if shortSeq in annoSeq:
bFound = True
sw.write("\t%s\t%s\t%s\t%s\n" % (annoSeq, annotatedRead.TotalCount, "/".join(annotatedRead.Categories[0]), "\t".join(annotatedRead.Counts)))
break
if not bFound:
sw.write(emptyAnnotation)
logger.info("Done.")
def main():
parser = argparse.ArgumentParser(description="Matching short reads with annotated reads.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
DEBUG=False
NOT_DEBUG = not DEBUG
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input short reads', required=NOT_DEBUG)
parser.add_argument('-m', '--maxMapped', action='store', nargs='?', help='Input reads exceed maximum mapping to genome', required=NOT_DEBUG)
parser.add_argument('-a', '--annotated', action='store', nargs='?', help='Input annotated reads', required=NOT_DEBUG)
parser.add_argument('-n', '--names', action='store', nargs='?', help="Input annotated reads categories, separated by ','", required=NOT_DEBUG)
parser.add_argument('--maxNumber', action='store', type=int, default=100, nargs='?', help='Input number of top short reads for annotation')
parser.add_argument('--minReadCount', action='store', type=int, default=3, nargs='?', help='Input minimum copy of short reads in sample for annotation')
parser.add_argument('--minSampleCount', action='store', type=int, default=2, nargs='?', help='Input minimum number of samples with valid read count')
parser.add_argument('-o', '--output', action='store', nargs='?', default="-", help="Output prefix of matched reads file", required=NOT_DEBUG)
if NOT_DEBUG and len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if DEBUG:
args.input = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList1.list"
args.maxMapped = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList2.list"
args.annotated = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList3.list"
args.names = "Host miRNA,Host tRNA,Host snRNA,Host snoRNA,Host rRNA,Host other small RNA,Host Genome,Microbiome Bacteria,Environment Bacteria,Fungus,Non host tRNA,Non host rRNA"
#args.names = "Host miRNA,Host tRNA"
args.output = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match2"
logger = logging.getLogger('updateCount')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
match(logger, args.input, args.names, args.annotated, args.maxMapped, args.maxNumber, args.minReadCount, args.minSampleCount, args.output)
if __name__ == "__main__":
main()
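# Example invocation (file names are illustrative):
# python updateShortReadParentCount.py \
#   -i short_reads.list -m max_mapped.list -a annotated.list \
#   -n "Host miRNA,Host tRNA" --maxNumber 100 -o matched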
|
pooyapooya/rizpardazande
|
refs/heads/master
|
rizpar/lib/python2.7/site-packages/serial/urlhandler/protocol_socket.py
|
11
|
#! python
#
# This module implements a simple socket based client.
# It does not support changing any port parameters and will silently ignore any
# requests to do so.
#
# The purpose of this module is that applications using pySerial can connect to
# TCP/IP to serial port converters that do not support RFC 2217.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# URL format: socket://<host>:<port>[?logging={debug|info|warning|error}]
# options:
# - "logging" set the level for diagnostic messages printed via the logging module
import errno
import logging
import select
import socket
import time
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from serial.serialutil import SerialBase, SerialException, portNotOpenError, to_bytes
# map log level names to constants. used in from_url()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
POLL_TIMEOUT = 5
class Serial(SerialBase):
"""Serial port implementation for plain sockets."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
self.logger = None
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
try:
# timeout is used for write timeout support :/ and to get an initial connection timeout
self._socket = socket.create_connection(self.from_url(self.portstr), timeout=POLL_TIMEOUT)
except Exception as msg:
self._socket = None
raise SerialException("Could not open port {}: {}".format(self.portstr, msg))
# not that there is anything to configure...
self._reconfigure_port()
# everything is set up; now start from a clean state
self.is_open = True
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
self.reset_input_buffer()
self.reset_output_buffer()
def _reconfigure_port(self):
"""\
Set communication parameters on opened port. For the socket://
protocol all settings are ignored!
"""
if self._socket is None:
raise SerialException("Can only operate on open ports")
if self.logger:
self.logger.info('ignored port configuration change')
def close(self):
"""Close port"""
if self.is_open:
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
self._socket = None
self.is_open = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def from_url(self, url):
"""extract host and port from an URL string"""
parts = urlparse.urlsplit(url)
if parts.scheme != "socket":
raise SerialException(
'expected a string in the form '
'"socket://<host>:<port>[?logging={debug|info|warning|error}]": '
'not starting with socket:// ({!r})'.format(parts.scheme))
try:
# process options now, directly altering self
for option, values in urlparse.parse_qs(parts.query, True).items():
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.socket')
self.logger.setLevel(LOGGER_LEVELS[values[0]])
self.logger.debug('enabled logging')
else:
raise ValueError('unknown option: {!r}'.format(option))
if not 0 <= parts.port < 65536:
raise ValueError("port not in range 0...65535")
except ValueError as e:
raise SerialException(
'expected a string in the form '
'"socket://<host>:<port>[?logging={debug|info|warning|error}]": {}'.format(e))
return (parts.hostname, parts.port)
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
if not self.is_open:
raise portNotOpenError
# Poll the socket to see if it is ready for reading.
# If it is ready, at least one byte can be read.
lr, lw, lx = select.select([self._socket], [], [], 0)
return len(lr)
# select based implementation, similar to posix, but only using socket API
# to be portable, additionally handle socket timeout which is used to
# emulate write timeouts
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set, it may
return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
read = bytearray()
timeout = self._timeout
while len(read) < size:
try:
start_time = time.time()
ready, _, _ = select.select([self._socket], [], [], timeout)
# If select was used with a timeout, and the timeout occurs, it
# returns with empty lists -> thus abort read operation.
# For timeout == 0 (non-blocking operation) also abort when
# there is nothing to read.
if not ready:
break # timeout
buf = self._socket.recv(size - len(read))
# read should always return some data as select reported it was
# ready to read when we get to this point, unless it is EOF
if not buf:
raise SerialException('socket disconnected')
read.extend(buf)
if timeout is not None:
timeout -= time.time() - start_time
if timeout <= 0:
break
except socket.timeout:
# timeout is used for write support, just go reading again
pass
except socket.error as e:
# connection fails -> terminate loop
raise SerialException('connection failed ({})'.format(e))
except OSError as e:
# this is for Python 3.x where select.error is a subclass of
# OSError ignore EAGAIN errors. all other errors are shown
if e.errno != errno.EAGAIN:
raise SerialException('read failed: {}'.format(e))
except select.error as e:
# this is for Python 2.x
# ignore EAGAIN errors. all other errors are shown
# see also http://www.python.org/dev/peps/pep-3151/#select
if e[0] != errno.EAGAIN:
raise SerialException('read failed: {}'.format(e))
return bytes(read)
def write(self, data):
"""\
Output the given byte string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self.is_open:
raise portNotOpenError
try:
self._socket.sendall(to_bytes(data))
except socket.error as e:
# XXX what exception if socket connection fails
raise SerialException("socket connection failed: {}".format(e))
return len(data)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('ignored reset_input_buffer')
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('ignored reset_output_buffer')
def send_break(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('ignored send_break({!r})'.format(duration))
def _update_break_state(self):
"""Set break: Controls TXD. When active, to transmitting is
possible."""
if self.logger:
self.logger.info('ignored _update_break_state({!r})'.format(self._break_state))
def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if self.logger:
self.logger.info('ignored _update_rts_state({!r})'.format(self._rts_state))
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready"""
if self.logger:
self.logger.info('ignored _update_dtr_state({!r})'.format(self._dtr_state))
@property
def cts(self):
"""Read terminal status line: Clear To Send"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for cts')
return True
@property
def dsr(self):
"""Read terminal status line: Data Set Ready"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for dsr')
return True
@property
def ri(self):
"""Read terminal status line: Ring Indicator"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for ri')
return False
@property
def cd(self):
"""Read terminal status line: Carrier Detect"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for cd')
return True
# - - - platform specific - - -
# works on Linux and probably all the other POSIX systems
def fileno(self):
"""Get the file handle of the underlying socket for use with select"""
return self._socket.fileno()
#
# simple client test
if __name__ == '__main__':
import sys
s = Serial('socket://localhost:7000')
sys.stdout.write('{}\n'.format(s))
sys.stdout.write("write...\n")
s.write(b"hello\n")
s.flush()
sys.stdout.write("read: {}\n".format(s.read(5)))
s.close()
|
c4all/c4all
|
refs/heads/master
|
comments/tests/site.py
|
1
|
from django.test import TestCase
from comments.forms import SiteForm
class SiteTestCase(TestCase):
pass
class SiteFormTestCase(TestCase):
def test_form_validation_success(self):
domain = 'www.google.com'
form = SiteForm(data={'domain': domain})
self.assertTrue(form.is_valid())
site = form.save()
self.assertEqual(site.domain, domain)
def test_url_contains_protocol_success(self):
domain = 'https://www.google.com'
form = SiteForm(data={'domain': domain})
self.assertTrue(form.is_valid())
site = form.save()
self.assertEqual(site.domain, "www.google.com")
def test_wrong_url_no_success(self):
domain = 'mailto:foc@example.com?subjects=pajcek.lala.net'
form = SiteForm(data={'domain': domain})
self.assertFalse(form.is_valid())
|
rec/DMXIS
|
refs/heads/master
|
Macros/Python/uu.py
|
10
|
#! /usr/bin/env python
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
pass
def encode(in_file, out_file, name=None, mode=None):
"""Uuencode file"""
#
# If in_file is a pathname open it and change defaults
#
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file).st_mode
except AttributeError:
pass
in_file = open(in_file, 'rb')
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
out_file = open(out_file, 'w')
#
# Set defaults for name and mode
#
if name is None:
name = '-'
if mode is None:
mode = 0666
#
# Write the data
#
out_file.write('begin %o %s\n' % ((mode&0777),name))
data = in_file.read(45)
while len(data) > 0:
out_file.write(binascii.b2a_uu(data))
data = in_file.read(45)
out_file.write(' \nend\n')
def decode(in_file, out_file=None, mode=None, quiet=0):
"""Decode uuencoded file"""
#
# Open the input file, if needed.
#
if in_file == '-':
in_file = sys.stdin
elif isinstance(in_file, basestring):
in_file = open(in_file)
#
# Read until a begin is encountered or we've exhausted the file
#
while True:
hdr = in_file.readline()
if not hdr:
raise Error('No valid begin line found in input file')
if not hdr.startswith('begin'):
continue
hdrfields = hdr.split(' ', 2)
if len(hdrfields) == 3 and hdrfields[0] == 'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
out_file = hdrfields[2].rstrip()
if os.path.exists(out_file):
raise Error('Cannot overwrite existing file: %s' % out_file)
if mode is None:
mode = int(hdrfields[1], 8)
#
# Open the output file
#
opened = False
if out_file == '-':
out_file = sys.stdout
elif isinstance(out_file, basestring):
fp = open(out_file, 'wb')
try:
os.chmod(out_file, mode)
except (AttributeError, OSError):
pass
out_file = fp
opened = True
#
# Main decoding loop
#
s = in_file.readline()
while s and s.strip() != 'end':
try:
data = binascii.a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % v)
out_file.write(data)
s = in_file.readline()
if not s:
raise Error('Truncated input file')
if opened:
out_file.close()
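# Round-trip sketch (file names are illustrative): encode a binary file, then
# decode it back; note that decode() refuses to overwrite an existing file.
# encode('payload.bin', 'payload.uu')
# decode('payload.uu', 'payload_copy.bin')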
def test():
"""uuencode/uudecode main program"""
import optparse
parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text', default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) > 2:
parser.error('incorrect number of arguments')
sys.exit(1)
input = sys.stdin
output = sys.stdout
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if options.decode:
if options.text:
if isinstance(output, basestring):
output = open(output, 'w')
else:
print sys.argv[0], ': cannot do -t to stdout'
sys.exit(1)
decode(input, output)
else:
if options.text:
if isinstance(input, basestring):
input = open(input, 'r')
else:
print sys.argv[0], ': cannot do -t from stdin'
sys.exit(1)
encode(input, output)
if __name__ == '__main__':
test()
|
psaux/huhamhire-hosts
|
refs/heads/master
|
gui/style_rc.py
|
24
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Jan 22 13:03:07 2014
# by: The Resource Compiler for PyQt (Qt v4.8.5)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x01\x57\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x09\x08\x06\x00\x00\x00\xe0\x91\x06\x10\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\
\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\xe9\x00\x00\x75\
\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\x6f\x92\x5f\xc5\
\x46\x00\x00\x00\xdd\x49\x44\x41\x54\x78\xda\x5c\x8e\xb1\x4e\x84\
\x40\x18\x84\x67\xef\x4c\x2c\xc8\xd9\x2c\x0d\x58\x50\x1b\x0b\xc3\
\xfa\x24\x77\xbd\x0d\x85\x4f\x40\x0b\xbb\xcb\x3b\xd0\x68\x41\x72\
\xc5\xd2\x28\x4f\x02\xcf\xb1\x97\x40\x61\xd4\xc2\xc4\x62\x2c\xbc\
\x4d\xd0\x49\xfe\xbf\xf8\x32\xff\x3f\x23\x48\xc2\x5a\x3b\x00\x80\
\xd6\xfa\x80\xb3\xac\xb5\x03\x49\x18\x63\x0e\x5b\x21\xc4\x90\xe7\
\xf9\x3e\x49\x92\x9b\xbe\xef\xef\xca\xb2\x7c\xf5\xde\xbf\x04\xe6\
\x9c\xbb\xbd\x20\xf9\x19\xae\x95\x52\xfb\x2c\xcb\xbe\xa5\x94\x01\
\x81\xe4\x9b\x38\xbf\x3c\x2a\xa5\x1e\xf0\x4f\xe3\x38\x3e\x37\x4d\
\xf3\x28\x48\x02\x00\xba\xae\x7b\x97\x52\xee\x82\x61\x59\x96\x8f\
\xa2\x28\xae\x00\x60\x03\x00\xc6\x98\xe3\xda\x00\x00\x71\x1c\xef\
\xb4\xd6\x4f\x00\xb0\x05\xf0\x27\x6a\x9e\x67\x44\x51\x04\x00\x48\
\xd3\xf4\xde\x39\x77\xbd\x21\xf9\xb5\xea\x70\x6a\xdb\xf6\x72\x9a\
\xa6\xd3\xaa\xf8\xef\xaa\xeb\xda\x57\x55\xe5\x49\x22\xcc\x9a\xfd\
\x0c\x00\x24\xab\x6e\xfa\x96\x21\xfc\xb8\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xf0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x07\x00\x00\x00\x05\x08\x04\x00\x00\x00\x23\x93\x3e\x53\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x03\x18\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\x63\x60\x60\x9e\xe0\xe8\xe2\xe4\xca\x24\
\xc0\xc0\x50\x50\x54\x52\xe4\x1e\xe4\x18\x19\x11\x19\xa5\xc0\x7e\
\x9e\x81\x8d\x81\x99\x81\x81\x81\x81\x81\x21\x31\xb9\xb8\xc0\x31\
\x20\xc0\x87\x81\x81\x81\x21\x2f\x3f\x2f\x95\x01\x15\x30\x32\x30\
\x7c\xbb\xc6\xc0\xc8\xc0\xc0\xc0\x70\x59\xd7\xd1\xc5\xc9\x95\x81\
\x34\xc0\x9a\x5c\x50\x54\xc2\xc0\xc0\x70\x80\x81\x81\xc1\x28\x25\
\xb5\x38\x99\x81\x81\xe1\x0b\x03\x03\x43\x7a\x79\x49\x41\x09\x03\
\x03\x63\x0c\x03\x03\x83\x48\x52\x76\x41\x09\x03\x03\x63\x01\x03\
\x03\x83\x48\x76\x48\x90\x33\x03\x03\x63\x0b\x03\x03\x13\x4f\x49\
\x6a\x45\x09\x03\x03\x03\x83\x73\x7e\x41\x65\x51\x66\x7a\x46\x89\
\x82\xa1\xa5\xa5\xa5\x82\x63\x4a\x7e\x52\xaa\x42\x70\x65\x71\x49\
\x6a\x6e\xb1\x82\x67\x5e\x72\x7e\x51\x41\x7e\x51\x62\x49\x6a\x0a\
\x03\x03\x03\xd4\x0e\x06\x06\x06\x06\x5e\x97\xfc\x12\x05\xf7\xc4\
\xcc\x3c\x05\x23\x03\x55\x06\x2a\x83\x88\xc8\x28\x05\x08\x0b\x11\
\x3e\x08\x31\x04\x48\x2e\x2d\x2a\x83\x07\x25\x03\x83\x00\x83\x02\
\x83\x01\x83\x03\x43\x00\x43\x22\x43\x3d\xc3\x02\x86\xa3\x0c\x6f\
\x18\xc5\x19\x5d\x18\x4b\x19\x57\x30\xde\x63\x12\x63\x0a\x62\x9a\
\xc0\x74\x81\x59\x98\x39\x92\x79\x21\xf3\x1b\x16\x4b\x96\x0e\x96\
\x5b\xac\x7a\xac\xad\xac\xf7\xd8\x2c\xd9\xa6\xb1\x7d\x63\x0f\x67\
\xdf\xcd\xa1\xc4\xd1\xc5\xf1\x85\x33\x91\xf3\x02\x97\x23\xd7\x16\
\x6e\x4d\xee\x05\x3c\x52\x3c\x53\x79\x85\x78\x27\xf1\x09\xf3\x4d\
\xe3\x97\xe1\x5f\x2c\xa0\x23\xb0\x43\xd0\x55\xf0\x8a\x50\xaa\xd0\
\x0f\xe1\x5e\x11\x15\x91\xbd\xa2\xe1\xa2\x5f\xc4\x26\x89\x1b\x89\
\x5f\x91\xa8\x90\x94\x93\x3c\x26\x95\x2f\x2d\x2d\x7d\x42\xa6\x4c\
\x56\x5d\xf6\x96\x5c\x9f\xbc\x8b\xfc\x1f\x85\xad\x8a\x85\x4a\x7a\
\x4a\x6f\x95\xd7\xaa\x14\xa8\x9a\xa8\xfe\x54\x3b\xa8\xde\xa5\x11\
\xaa\xa9\xa4\xf9\x41\xeb\x80\xf6\x24\x9d\x54\x5d\x2b\x3d\x41\xbd\
\x57\xfa\x47\x0c\x16\x18\xd6\x1a\xc5\x18\xdb\x9a\xc8\x9b\x32\x9b\
\xbe\x34\xbb\x60\xbe\xd3\x62\x89\xe5\x04\xab\x3a\xeb\x5c\x9b\x38\
\xdb\x40\x3b\x57\x7b\x6b\x07\x63\x47\x1d\x27\x35\x67\x25\x17\x05\
\x57\x79\x37\x05\x77\x65\x0f\x75\x4f\x5d\x2f\x13\x6f\x1b\x1f\x77\
\xdf\x60\xbf\x04\xff\xfc\x80\xfa\xc0\x89\x41\x4b\x83\x77\x85\x5c\
\x0c\x7d\x19\xce\x14\x21\x17\x69\x15\x15\x11\x5d\x11\x33\x33\x76\
\x4f\xdc\x83\x04\xb6\x44\xdd\xa4\xb0\xe4\x86\x94\x35\xa9\x37\xd3\
\x39\x32\x2c\x32\x33\xb3\xe6\x66\x5f\xcc\x65\xcf\xb3\xcf\xaf\x28\
\xd8\x54\xf8\xae\x58\xbb\x24\xab\x74\x55\xd9\x9b\x0a\xfd\xca\x92\
\xaa\x5d\x35\x8c\xb5\x5e\x75\x53\xeb\x1f\x36\xea\x35\xd5\x34\x9f\
\x6d\x95\x6b\x2b\x6c\x3f\xda\x29\xdd\x55\xd4\x7d\xba\x57\xb5\xaf\
\xb1\xff\xee\x44\x9b\x49\xb3\x27\xff\x9d\x1a\x3f\xed\xf0\x0c\x8d\
\x99\xfd\xb3\xbe\xcf\x49\x98\x7b\x7a\xbe\xf9\x82\xa5\x8b\x44\x16\
\xb7\x2e\xf9\xb6\x2c\x73\xf9\xbd\x95\x21\xab\x4e\xaf\x71\x59\xbb\
\x6f\xbd\xe5\x86\x6d\x9b\x4c\x36\x6f\xd9\x6a\xb2\x6d\xfb\x0e\xab\
\x9d\xfb\x77\xbb\xee\x39\xbb\x2f\x6c\xff\x83\x83\x39\x87\x7e\x1e\
\x69\x3f\x26\x7e\x7c\xc5\x49\xeb\x53\xe7\xce\x24\x9f\xfd\x75\x7e\
\xd2\x45\xed\x4b\x47\xaf\x24\x5e\xfd\x77\x7d\xce\x4d\x9b\x5b\x77\
\xef\xd4\xdf\x53\xbe\x7f\xe2\x61\xde\x63\xb1\x27\xfb\x9f\x65\xbe\
\x10\x79\x79\xf0\x75\xfe\x5b\xf9\x77\x17\x3e\x34\x7d\x32\xfd\xfc\
\xea\xeb\x82\xef\xe1\x3f\x05\x7e\x9d\xfa\xd3\xfa\xcf\xf1\xff\x7f\
\x00\x0d\x00\x0f\x34\xfa\x96\xf1\x5d\x00\x00\x00\x20\x63\x48\x52\
\x4d\x00\x00\x7a\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\
\xe9\x00\x00\x75\x30\x00\x00\xea\x60\x00\x00\x3a\x98\x00\x00\x17\
\x6f\x92\x5f\xc5\x46\x00\x00\x00\x52\x49\x44\x41\x54\x78\xda\x62\
\x58\xf5\xe9\xca\x3f\x18\x5c\xfe\x9e\x21\xd3\xff\xc4\x8f\xab\xbf\
\xaf\xfe\xbe\xfa\xfb\xd0\x97\x68\x63\x86\xff\x0c\x85\x6b\xf7\x7e\
\xdc\xfb\x71\xf3\x87\xcc\xbc\xff\x0c\x0c\xff\x19\x18\x98\x73\xce\
\xce\xbd\x1f\x39\xff\x3f\xc3\x7f\x06\x86\xff\x0c\xff\x19\x14\xdd\
\x2c\xb6\xfe\x67\xf8\xcf\xf0\x9f\x01\x30\x00\x6a\x5f\x2c\x67\x74\
\xda\xec\xfb\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x03\
\x00\x00\x78\xa3\
\x00\x71\
\x00\x73\x00\x73\
\x00\x03\
\x00\x00\x78\xc3\
\x00\x72\
\x00\x65\x00\x73\
\x00\x03\
\x00\x00\x70\x37\
\x00\x69\
\x00\x6d\x00\x67\
\x00\x05\
\x00\x7a\xc0\x25\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\
\x00\x0c\
\x04\x56\x23\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x04\xa2\xfc\xa7\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x24\x00\x02\x00\x00\x00\x02\x00\x00\x00\x05\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x01\x5b\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
monitoringartist/zabbix-xxl
|
refs/heads/master
|
Dockerfile/dockbix-xxl-3.4/container-files-dockbix-xxl/usr/local/src/zabbix/supervisord-listener.py
|
4
|
#! /usr/bin/python
import sys
import subprocess
def write_stdout(s):
sys.stdout.write(s)
sys.stdout.flush()
def write_stderr(s):
sys.stderr.write(s)
sys.stderr.flush()
def main(args):
while 1:
write_stdout('READY\n') # transition from ACKNOWLEDGED to READY
line = sys.stdin.readline() # read header line from stdin
write_stderr(line) # print it out to stderr
headers = dict([ x.split(':') for x in line.split() ])
data = sys.stdin.read(int(headers['len'])) # read the event payload
subprocess.call(args, stdout=sys.stderr) # don't mess with real stdout
write_stderr(data)
write_stdout('RESULT 2\nOK') # transition from READY to ACKNOWLEDGED
if __name__ == '__main__':
main(sys.argv[1:])
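# Illustrative supervisord configuration for wiring up this listener; the
# section name, command path and event list are examples only:
# [eventlistener:zabbix-restarter]
# command=/usr/local/src/zabbix/supervisord-listener.py /usr/local/bin/restart-zabbix.sh
# events=PROCESS_STATE_FATAL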
|
dmitriy0611/django
|
refs/heads/master
|
django/core/management/templates.py
|
34
|
import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
from os import path
import django
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive
from django.utils.six.moves.urllib.request import urlretrieve
from django.utils.version import get_docs_version
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
requires_system_checks = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
# Can't perform any active locale changes during this command, because
# setting might not be available at all.
leave_locale_alone = True
def add_arguments(self, parser):
parser.add_argument('name', help='Name of the application or project.')
parser.add_argument('directory', nargs='?', help='Optional destination directory')
parser.add_argument('--template',
help='The path or URL to load the template from.')
parser.add_argument('--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.')
parser.add_argument('--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. '
'Separate multiple file names with commas, or use '
'-n multiple times.')
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = options['verbosity']
self.validate_name(name, app_or_project)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(handle_extensions(options['extensions']))
extra_files = []
for file in options['files']:
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
'docs_version': get_docs_version(),
'django_version': django.__version__,
}), autoescape=False)
# Set up a stub settings environment for template rendering
from django.conf import settings
if not settings.configured:
settings.configure()
template_dir = self.handle_template(options['template'],
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
# accidentally render Django templates files
with open(old_path, 'rb') as template_file:
content = template_file.read()
if filename.endswith(extensions) or filename in extra_files:
content = content.decode('utf-8')
template = Engine().from_string(content)
content = template.render(context)
content = content.encode('utf-8')
with open(new_path, 'wb') as new_file:
new_file.write(content)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def validate_name(self, name, app_or_project):
if name is None:
raise CommandError("you must provide %s %s name" % (
"an" if app_or_project == "app" else "a", app_or_project))
# If it's not a valid directory name.
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
def download(self, url):
"""
Downloads the given URL and returns the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
# chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
"""
Extracts the given file to a temporary directory and returns
the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""
Returns True if the name looks like a URL
"""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
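# Usage sketch: startapp/startproject build on this command, so a template can
# come from a local directory or a downloadable archive. The URL below is
# illustrative:
# django-admin startapp myapp --template=https://example.com/app_template.tar.gz -e py,rst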
|
pravsalam/clamav-antivirus
|
refs/heads/master
|
libclamav/c++/llvm/utils/lit/lit/ShUtil.py
|
28
|
import itertools
import Util
from ShCommands import Command, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
self.data = data
self.pos = 0
self.end = len(data)
self.win32Escapes = win32Escapes
def eat(self):
c = self.data[self.pos]
self.pos += 1
return c
def look(self):
return self.data[self.pos]
def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False
def lex_arg_fast(self, c):
# Get the leading whitespace free section.
chunk = self.data[self.pos - 1:].split(None, 1)[0]
# If it has special characters, the fast path failed.
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
'\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
return chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
# we don't have to track whitespace tokens.
# If the parse string isn't an integer, do the usual thing.
if not str.isdigit():
break
# Otherwise, lex the operator and convert to a redirection
# token.
num = int(str)
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
elif c == '"':
self.eat()
str += self.lex_arg_quoted('"')
elif c == "'":
self.eat()
str += self.lex_arg_quoted("'")
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
Util.warning("escape at end of quoted argument in: %r" %
self.data)
return str
str += self.eat()
else:
str += self.eat()
return str
def lex_arg_quoted(self, delim):
str = ''
while self.pos != self.end:
c = self.eat()
if c == delim:
return str
elif c == '\\' and delim == '"':
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
Util.warning("escape at end of quoted argument in: %r" %
self.data)
return str
c = self.eat()
if c == '"': #
str += '"'
elif c == '\\':
str += '\\'
else:
str += '\\' + c
else:
str += c
Util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
pos = self.pos
res = self.lex_arg_fast(c)
end = self.pos
self.pos = pos
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
raise ValueError,"Fast path failure: %r != %r" % (res, reference)
if self.pos != end:
raise ValueError,"Fast path failure: %r != %r" % (self.pos, end)
return reference
def lex_arg(self, c):
return self.lex_arg_fast(c) or self.lex_arg_slow(c)
def lex_one_token(self):
"""
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
if c in ';!':
return (c,)
if c == '|':
if self.maybe_eat('|'):
return ('||',)
return (c,)
if c == '&':
if self.maybe_eat('&'):
return ('&&',)
if self.maybe_eat('>'):
return ('&>',)
return (c,)
if c == '>':
if self.maybe_eat('&'):
return ('>&',)
if self.maybe_eat('>'):
return ('>>',)
return (c,)
if c == '<':
if self.maybe_eat('&'):
return ('<&',)
if self.maybe_eat('>'):
return ('<<',)
return (c,)
return self.lex_arg(c)
def lex(self):
while self.pos != self.end:
if self.look().isspace():
self.eat()
else:
yield self.lex_one_token()
###
class ShParser:
def __init__(self, data, win32Escapes = False):
self.data = data
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
try:
return self.tokens.next()
except StopIteration:
return None
def look(self):
next = self.lex()
if next is not None:
self.tokens = itertools.chain([next], self.tokens)
return next
def parse_command(self):
tok = self.lex()
if not tok:
raise ValueError,"empty command!"
if isinstance(tok, tuple):
raise ValueError,"syntax error near unexpected token %r" % tok[0]
args = [tok]
redirects = []
while 1:
tok = self.look()
# EOF?
if tok is None:
break
# If this is an argument, just add it to the current command.
if isinstance(tok, str):
args.append(self.lex())
continue
# Otherwise see if it is a terminator.
assert isinstance(tok, tuple)
if tok[0] in ('|',';','&','||','&&'):
break
# Otherwise it must be a redirection.
op = self.lex()
arg = self.lex()
if not arg:
raise ValueError,"syntax error near token %r" % op[0]
redirects.append((op, arg))
return Command(args, redirects)
def parse_pipeline(self):
negate = False
if self.look() == ('!',):
self.lex()
negate = True
commands = [self.parse_command()]
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
return Pipeline(commands, negate)
def parse(self):
lhs = self.parse_pipeline()
while self.look():
operator = self.lex()
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
raise ValueError, "missing argument to operator %r" % operator[0]
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
return lhs
###
import unittest
class TestShLexer(unittest.TestCase):
def lex(self, str, *args, **kwargs):
return list(ShLexer(str, *args, **kwargs).lex())
def test_basic(self):
self.assertEqual(self.lex('a|b>c&d<e'),
['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
('<',), 'e'])
def test_redirection_tokens(self):
self.assertEqual(self.lex('a2>c'),
['a2', ('>',), 'c'])
self.assertEqual(self.lex('a 2>c'),
['a', ('>',2), 'c'])
def test_quoting(self):
self.assertEqual(self.lex(""" 'a' """),
['a'])
self.assertEqual(self.lex(""" "hello\\"world" """),
['hello"world'])
self.assertEqual(self.lex(""" "hello\\'world" """),
["hello\\'world"])
self.assertEqual(self.lex(""" "hello\\\\world" """),
["hello\\world"])
self.assertEqual(self.lex(""" he"llo wo"rld """),
["hello world"])
self.assertEqual(self.lex(""" a\\ b a\\\\b """),
["a b", "a\\b"])
self.assertEqual(self.lex(""" "" "" """),
["", ""])
self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
['a\\', 'b'])
class TestShParse(unittest.TestCase):
def parse(self, str):
return ShParser(str).parse()
def test_basic(self):
self.assertEqual(self.parse('echo hello'),
Pipeline([Command(['echo', 'hello'], [])], False))
self.assertEqual(self.parse('echo ""'),
Pipeline([Command(['echo', ''], [])], False))
self.assertEqual(self.parse("""echo -DFOO='a'"""),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
self.assertEqual(self.parse('echo -DFOO="a"'),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
def test_redirection(self):
self.assertEqual(self.parse('echo hello > c'),
Pipeline([Command(['echo', 'hello'],
[((('>'),), 'c')])], False))
self.assertEqual(self.parse('echo hello > c >> d'),
Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
(('>>',), 'd')])], False))
self.assertEqual(self.parse('a 2>&1'),
Pipeline([Command(['a'], [(('>&',2), '1')])], False))
def test_pipeline(self):
self.assertEqual(self.parse('a | b'),
Pipeline([Command(['a'], []),
Command(['b'], [])],
False))
self.assertEqual(self.parse('a | b | c'),
Pipeline([Command(['a'], []),
Command(['b'], []),
Command(['c'], [])],
False))
self.assertEqual(self.parse('! a'),
Pipeline([Command(['a'], [])],
True))
def test_list(self):
self.assertEqual(self.parse('a ; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a & b'),
Seq(Pipeline([Command(['a'], [])], False),
'&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b'),
Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a || b'),
Seq(Pipeline([Command(['a'], [])], False),
'||',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b || c'),
Seq(Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)),
'||',
Pipeline([Command(['c'], [])], False)))
if __name__ == '__main__':
unittest.main()
|
cwyark/micropython
|
refs/heads/master
|
esp8266/modules/onewire.py
|
12
|
# 1-Wire driver for MicroPython on ESP8266
# MIT license; Copyright (c) 2016 Damien P. George
from micropython import const
import _onewire as _ow
class OneWireError(Exception):
pass
class OneWire:
SEARCH_ROM = const(0xf0)
MATCH_ROM = const(0x55)
SKIP_ROM = const(0xcc)
def __init__(self, pin):
self.pin = pin
self.pin.init(pin.OPEN_DRAIN)
def reset(self, required=False):
reset = _ow.reset(self.pin)
if required and not reset:
raise OneWireError
return reset
def readbit(self):
return _ow.readbit(self.pin)
def readbyte(self):
return _ow.readbyte(self.pin)
def readinto(self, buf):
for i in range(len(buf)):
buf[i] = _ow.readbyte(self.pin)
def writebit(self, value):
return _ow.writebit(self.pin, value)
def writebyte(self, value):
return _ow.writebyte(self.pin, value)
def write(self, buf):
for b in buf:
_ow.writebyte(self.pin, b)
def select_rom(self, rom):
self.reset()
self.writebyte(MATCH_ROM)
self.write(rom)
def scan(self):
devices = []
diff = 65
rom = False
for i in range(0xff):
rom, diff = self._search_rom(rom, diff)
if rom:
devices += [rom]
if diff == 0:
break
return devices
def _search_rom(self, l_rom, diff):
if not self.reset():
return None, 0
self.writebyte(SEARCH_ROM)
if not l_rom:
l_rom = bytearray(8)
rom = bytearray(8)
next_diff = 0
i = 64
for byte in range(8):
r_b = 0
for bit in range(8):
b = self.readbit()
if self.readbit():
if b: # there are no devices or there is an error on the bus
return None, 0
else:
if not b: # collision, two devices with different bit meaning
if diff > i or ((l_rom[byte] & (1 << bit)) and diff != i):
b = 1
next_diff = i
self.writebit(b)
if b:
r_b |= 1 << bit
i -= 1
rom[byte] = r_b
return rom, next_diff
def crc8(self, data):
return _ow.crc8(data)
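# Usage sketch (the GPIO pin number is illustrative; use the pin your 1-Wire
# bus is actually wired to):
# from machine import Pin
# ow = OneWire(Pin(12))
# print(ow.scan()) # list of 8-byte ROM codes, one per device on the bus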
|
ibinti/intellij-community
|
refs/heads/master
|
python/testData/mover/commentUp.py
|
83
|
def f():
if True:
a = 1
else:
a = 2
#comment<caret>
|
QuickSander/CouchPotatoServer
|
refs/heads/master
|
libs/requests/packages/chardet/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber does not use any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores it maintains and
# both model probers' scores. The answer is returned in the form of the
# charset identified, either "windows-1255" or "ISO-8859-8".
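#
# A minimal sketch of that wiring as it would appear in the group prober
# (identifier names such as SingleByteCharSetProber and Win1255Model
# follow the description above and are assumptions, not definitions made
# in this module):
#
#   hebrew_prober = HebrewProber()
#   logical_prober = SingleByteCharSetProber(Win1255Model,
#                                            False, hebrew_prober)
#   visual_prober = SingleByteCharSetProber(Win1255Model,
#                                           True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)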
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
        # mPrev and mBeforePrev hold the last two characters seen in the
        # previous buffer; they are initialized to a space in order to
        # simulate a word delimiter at the beginning of the data.
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
        # The letters Pe and Kaf can, more rarely, show a similar problem:
        # loanwords like 'Pop', 'Winamp' and 'Mubarak' legitimately end
        # with a Non-Final Pe or Kaf. However, since such words are quite
        # rare, the benefit of treating these letters as Non-Final
        # outweighs the damage.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
iparanza/earthenterprise
|
refs/heads/master
|
earth_enterprise/src/support/parse_khhttpd_access_log.py
|
9
|
#! /usr/bin/python2.4
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# parse_khhttpd_access_log.py
#
# Parses an input GEE server log, searching for imagery requests.
# Output is either:
# 1. a CSV containing lat/lon/level (and other info) for each imagery request, or
# 2. a KML file with placemarks at each imagery request location, where the name of
# the placemark is the number of requests seen at that location.
#
# TODO: separate out KML output code into routine like CSV already is.
# TODO: compile imagery-recognition regexp in KML routine like CSV does.
# TODO: read initially into quad_dict instead of making list and de-duplicating.
# TODO: remove IP logging so Earth users aren't concerned about watching their use.
# TODO: determine output type from extension on output file
# TODO: pass output file into KML class and just my_kml.openDoc() etc.
#
import re
import sys
def Usage():
'''Tell the user how the program should be invoked.'''
    print 'Usage:\n'
    print '  parse_khhttpd_access_log.py <input_file> <output_file> <file_type>\n'
    print 'Example: parse_khhttpd_access_log.py khhttpd_access_log access_log.kml kml\n'
    print '     or: parse_khhttpd_access_log.py khhttpd_access_log access_log.csv csv\n'
def main():
if len(sys.argv) < 4:
Usage()
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
filetype = sys.argv[3].lower()
input = open(infile).readlines()
output = open(outfile, 'w')
if filetype == 'csv':
MakeCSV(input, output)
        sys.exit(0)  # CSV written successfully; skip the KML path below.
quad_addrs = []
for line in input:
quad_addr = ParseQuad(line)
if quad_addr:
quad_addrs.append(quad_addr)
quad_dict = DeDupeQuads(quad_addrs)
my_kml = KML()
output.write(my_kml.openDoc('0'))
output.write(my_kml.openFolder(infile, '1'))
for addr in quad_dict.keys():
xy_coords = ProcessQuad(addr)
count = quad_dict[addr]
output.write(my_kml.MakePoint(xy_coords, count))
output.write(my_kml.closeFolder())
output.write(my_kml.closeDoc())
#######################################################################################
def ParseQuad(line):
'''Check for imagery (q2) requests and parse out quad-tree address.'''
quad_regex = re.compile(r'.*q2-(.*)-')
quad_match = quad_regex.match(line)
if quad_match:
quad_addr = quad_match.group(1)
return quad_addr
def DeDupeQuads(quad_addrs):
'''Identify unique quad-tree addresses and keep track of their use.'''
quad_dict = {}
for address in quad_addrs:
if address not in quad_dict:
quad_dict[address] = 1
else:
quad_dict[address] += 1
return quad_dict
#######################################################################################
def MakeCSV(input, output):
'''Parse the input log file and create pipe-delimited "|" output.'''
header = 'ip|date|lon|lat|level|req_code|bytes\n'
output.write(header)
image_regex = re.compile(r'.*q2-.*-')
for line in input:
line_match = image_regex.match(line)
if line_match:
ip_match = re.match(r'^(.+?)\s-', line)
ip = ip_match.group(1)
date_match = re.match(r'.*\[(.+?)\]', line)
date = date_match.group(1)
quad_match = re.match(r'.*q2-(.*)-', line)
quad = quad_match.group(1)
xy_coords = ProcessQuad(quad_match.group(1))
lon = xy_coords[0]
lat = xy_coords[1]
level = len(quad_match.group(1))
apache_codes_match = re.match(r'.*\s(\d+?\s\d+?)$', line)
apache_codes = apache_codes_match.group(1)
req_code = apache_codes.split()[0]
bytes = apache_codes.split()[1]
csv_string = '%s|%s|%f|%f|%s|%s|%s\n' % (ip, date, lon, lat, level, req_code, bytes)
output.write(csv_string)
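# For illustration, a hypothetical khhttpd access-log line (the exact log
# format is an assumption here) that the regexes above would accept:
#
#   10.1.2.3 - - [12/Mar/2008:10:33:21 -0800] \
#       "GET /default_map/flatfile?q2-0320-q.7 HTTP/1.1" 200 1460
#
# would yield ip='10.1.2.3', quad='0320', level=4, req_code='200' and
# bytes='1460'.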
#######################################################################################
def ProcessQuad(addr):
'''Convert the quad address string into a list and send it off for coords.'''
    tile_list = list(addr)
    tile_list.reverse()
    tile_list.pop()  # Discard the root '0' tile shared by every address.
xy_range = 180.0
x_coord = 0.0
y_coord = 0.0
new_coords = Quad2GCS(tile_list, x_coord, y_coord, xy_range)
return new_coords
def Quad2GCS(addr_list, x_coord, y_coord, xy_range):
'''Drill down through quad-tree to get final x,y coords.'''
if not addr_list:
new_coords = (x_coord, y_coord)
return new_coords
else:
tile_addr = addr_list.pop()
new_range = xy_range/2
if tile_addr == '0':
x_coord -= new_range
y_coord -= new_range
if tile_addr == '1':
x_coord += new_range
y_coord -= new_range
if tile_addr == '2':
x_coord += new_range
y_coord += new_range
if tile_addr == '3':
x_coord -= new_range
y_coord += new_range
return Quad2GCS(addr_list, x_coord, y_coord, new_range)
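# A quick worked example, derived from the two routines above: for the quad
# address '02' the root '0' is stripped and the remaining '2' selects the
# upper-right quadrant, halving the 180-degree range once:
#
#   ProcessQuad('02')   # returns (90.0, 90.0)
#   ProcessQuad('03')   # returns (-90.0, 90.0)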
#######################################################################################
class KML:
    '''Builds KML objects.'''
def openDoc(self, visibility):
        '''Return the KML header and open the root-level Document element.
        Takes a visibility toggle of "0" or "1" as input and sets the
        Document <open> attribute accordingly.
        '''
self.visibility = visibility
kml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<kml xmlns="http://earth.google.com/kml/2.1">\n'
'<Document>\n'
'<open>%s</open>\n') % (self.visibility)
return kml
def openFolder(self, name, visibility):
'''Creates folder element and sets the "<name>" and "<open>" attributes.
Takes folder name string and visibility toggle of "0" or "1" as input
values.
'''
kml = '<Folder>\n<open>%s</open>\n<name>%s</name>\n' % (visibility, name)
return kml
def MakePoint(self, coords, name):
'''Create point placemark.'''
x = coords[0]
y = coords[1]
kml = ('<Placemark>\n'
'<name>%s</name>\n'
'<visibility>1</visibility>\n'
'<Point>\n'
'<coordinates>%f, %f</coordinates>\n'
'</Point>\n'
'</Placemark>\n\n') % (name, x, y)
return kml
def closeFolder(self):
'''Closes folder element.'''
kml = '</Folder>\n'
return kml
def closeDoc(self):
'''Closes KML document'''
kml = '</Document>\n</kml>\n'
return kml
#######################################################################################
if __name__ == '__main__':
main()
|