repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
oasiswork/odoo | refs/heads/8.0 | addons/account_analytic_analysis/res_config.py | 426 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_template_required': fields.boolean("Mandatory use of templates.",
implied_group='account_analytic_analysis.group_template_required',
help="Allows you to set the template field as required when creating an analytic account or a contract."),
}
|
jhoon2816/KU-PL | refs/heads/master | qna/urls.py | 1 | from django.conf.urls import include
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.question_list, name='question_list'),
url(r'^qna/(?P<pk>[0-9]+)/$', views.question_detail, name='question_detail'),
url(r'^(?P<pk>[0-9]+)/edit/$', views.question_edit, name='question_edit'),
url(r'^new/$', views.question_new, name='question_new'),
url(r'^(?P<pk>[0-9]+)/remove/$', views.question_remove, name='question_remove'),
]
|
guettli/django | refs/heads/master | tests/validation/test_custom_messages.py | 519 | from . import ValidationTestCase
from .models import CustomMessagesModel
class CustomMessagesTest(ValidationTestCase):
def test_custom_simple_validator_message(self):
cmm = CustomMessagesModel(number=12)
self.assertFieldFailsValidationWithMessage(cmm.full_clean, 'number', ['AAARGH'])
def test_custom_null_message(self):
cmm = CustomMessagesModel()
self.assertFieldFailsValidationWithMessage(cmm.full_clean, 'number', ['NULL'])
|
ForkedReposBak/mxnet | refs/heads/master | contrib/tvmop/basic/__init__.py | 13 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
from . import ufunc
|
KaelChen/numpy | refs/heads/master | numpy/polynomial/legendre.py | 75 | """
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D Legendre series.
legvander3d Vandermonde-like matrix for 3D Legendre series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Legendre(P.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -L.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
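Examples
--------
A small numeric check of the recursion above (exact output spacing
may vary with NumPy's print options):
>>> from numpy.polynomial import legendre as L
>>> L.legmulx([1, 2, 3])
array([ 0.66666667,  2.2       ,  1.33333333,  1.8       ])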
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two Legendre series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
>>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Legendre series of `c` raised to the power `pow`.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
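A short sketch for illustration: squaring ``P_0 + 2*P_1`` requires
reprojection onto the Legendre basis (exact output spacing may vary
with NumPy's print options):
>>> from numpy.polynomial import legendre as L
>>> L.legpow([1, 2], 2)
array([ 2.33333333,  4.        ,  2.66666667])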
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a Legendre series
needs to be "reprojected" onto the Legendre basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
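A minimal example for illustration, evaluating ``P_0 + 2*P_1 + 3*P_2``
at a scalar and at a list of points (exact output spacing may vary
with NumPy's print options):
>>> from numpy.polynomial import legendre as L
>>> L.legval(0.5, [1, 2, 3])
1.625
>>> L.legval([0., 0.5, 1.], [1, 2, 3])
array([-0.5  ,  1.625,  6.   ])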
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
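Examples
--------
A minimal sketch with an assumed 2x2 coefficient array, so that
``p(x, y) = 1 + 2*L_1(y) + 3*L_1(x) + 4*L_1(x)*L_1(y)``:
>>> from numpy.polynomial import legendre as L
>>> L.legval2d(0.5, 0.5, [[1, 2], [3, 4]])
4.5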
"""
try:
x, y = np.array((x, y), copy=0)
except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
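Examples
--------
A small consistency sketch using the same assumed coefficients as in
`legval2d`; the grid values are checked rather than printed to stay
independent of print formatting:
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> np.allclose(L.leggrid2d([0, 1], [0, 1], [[1, 2], [3, 4]]), [[1, 3], [4, 10]])
True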
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
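Examples
--------
A small sketch of the documented equivalence between the matrix
product and `legval` (coefficients assumed for illustration):
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.array([-1., 0., 1.])
>>> np.allclose(np.dot(L.legvander(x, 2), [1, 2, 3]), L.legval(x, [1, 2, 3]))
True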
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
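A minimal round-trip sketch: fit noiseless samples generated from the
assumed coefficients ``[1, 2, 3]`` and recover them (up to roundoff):
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.linspace(-1, 1, 51)
>>> y = L.legval(x, [1, 2, 3])
>>> np.round(L.legfit(x, y, 2), 8)
array([ 1.,  2.,  3.])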
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = legvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
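Examples
--------
A small consistency sketch: the eigenvalues of the companion matrix
agree with `legroots`, which uses this matrix internally:
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> m = L.legcompanion([1, 2, 3])
>>> np.allclose(np.sort(np.linalg.eigvals(m)), L.legroots([1, 2, 3]))
True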
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
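Examples
--------
A quick sanity check for illustration: a 3-point rule integrates
``x**4`` over [-1, 1] exactly (the true value is 2/5):
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x, w = L.leggauss(3)
>>> np.allclose(np.sum(w * x**4), 0.4)
True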
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
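Examples
--------
The weight is identically one (exact output spacing may vary with
NumPy's print options):
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> L.legweight(np.linspace(-1., 1., 3))
array([ 1.,  1.,  1.])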
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
|
ucrawler/cp-uc | refs/heads/master | libs/xmpp/protocol.py | 199 | ## protocol.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: protocol.py,v 1.60 2009/04/07 11:14:28 snakeru Exp $
"""
Protocol module contains tools that are needed for processing
XMPP-related data structures.
"""
from simplexml import Node,ustr
import time
NS_ACTIVITY ='http://jabber.org/protocol/activity' # XEP-0108
NS_ADDRESS ='http://jabber.org/protocol/address' # XEP-0033
NS_ADMIN ='http://jabber.org/protocol/admin' # XEP-0133
NS_ADMIN_ADD_USER =NS_ADMIN+'#add-user' # XEP-0133
NS_ADMIN_DELETE_USER =NS_ADMIN+'#delete-user' # XEP-0133
NS_ADMIN_DISABLE_USER =NS_ADMIN+'#disable-user' # XEP-0133
NS_ADMIN_REENABLE_USER =NS_ADMIN+'#reenable-user' # XEP-0133
NS_ADMIN_END_USER_SESSION =NS_ADMIN+'#end-user-session' # XEP-0133
NS_ADMIN_GET_USER_PASSWORD =NS_ADMIN+'#get-user-password' # XEP-0133
NS_ADMIN_CHANGE_USER_PASSWORD =NS_ADMIN+'#change-user-password' # XEP-0133
NS_ADMIN_GET_USER_ROSTER =NS_ADMIN+'#get-user-roster' # XEP-0133
NS_ADMIN_GET_USER_LASTLOGIN =NS_ADMIN+'#get-user-lastlogin' # XEP-0133
NS_ADMIN_USER_STATS =NS_ADMIN+'#user-stats' # XEP-0133
NS_ADMIN_EDIT_BLACKLIST =NS_ADMIN+'#edit-blacklist' # XEP-0133
NS_ADMIN_EDIT_WHITELIST =NS_ADMIN+'#edit-whitelist' # XEP-0133
NS_ADMIN_REGISTERED_USERS_NUM =NS_ADMIN+'#get-registered-users-num' # XEP-0133
NS_ADMIN_DISABLED_USERS_NUM =NS_ADMIN+'#get-disabled-users-num' # XEP-0133
NS_ADMIN_ONLINE_USERS_NUM =NS_ADMIN+'#get-online-users-num' # XEP-0133
NS_ADMIN_ACTIVE_USERS_NUM =NS_ADMIN+'#get-active-users-num' # XEP-0133
NS_ADMIN_IDLE_USERS_NUM =NS_ADMIN+'#get-idle-users-num' # XEP-0133
NS_ADMIN_REGISTERED_USERS_LIST =NS_ADMIN+'#get-registered-users-list' # XEP-0133
NS_ADMIN_DISABLED_USERS_LIST =NS_ADMIN+'#get-disabled-users-list' # XEP-0133
NS_ADMIN_ONLINE_USERS_LIST =NS_ADMIN+'#get-online-users-list' # XEP-0133
NS_ADMIN_ACTIVE_USERS_LIST =NS_ADMIN+'#get-active-users-list' # XEP-0133
NS_ADMIN_IDLE_USERS_LIST =NS_ADMIN+'#get-idle-users-list' # XEP-0133
NS_ADMIN_ANNOUNCE =NS_ADMIN+'#announce' # XEP-0133
NS_ADMIN_SET_MOTD =NS_ADMIN+'#set-motd' # XEP-0133
NS_ADMIN_EDIT_MOTD =NS_ADMIN+'#edit-motd' # XEP-0133
NS_ADMIN_DELETE_MOTD =NS_ADMIN+'#delete-motd' # XEP-0133
NS_ADMIN_SET_WELCOME =NS_ADMIN+'#set-welcome' # XEP-0133
NS_ADMIN_DELETE_WELCOME =NS_ADMIN+'#delete-welcome' # XEP-0133
NS_ADMIN_EDIT_ADMIN =NS_ADMIN+'#edit-admin' # XEP-0133
NS_ADMIN_RESTART =NS_ADMIN+'#restart' # XEP-0133
NS_ADMIN_SHUTDOWN =NS_ADMIN+'#shutdown' # XEP-0133
NS_AGENTS ='jabber:iq:agents' # XEP-0094 (historical)
NS_AMP ='http://jabber.org/protocol/amp' # XEP-0079
NS_AMP_ERRORS =NS_AMP+'#errors' # XEP-0079
NS_AUTH ='jabber:iq:auth' # XEP-0078
NS_AVATAR ='jabber:iq:avatar' # XEP-0008 (historical)
NS_BIND ='urn:ietf:params:xml:ns:xmpp-bind' # RFC 3920
NS_BROWSE ='jabber:iq:browse' # XEP-0011 (historical)
NS_BYTESTREAM ='http://jabber.org/protocol/bytestreams' # XEP-0065
NS_CAPS ='http://jabber.org/protocol/caps' # XEP-0115
NS_CHATSTATES ='http://jabber.org/protocol/chatstates' # XEP-0085
NS_CLIENT ='jabber:client' # RFC 3921
NS_COMMANDS ='http://jabber.org/protocol/commands' # XEP-0050
NS_COMPONENT_ACCEPT ='jabber:component:accept' # XEP-0114
NS_COMPONENT_1 ='http://jabberd.jabberstudio.org/ns/component/1.0' # Jabberd2
NS_COMPRESS ='http://jabber.org/protocol/compress' # XEP-0138
NS_DATA ='jabber:x:data' # XEP-0004
NS_DATA_LAYOUT ='http://jabber.org/protocol/xdata-layout' # XEP-0141
NS_DATA_VALIDATE ='http://jabber.org/protocol/xdata-validate' # XEP-0122
NS_DELAY ='jabber:x:delay' # XEP-0091 (deprecated)
NS_DIALBACK ='jabber:server:dialback' # RFC 3921
NS_DISCO ='http://jabber.org/protocol/disco' # XEP-0030
NS_DISCO_INFO =NS_DISCO+'#info' # XEP-0030
NS_DISCO_ITEMS =NS_DISCO+'#items' # XEP-0030
NS_ENCRYPTED ='jabber:x:encrypted' # XEP-0027
NS_EVENT ='jabber:x:event' # XEP-0022 (deprecated)
NS_FEATURE ='http://jabber.org/protocol/feature-neg' # XEP-0020
NS_FILE ='http://jabber.org/protocol/si/profile/file-transfer' # XEP-0096
NS_GATEWAY ='jabber:iq:gateway' # XEP-0100
NS_GEOLOC ='http://jabber.org/protocol/geoloc' # XEP-0080
NS_GROUPCHAT ='gc-1.0' # XEP-0045
NS_HTTP_BIND ='http://jabber.org/protocol/httpbind' # XEP-0124
NS_IBB ='http://jabber.org/protocol/ibb' # XEP-0047
NS_INVISIBLE ='presence-invisible' # Jabberd2
NS_IQ ='iq' # Jabberd2
NS_LAST ='jabber:iq:last' # XEP-0012
NS_MESSAGE ='message' # Jabberd2
NS_MOOD ='http://jabber.org/protocol/mood' # XEP-0107
NS_MUC ='http://jabber.org/protocol/muc' # XEP-0045
NS_MUC_ADMIN =NS_MUC+'#admin' # XEP-0045
NS_MUC_OWNER =NS_MUC+'#owner' # XEP-0045
NS_MUC_UNIQUE =NS_MUC+'#unique' # XEP-0045
NS_MUC_USER =NS_MUC+'#user' # XEP-0045
NS_MUC_REGISTER =NS_MUC+'#register' # XEP-0045
NS_MUC_REQUEST =NS_MUC+'#request' # XEP-0045
NS_MUC_ROOMCONFIG =NS_MUC+'#roomconfig' # XEP-0045
NS_MUC_ROOMINFO =NS_MUC+'#roominfo' # XEP-0045
NS_MUC_ROOMS =NS_MUC+'#rooms' # XEP-0045
NS_MUC_TRAFIC =NS_MUC+'#traffic' # XEP-0045
NS_NICK ='http://jabber.org/protocol/nick' # XEP-0172
NS_OFFLINE ='http://jabber.org/protocol/offline' # XEP-0013
NS_PHYSLOC ='http://jabber.org/protocol/physloc' # XEP-0112
NS_PRESENCE ='presence' # Jabberd2
NS_PRIVACY ='jabber:iq:privacy' # RFC 3921
NS_PRIVATE ='jabber:iq:private' # XEP-0049
NS_PUBSUB ='http://jabber.org/protocol/pubsub' # XEP-0060
NS_REGISTER ='jabber:iq:register' # XEP-0077
NS_RC ='http://jabber.org/protocol/rc' # XEP-0146
NS_ROSTER ='jabber:iq:roster' # RFC 3921
NS_ROSTERX ='http://jabber.org/protocol/rosterx' # XEP-0144
NS_RPC ='jabber:iq:rpc' # XEP-0009
NS_SASL ='urn:ietf:params:xml:ns:xmpp-sasl' # RFC 3920
NS_SEARCH ='jabber:iq:search' # XEP-0055
NS_SERVER ='jabber:server' # RFC 3921
NS_SESSION ='urn:ietf:params:xml:ns:xmpp-session' # RFC 3921
NS_SI ='http://jabber.org/protocol/si' # XEP-0096
NS_SI_PUB ='http://jabber.org/protocol/sipub' # XEP-0137
NS_SIGNED ='jabber:x:signed' # XEP-0027
NS_STANZAS ='urn:ietf:params:xml:ns:xmpp-stanzas' # RFC 3920
NS_STREAMS ='http://etherx.jabber.org/streams' # RFC 3920
NS_TIME ='jabber:iq:time' # XEP-0090 (deprecated)
NS_TLS ='urn:ietf:params:xml:ns:xmpp-tls' # RFC 3920
NS_VACATION ='http://jabber.org/protocol/vacation' # XEP-0109
NS_VCARD ='vcard-temp' # XEP-0054
NS_VCARD_UPDATE ='vcard-temp:x:update' # XEP-0153
NS_VERSION ='jabber:iq:version' # XEP-0092
NS_WAITINGLIST ='http://jabber.org/protocol/waitinglist' # XEP-0130
NS_XHTML_IM ='http://jabber.org/protocol/xhtml-im' # XEP-0071
NS_XMPP_STREAMS ='urn:ietf:params:xml:ns:xmpp-streams' # RFC 3920
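# NB: the fields in the condition tables below are separated by ' -- '; an
# empty field therefore shows up as two consecutive spaces between the dashes.
# The parsing loop below relies on err.split(' -- ') yielding exactly four
# items per line, so that whitespace is significant.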
xmpp_stream_error_conditions="""
bad-format --  --  -- The entity has sent XML that cannot be processed.
bad-namespace-prefix --  --  -- The entity has sent a namespace prefix that is unsupported, or has sent no namespace prefix on an element that requires such a prefix.
conflict --  --  -- The server is closing the active stream for this entity because a new stream has been initiated that conflicts with the existing stream.
connection-timeout --  --  -- The entity has not generated any traffic over the stream for some period of time.
host-gone --  --  -- The value of the 'to' attribute provided by the initiating entity in the stream header corresponds to a hostname that is no longer hosted by the server.
host-unknown --  --  -- The value of the 'to' attribute provided by the initiating entity in the stream header does not correspond to a hostname that is hosted by the server.
improper-addressing --  --  -- A stanza sent between two servers lacks a 'to' or 'from' attribute (or the attribute has no value).
internal-server-error --  --  -- The server has experienced a misconfiguration or an otherwise-undefined internal error that prevents it from servicing the stream.
invalid-from -- cancel --  -- The JID or hostname provided in a 'from' address does not match an authorized JID or validated domain negotiated between servers via SASL or dialback, or between a client and a server via authentication and resource authorization.
invalid-id --  --  -- The stream ID or dialback ID is invalid or does not match an ID previously provided.
invalid-namespace --  --  -- The streams namespace name is something other than "http://etherx.jabber.org/streams" or the dialback namespace name is something other than "jabber:server:dialback".
invalid-xml --  --  -- The entity has sent invalid XML over the stream to a server that performs validation.
not-authorized --  --  -- The entity has attempted to send data before the stream has been authenticated, or otherwise is not authorized to perform an action related to stream negotiation.
policy-violation --  --  -- The entity has violated some local service policy.
remote-connection-failed --  --  -- The server is unable to properly connect to a remote resource that is required for authentication or authorization.
resource-constraint --  --  -- The server lacks the system resources necessary to service the stream.
restricted-xml --  --  -- The entity has attempted to send restricted XML features such as a comment, processing instruction, DTD, entity reference, or unescaped character.
see-other-host --  --  -- The server will not provide service to the initiating entity but is redirecting traffic to another host.
system-shutdown --  --  -- The server is being shut down and all active streams are being closed.
undefined-condition --  --  -- The error condition is not one of those defined by the other conditions in this list.
unsupported-encoding --  --  -- The initiating entity has encoded the stream in an encoding that is not supported by the server.
unsupported-stanza-type --  --  -- The initiating entity has sent a first-level child of the stream that is not supported by the server.
unsupported-version --  --  -- The value of the 'version' attribute provided by the initiating entity in the stream header specifies a version of XMPP that is not supported by the server.
xml-not-well-formed --  --  -- The initiating entity has sent XML that is not well-formed."""
xmpp_stanza_error_conditions="""
bad-request -- 400 -- modify -- The sender has sent XML that is malformed or that cannot be processed.
conflict -- 409 -- cancel -- Access cannot be granted because an existing resource or session exists with the same name or address.
feature-not-implemented -- 501 -- cancel -- The feature requested is not implemented by the recipient or server and therefore cannot be processed.
forbidden -- 403 -- auth -- The requesting entity does not possess the required permissions to perform the action.
gone -- 302 -- modify -- The recipient or server can no longer be contacted at this address.
internal-server-error -- 500 -- wait -- The server could not process the stanza because of a misconfiguration or an otherwise-undefined internal server error.
item-not-found -- 404 -- cancel -- The addressed JID or item requested cannot be found.
jid-malformed -- 400 -- modify -- The value of the 'to' attribute in the sender's stanza does not adhere to the syntax defined in Addressing Scheme.
not-acceptable -- 406 -- cancel -- The recipient or server understands the request but is refusing to process it because it does not meet criteria defined by the recipient or server.
not-allowed -- 405 -- cancel -- The recipient or server does not allow any entity to perform the action.
not-authorized -- 401 -- auth -- The sender must provide proper credentials before being allowed to perform the action, or has provided improper credentials.
payment-required -- 402 -- auth -- The requesting entity is not authorized to access the requested service because payment is required.
recipient-unavailable -- 404 -- wait -- The intended recipient is temporarily unavailable.
redirect -- 302 -- modify -- The recipient or server is redirecting requests for this information to another entity.
registration-required -- 407 -- auth -- The requesting entity is not authorized to access the requested service because registration is required.
remote-server-not-found -- 404 -- cancel -- A remote server or service specified as part or all of the JID of the intended recipient does not exist.
remote-server-timeout -- 504 -- wait -- A remote server or service specified as part or all of the JID of the intended recipient could not be contacted within a reasonable amount of time.
resource-constraint -- 500 -- wait -- The server or recipient lacks the system resources necessary to service the request.
service-unavailable -- 503 -- cancel -- The server or recipient does not currently provide the requested service.
subscription-required -- 407 -- auth -- The requesting entity is not authorized to access the requested service because a subscription is required.
undefined-condition -- 500 --  -- The error condition is not one of those defined by the other conditions in this list.
unexpected-request -- 400 -- wait -- The recipient or server understood the request but was not expecting it at this time (e.g., the request was out of order)."""
sasl_error_conditions="""
aborted --  --  -- The receiving entity acknowledges an <abort/> element sent by the initiating entity; sent in reply to the <abort/> element.
incorrect-encoding --  --  -- The data provided by the initiating entity could not be processed because the [BASE64]Josefsson, S., The Base16, Base32, and Base64 Data Encodings, July 2003. encoding is incorrect (e.g., because the encoding does not adhere to the definition in Section 3 of [BASE64]Josefsson, S., The Base16, Base32, and Base64 Data Encodings, July 2003.); sent in reply to a <response/> element or an <auth/> element with initial response data.
invalid-authzid --  --  -- The authzid provided by the initiating entity is invalid, either because it is incorrectly formatted or because the initiating entity does not have permissions to authorize that ID; sent in reply to a <response/> element or an <auth/> element with initial response data.
invalid-mechanism --  --  -- The initiating entity did not provide a mechanism or requested a mechanism that is not supported by the receiving entity; sent in reply to an <auth/> element.
mechanism-too-weak --  --  -- The mechanism requested by the initiating entity is weaker than server policy permits for that initiating entity; sent in reply to a <response/> element or an <auth/> element with initial response data.
not-authorized --  --  -- The authentication failed because the initiating entity did not provide valid credentials (this includes but is not limited to the case of an unknown username); sent in reply to a <response/> element or an <auth/> element with initial response data.
temporary-auth-failure --  --  -- The authentication failed because of a temporary error condition within the receiving entity; sent in reply to an <auth/> element or <response/> element."""
ERRORS,_errorcodes={},{}
for ns,errname,errpool in [(NS_XMPP_STREAMS,'STREAM',xmpp_stream_error_conditions),
(NS_STANZAS ,'ERR' ,xmpp_stanza_error_conditions),
(NS_SASL ,'SASL' ,sasl_error_conditions)]:
for err in errpool.split('\n')[1:]:
cond,code,typ,text=err.split(' -- ')
name=errname+'_'+cond.upper().replace('-','_')
locals()[name]=ns+' '+cond
ERRORS[ns+' '+cond]=[code,typ,text]
if code: _errorcodes[code]=cond
del ns,errname,errpool,err,cond,code,typ,text
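# The loop above expands the three condition tables into module-level
# constants and lookup dicts.  A few illustrative results (derived directly
# from the tables above, not extra definitions):
#
#   STREAM_BAD_FORMAT == NS_XMPP_STREAMS + ' bad-format'
#   ERR_BAD_REQUEST   == NS_STANZAS + ' bad-request'
#   ERRORS[ERR_BAD_REQUEST] == ['400', 'modify', 'The sender has sent XML that is malformed or that cannot be processed.']
#   _errorcodes['503'] == 'service-unavailable'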
def isResultNode(node):
""" Returns true if the node is a positive reply. """
return node and node.getType()=='result'
def isErrorNode(node):
""" Returns true if the node is a negative reply. """
return node and node.getType()=='error'
class NodeProcessed(Exception):
""" Exception that should be raised by handler when the handling should be stopped. """
class StreamError(Exception):
""" Base exception class for stream errors."""
class BadFormat(StreamError): pass
class BadNamespacePrefix(StreamError): pass
class Conflict(StreamError): pass
class ConnectionTimeout(StreamError): pass
class HostGone(StreamError): pass
class HostUnknown(StreamError): pass
class ImproperAddressing(StreamError): pass
class InternalServerError(StreamError): pass
class InvalidFrom(StreamError): pass
class InvalidID(StreamError): pass
class InvalidNamespace(StreamError): pass
class InvalidXML(StreamError): pass
class NotAuthorized(StreamError): pass
class PolicyViolation(StreamError): pass
class RemoteConnectionFailed(StreamError): pass
class ResourceConstraint(StreamError): pass
class RestrictedXML(StreamError): pass
class SeeOtherHost(StreamError): pass
class SystemShutdown(StreamError): pass
class UndefinedCondition(StreamError): pass
class UnsupportedEncoding(StreamError): pass
class UnsupportedStanzaType(StreamError): pass
class UnsupportedVersion(StreamError): pass
class XMLNotWellFormed(StreamError): pass
stream_exceptions = {'bad-format': BadFormat,
'bad-namespace-prefix': BadNamespacePrefix,
'conflict': Conflict,
'connection-timeout': ConnectionTimeout,
'host-gone': HostGone,
'host-unknown': HostUnknown,
'improper-addressing': ImproperAddressing,
'internal-server-error': InternalServerError,
'invalid-from': InvalidFrom,
'invalid-id': InvalidID,
'invalid-namespace': InvalidNamespace,
'invalid-xml': InvalidXML,
'not-authorized': NotAuthorized,
'policy-violation': PolicyViolation,
'remote-connection-failed': RemoteConnectionFailed,
'resource-constraint': ResourceConstraint,
'restricted-xml': RestrictedXML,
'see-other-host': SeeOtherHost,
'system-shutdown': SystemShutdown,
'undefined-condition': UndefinedCondition,
'unsupported-encoding': UnsupportedEncoding,
'unsupported-stanza-type': UnsupportedStanzaType,
'unsupported-version': UnsupportedVersion,
'xml-not-well-formed': XMLNotWellFormed}
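# Illustrative use of the mapping above: translate a received stream error
# condition into the matching exception class (falling back to StreamError):
#
#   exc = stream_exceptions.get('host-unknown', StreamError)   # -> HostUnknown
#   raise exc('host-unknown')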
class JID:
""" JID object. JID can be built from string, modified, compared, serialised into string. """
def __init__(self, jid=None, node='', domain='', resource=''):
""" Constructor. JID can be specified as string (jid argument) or as separate parts.
Examples:
JID('node@domain/resource')
JID(node='node',domain='domain.org')
"""
if not jid and not domain: raise ValueError('JID must contain at least domain name')
elif type(jid)==type(self): self.node,self.domain,self.resource=jid.node,jid.domain,jid.resource
elif domain: self.node,self.domain,self.resource=node,domain,resource
else:
if jid.find('@')+1: self.node,jid=jid.split('@',1)
else: self.node=''
if jid.find('/')+1: self.domain,self.resource=jid.split('/',1)
else: self.domain,self.resource=jid,''
def getNode(self):
""" Return the node part of the JID """
return self.node
def setNode(self,node):
""" Set the node part of the JID to new value. Specify None to remove the node part."""
        self.node=node and node.lower() or ''
def getDomain(self):
""" Return the domain part of the JID """
return self.domain
def setDomain(self,domain):
""" Set the domain part of the JID to new value."""
self.domain=domain.lower()
def getResource(self):
""" Return the resource part of the JID """
return self.resource
def setResource(self,resource):
""" Set the resource part of the JID to new value. Specify None to remove the resource part."""
self.resource=resource
def getStripped(self):
""" Return the bare representation of JID. I.e. string value w/o resource. """
return self.__str__(0)
def __eq__(self, other):
""" Compare the JID to another instance or to string for equality. """
try: other=JID(other)
except ValueError: return 0
return self.resource==other.resource and self.__str__(0) == other.__str__(0)
def __ne__(self, other):
""" Compare the JID to another instance or to string for non-equality. """
return not self.__eq__(other)
def bareMatch(self, other):
""" Compare the node and domain parts of the JID's for equality. """
return self.__str__(0) == JID(other).__str__(0)
def __str__(self,wresource=1):
""" Serialise JID into string. """
if self.node: jid=self.node+'@'+self.domain
else: jid=self.domain
if wresource and self.resource: return jid+'/'+self.resource
return jid
def __hash__(self):
""" Produce hash of the JID, Allows to use JID objects as keys of the dictionary. """
return hash(self.__str__())
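# A minimal usage sketch for the JID class (illustrative addresses):
#
#   jid = JID('user@example.org/Home')
#   jid.getNode()       # 'user'
#   jid.getDomain()     # 'example.org'
#   jid.getResource()   # 'Home'
#   jid.getStripped()   # 'user@example.org'
#   jid == 'user@example.org/Home'    # comparison against plain strings works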
class Protocol(Node):
""" A "stanza" object class. Contains methods that are common for presences, iqs and messages. """
def __init__(self, name=None, to=None, typ=None, frm=None, attrs={}, payload=[], timestamp=None, xmlns=None, node=None):
""" Constructor, name is the name of the stanza i.e. 'message' or 'presence' or 'iq'.
to is the value of 'to' attribure, 'typ' - 'type' attribute
frn - from attribure, attrs - other attributes mapping, payload - same meaning as for simplexml payload definition
timestamp - the time value that needs to be stamped over stanza
xmlns - namespace of top stanza node
node - parsed or unparsed stana to be taken as prototype.
"""
if not attrs: attrs={}
if to: attrs['to']=to
if frm: attrs['from']=frm
if typ: attrs['type']=typ
Node.__init__(self, tag=name, attrs=attrs, payload=payload, node=node)
if not node and xmlns: self.setNamespace(xmlns)
if self['to']: self.setTo(self['to'])
if self['from']: self.setFrom(self['from'])
if node and type(self)==type(node) and self.__class__==node.__class__ and self.attrs.has_key('id'): del self.attrs['id']
self.timestamp=None
for x in self.getTags('x',namespace=NS_DELAY):
try:
if not self.getTimestamp() or x.getAttr('stamp')<self.getTimestamp(): self.setTimestamp(x.getAttr('stamp'))
except: pass
if timestamp is not None: self.setTimestamp(timestamp) # To auto-timestamp stanza just pass timestamp=''
def getTo(self):
""" Return value of the 'to' attribute. """
try: return self['to']
except: return None
def getFrom(self):
""" Return value of the 'from' attribute. """
try: return self['from']
except: return None
def getTimestamp(self):
""" Return the timestamp in the 'yyyymmddThhmmss' format. """
return self.timestamp
def getID(self):
""" Return the value of the 'id' attribute. """
return self.getAttr('id')
def setTo(self,val):
""" Set the value of the 'to' attribute. """
self.setAttr('to', JID(val))
def getType(self):
""" Return the value of the 'type' attribute. """
return self.getAttr('type')
def setFrom(self,val):
""" Set the value of the 'from' attribute. """
self.setAttr('from', JID(val))
def setType(self,val):
""" Set the value of the 'type' attribute. """
self.setAttr('type', val)
def setID(self,val):
""" Set the value of the 'id' attribute. """
self.setAttr('id', val)
def getError(self):
""" Return the error-condition (if present) or the textual description of the error (otherwise). """
errtag=self.getTag('error')
if errtag:
for tag in errtag.getChildren():
                if tag.getName()!='text': return tag.getName()
return errtag.getData()
def getErrorCode(self):
""" Return the error code. Obsolette. """
return self.getTagAttr('error','code')
def setError(self,error,code=None):
""" Set the error code. Obsolette. Use error-conditions instead. """
if code:
if str(code) in _errorcodes.keys(): error=ErrorNode(_errorcodes[str(code)],text=error)
else: error=ErrorNode(ERR_UNDEFINED_CONDITION,code=code,typ='cancel',text=error)
elif type(error) in [type(''),type(u'')]: error=ErrorNode(error)
self.setType('error')
self.addChild(node=error)
def setTimestamp(self,val=None):
"""Set the timestamp. timestamp should be the yyyymmddThhmmss string."""
if not val: val=time.strftime('%Y%m%dT%H:%M:%S', time.gmtime())
self.timestamp=val
self.setTag('x',{'stamp':self.timestamp},namespace=NS_DELAY)
def getProperties(self):
""" Return the list of namespaces to which belongs the direct childs of element"""
props=[]
for child in self.getChildren():
prop=child.getNamespace()
if prop not in props: props.append(prop)
return props
def __setitem__(self,item,val):
""" Set the item 'item' to the value 'val'."""
if item in ['to','from']: val=JID(val)
return self.setAttr(item,val)
class Message(Protocol):
""" XMPP Message stanza - "push" mechanism."""
def __init__(self, to=None, body=None, typ=None, subject=None, attrs={}, frm=None, payload=[], timestamp=None, xmlns=NS_CLIENT, node=None):
""" Create message object. You can specify recipient, text of message, type of message
any additional attributes, sender of the message, any additional payload (f.e. jabber:x:delay element) and namespace in one go.
Alternatively you can pass in the other XML object as the 'node' parameted to replicate it as message. """
Protocol.__init__(self, 'message', to=to, typ=typ, attrs=attrs, frm=frm, payload=payload, timestamp=timestamp, xmlns=xmlns, node=node)
if body: self.setBody(body)
if subject: self.setSubject(subject)
def getBody(self):
""" Returns text of the message. """
return self.getTagData('body')
def getSubject(self):
""" Returns subject of the message. """
return self.getTagData('subject')
def getThread(self):
""" Returns thread of the message. """
return self.getTagData('thread')
def setBody(self,val):
""" Sets the text of the message. """
self.setTagData('body',val)
def setSubject(self,val):
""" Sets the subject of the message. """
self.setTagData('subject',val)
def setThread(self,val):
""" Sets the thread of the message. """
self.setTagData('thread',val)
def buildReply(self,text=None):
""" Builds and returns another message object with specified text.
The to, from and thread properties of new message are pre-set as reply to this message. """
m=Message(to=self.getFrom(),frm=self.getTo(),body=text)
th=self.getThread()
if th: m.setThread(th)
return m
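# Sketch: building a message and a reply to it (illustrative values):
#
#   msg = Message(to='user@example.org', body='Hello', typ='chat')
#   reply = msg.buildReply('Hello to you too')   # 'to'/'from' swapped, thread preserved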
class Presence(Protocol):
""" XMPP Presence object."""
def __init__(self, to=None, typ=None, priority=None, show=None, status=None, attrs={}, frm=None, timestamp=None, payload=[], xmlns=NS_CLIENT, node=None):
""" Create presence object. You can specify recipient, type of message, priority, show and status values
any additional attributes, sender of the presence, timestamp, any additional payload (f.e. jabber:x:delay element) and namespace in one go.
Alternatively you can pass in the other XML object as the 'node' parameted to replicate it as presence. """
Protocol.__init__(self, 'presence', to=to, typ=typ, attrs=attrs, frm=frm, payload=payload, timestamp=timestamp, xmlns=xmlns, node=node)
if priority: self.setPriority(priority)
if show: self.setShow(show)
if status: self.setStatus(status)
    def getPriority(self):
        """ Returns the priority of the presence. """
        return self.getTagData('priority')
    def getShow(self):
        """ Returns the show value of the presence. """
        return self.getTagData('show')
    def getStatus(self):
        """ Returns the status string of the presence. """
        return self.getTagData('status')
    def setPriority(self,val):
        """ Sets the priority of the presence. """
        self.setTagData('priority',val)
    def setShow(self,val):
        """ Sets the show value of the presence. """
        self.setTagData('show',val)
    def setStatus(self,val):
        """ Sets the status string of the presence. """
        self.setTagData('status',val)
def _muc_getItemAttr(self,tag,attr):
for xtag in self.getTags('x'):
for child in xtag.getTags(tag):
return child.getAttr(attr)
def _muc_getSubTagDataAttr(self,tag,attr):
for xtag in self.getTags('x'):
for child in xtag.getTags('item'):
for cchild in child.getTags(tag):
return cchild.getData(),cchild.getAttr(attr)
return None,None
def getRole(self):
"""Returns the presence role (for groupchat)"""
return self._muc_getItemAttr('item','role')
def getAffiliation(self):
"""Returns the presence affiliation (for groupchat)"""
return self._muc_getItemAttr('item','affiliation')
def getNick(self):
"""Returns the nick value (for nick change in groupchat)"""
return self._muc_getItemAttr('item','nick')
def getJid(self):
"""Returns the presence jid (for groupchat)"""
return self._muc_getItemAttr('item','jid')
def getReason(self):
"""Returns the reason of the presence (for groupchat)"""
return self._muc_getSubTagDataAttr('reason','')[0]
def getActor(self):
"""Returns the reason of the presence (for groupchat)"""
return self._muc_getSubTagDataAttr('actor','jid')[1]
def getStatusCode(self):
"""Returns the status code of the presence (for groupchat)"""
return self._muc_getItemAttr('status','code')
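# Sketch: reading MUC information from a received groupchat presence
# ('prs' is an illustrative variable holding a parsed Presence):
#
#   role = prs.getRole()                # e.g. 'participant' or 'moderator'
#   affiliation = prs.getAffiliation()  # e.g. 'member' or 'owner'
#   real_jid = prs.getJid()             # only present in non-anonymous rooms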
class Iq(Protocol):
""" XMPP Iq object - get/set dialog mechanism. """
def __init__(self, typ=None, queryNS=None, attrs={}, to=None, frm=None, payload=[], xmlns=NS_CLIENT, node=None):
""" Create Iq object. You can specify type, query namespace
any additional attributes, recipient of the iq, sender of the iq, any additional payload (f.e. jabber:x:data node) and namespace in one go.
Alternatively you can pass in the other XML object as the 'node' parameted to replicate it as an iq. """
Protocol.__init__(self, 'iq', to=to, typ=typ, attrs=attrs, frm=frm, xmlns=xmlns, node=node)
if payload: self.setQueryPayload(payload)
if queryNS: self.setQueryNS(queryNS)
def getQueryNS(self):
""" Return the namespace of the 'query' child element."""
tag=self.getTag('query')
if tag: return tag.getNamespace()
def getQuerynode(self):
""" Return the 'node' attribute value of the 'query' child element."""
return self.getTagAttr('query','node')
def getQueryPayload(self):
""" Return the 'query' child element payload."""
tag=self.getTag('query')
if tag: return tag.getPayload()
def getQueryChildren(self):
""" Return the 'query' child element child nodes."""
tag=self.getTag('query')
if tag: return tag.getChildren()
def setQueryNS(self,namespace):
""" Set the namespace of the 'query' child element."""
self.setTag('query').setNamespace(namespace)
def setQueryPayload(self,payload):
""" Set the 'query' child element payload."""
self.setTag('query').setPayload(payload)
def setQuerynode(self,node):
""" Set the 'node' attribute value of the 'query' child element."""
self.setTagAttr('query','node',node)
def buildReply(self,typ):
""" Builds and returns another Iq object of specified type.
The to, from and query child node of new Iq are pre-set as reply to this Iq. """
iq=Iq(typ,to=self.getFrom(),frm=self.getTo(),attrs={'id':self.getID()})
if self.getTag('query'): iq.setQueryNS(self.getQueryNS())
return iq
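# Sketch: a disco#info request, and building a reply to a received iq
# (illustrative JID; buildReply is normally called on a stanza you received):
#
#   iq = Iq('get', NS_DISCO_INFO, to='example.org')
#   reply = received_iq.buildReply('result')   # keeps the id and the query namespace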
class ErrorNode(Node):
""" XMPP-style error element.
In the case of stanza error should be attached to XMPP stanza.
In the case of stream-level errors should be used separately. """
def __init__(self,name,code=None,typ=None,text=None):
""" Create new error node object.
Mandatory parameter: name - name of error condition.
            Optional parameters: code, typ, text. Used for backwards compatibility with older jabber protocol."""
if ERRORS.has_key(name):
cod,type,txt=ERRORS[name]
ns=name.split()[0]
else: cod,ns,type,txt='500',NS_STANZAS,'cancel',''
if typ: type=typ
if code: cod=code
if text: txt=text
Node.__init__(self,'error',{},[Node(name)])
if type: self.setAttr('type',type)
if not cod: self.setName('stream:error')
if txt: self.addChild(node=Node(ns+' text',{},[txt]))
if cod: self.setAttr('code',cod)
class Error(Protocol):
""" Used to quickly transform received stanza into error reply."""
def __init__(self,node,error,reply=1):
""" Create error reply basing on the received 'node' stanza and the 'error' error condition.
If the 'node' is not the received stanza but locally created ('to' and 'from' fields needs not swapping)
specify the 'reply' argument as false."""
if reply: Protocol.__init__(self,to=node.getFrom(),frm=node.getTo(),node=node)
else: Protocol.__init__(self,node=node)
self.setError(error)
if node.getType()=='error': self.__str__=self.__dupstr__
def __dupstr__(self,dup1=None,dup2=None):
""" Dummy function used as preventor of creating error node in reply to error node.
I.e. you will not be able to serialise "double" error into string.
"""
return ''
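# Sketch: answering an unsupported request with an error reply ('conn' and
# 'iq' are illustrative, already existing dispatcher and stanza objects):
#
#   conn.send(Error(iq, ERR_FEATURE_NOT_IMPLEMENTED))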
class DataField(Node):
""" This class is used in the DataForm class to describe the single data item.
If you are working with jabber:x:data (XEP-0004, XEP-0068, XEP-0122)
then you will need to work with instances of this class. """
def __init__(self,name=None,value=None,typ=None,required=0,label=None,desc=None,options=[],node=None):
""" Create new data field of specified name,value and type.
Also 'required','desc' and 'options' fields can be set.
Alternatively other XML object can be passed in as the 'node' parameted to replicate it as a new datafiled.
"""
Node.__init__(self,'field',node=node)
if name: self.setVar(name)
if type(value) in [list,tuple]: self.setValues(value)
elif value: self.setValue(value)
if typ: self.setType(typ)
elif not typ and not node: self.setType('text-single')
if required: self.setRequired(required)
if label: self.setLabel(label)
if desc: self.setDesc(desc)
if options: self.setOptions(options)
def setRequired(self,req=1):
""" Change the state of the 'required' flag. """
if req: self.setTag('required')
else:
try: self.delChild('required')
except ValueError: return
def isRequired(self):
""" Returns in this field a required one. """
return self.getTag('required')
def setLabel(self,label):
""" Set the label of this field. """
self.setAttr('label',label)
def getLabel(self):
""" Return the label of this field. """
return self.getAttr('label')
def setDesc(self,desc):
""" Set the description of this field. """
self.setTagData('desc',desc)
def getDesc(self):
""" Return the description of this field. """
return self.getTagData('desc')
def setValue(self,val):
""" Set the value of this field. """
self.setTagData('value',val)
def getValue(self):
return self.getTagData('value')
def setValues(self,lst):
""" Set the values of this field as values-list.
            Replaces all previous field values! If you need to just add a value - use addValue method."""
while self.getTag('value'): self.delChild('value')
for val in lst: self.addValue(val)
def addValue(self,val):
""" Add one more value to this field. Used in 'get' iq's or such."""
self.addChild('value',{},[val])
def getValues(self):
""" Return the list of values associated with this field."""
ret=[]
for tag in self.getTags('value'): ret.append(tag.getData())
return ret
def getOptions(self):
""" Return label-option pairs list associated with this field."""
ret=[]
for tag in self.getTags('option'): ret.append([tag.getAttr('label'),tag.getTagData('value')])
return ret
def setOptions(self,lst):
""" Set label-option pairs list associated with this field."""
while self.getTag('option'): self.delChild('option')
for opt in lst: self.addOption(opt)
def addOption(self,opt):
""" Add one more label-option pair to this field."""
if type(opt) in [str,unicode]: self.addChild('option').setTagData('value',opt)
else: self.addChild('option',{'label':opt[0]}).setTagData('value',opt[1])
def getType(self):
""" Get type of this field. """
return self.getAttr('type')
def setType(self,val):
""" Set type of this field. """
return self.setAttr('type',val)
def getVar(self):
""" Get 'var' attribute value of this field. """
return self.getAttr('var')
def setVar(self,val):
""" Set 'var' attribute value of this field. """
return self.setAttr('var',val)
class DataReported(Node):
""" This class is used in the DataForm class to describe the 'reported data field' data items which are used in
'multiple item form results' (as described in XEP-0004).
Represents the fields that will be returned from a search. This information is useful when
you try to use the jabber:iq:search namespace to return dynamic form information.
"""
def __init__(self,node=None):
""" Create new empty 'reported data' field. However, note that, according XEP-0004:
* It MUST contain one or more DataFields.
* Contained DataFields SHOULD possess a 'type' and 'label' attribute in addition to 'var' attribute
* Contained DataFields SHOULD NOT contain a <value/> element.
            Alternatively other XML object can be passed in as the 'node' parameter to replicate it as a new
dataitem.
"""
Node.__init__(self,'reported',node=node)
if node:
newkids=[]
for n in self.getChildren():
if n.getName()=='field': newkids.append(DataField(node=n))
else: newkids.append(n)
self.kids=newkids
def getField(self,name):
""" Return the datafield object with name 'name' (if exists). """
return self.getTag('field',attrs={'var':name})
def setField(self,name,typ=None,label=None):
""" Create if nessessary or get the existing datafield object with name 'name' and return it.
If created, attributes 'type' and 'label' are applied to new datafield."""
f=self.getField(name)
if f: return f
return self.addChild(node=DataField(name,None,typ,0,label))
def asDict(self):
""" Represent dataitem as simple dictionary mapping of datafield names to their values."""
ret={}
for field in self.getTags('field'):
name=field.getAttr('var')
typ=field.getType()
if isinstance(typ,(str,unicode)) and typ[-6:]=='-multi':
val=[]
for i in field.getTags('value'): val.append(i.getData())
else: val=field.getTagData('value')
ret[name]=val
if self.getTag('instructions'): ret['instructions']=self.getInstructions()
return ret
def __getitem__(self,name):
""" Simple dictionary interface for getting datafields values by their names."""
item=self.getField(name)
if item: return item.getValue()
raise IndexError('No such field')
def __setitem__(self,name,val):
""" Simple dictionary interface for setting datafields values by their names."""
return self.setField(name).setValue(val)
class DataItem(Node):
""" This class is used in the DataForm class to describe data items which are used in 'multiple
item form results' (as described in XEP-0004).
"""
def __init__(self,node=None):
""" Create new empty data item. However, note that, according XEP-0004, DataItem MUST contain ALL
DataFields described in DataReported.
            Alternatively other XML object can be passed in as the 'node' parameter to replicate it as a new
dataitem.
"""
Node.__init__(self,'item',node=node)
if node:
newkids=[]
for n in self.getChildren():
if n.getName()=='field': newkids.append(DataField(node=n))
else: newkids.append(n)
self.kids=newkids
def getField(self,name):
""" Return the datafield object with name 'name' (if exists). """
return self.getTag('field',attrs={'var':name})
def setField(self,name):
""" Create if nessessary or get the existing datafield object with name 'name' and return it. """
f=self.getField(name)
if f: return f
return self.addChild(node=DataField(name))
def asDict(self):
""" Represent dataitem as simple dictionary mapping of datafield names to their values."""
ret={}
for field in self.getTags('field'):
name=field.getAttr('var')
typ=field.getType()
if isinstance(typ,(str,unicode)) and typ[-6:]=='-multi':
val=[]
for i in field.getTags('value'): val.append(i.getData())
else: val=field.getTagData('value')
ret[name]=val
if self.getTag('instructions'): ret['instructions']=self.getInstructions()
return ret
def __getitem__(self,name):
""" Simple dictionary interface for getting datafields values by their names."""
item=self.getField(name)
if item: return item.getValue()
raise IndexError('No such field')
def __setitem__(self,name,val):
""" Simple dictionary interface for setting datafields values by their names."""
return self.setField(name).setValue(val)
class DataForm(Node):
""" DataForm class. Used for manipulating dataforms in XMPP.
Relevant XEPs: 0004, 0068, 0122.
Can be used in disco, pub-sub and many other applications."""
def __init__(self, typ=None, data=[], title=None, node=None):
"""
Create new dataform of type 'typ'; 'data' is the list of DataReported,
DataItem and DataField instances that this dataform contains; 'title'
is the title string.
You can specify the 'node' argument as the other node to be used as
base for constructing this dataform.
        title and instructions are optional and SHOULD NOT contain newlines.
        Several instructions MAY be present.
        'typ' can be one of ('form' | 'submit' | 'cancel' | 'result' )
        'typ' of the reply iq can be ( 'result' | 'set' | 'set' | 'result' ) respectively.
        'cancel' form can not contain any fields. All other forms contain AT LEAST one field.
'title' MAY be included in forms of type "form" and "result"
"""
Node.__init__(self,'x',node=node)
if node:
newkids=[]
for n in self.getChildren():
if n.getName()=='field': newkids.append(DataField(node=n))
elif n.getName()=='item': newkids.append(DataItem(node=n))
elif n.getName()=='reported': newkids.append(DataReported(node=n))
else: newkids.append(n)
self.kids=newkids
if typ: self.setType(typ)
self.setNamespace(NS_DATA)
if title: self.setTitle(title)
if type(data)==type({}):
newdata=[]
for name in data.keys(): newdata.append(DataField(name,data[name]))
data=newdata
for child in data:
if type(child) in [type(''),type(u'')]: self.addInstructions(child)
elif child.__class__.__name__=='DataField': self.kids.append(child)
elif child.__class__.__name__=='DataItem': self.kids.append(child)
elif child.__class__.__name__=='DataReported': self.kids.append(child)
else: self.kids.append(DataField(node=child))
def getType(self):
""" Return the type of dataform. """
return self.getAttr('type')
def setType(self,typ):
""" Set the type of dataform. """
self.setAttr('type',typ)
def getTitle(self):
""" Return the title of dataform. """
return self.getTagData('title')
def setTitle(self,text):
""" Set the title of dataform. """
self.setTagData('title',text)
def getInstructions(self):
""" Return the instructions of dataform. """
return self.getTagData('instructions')
def setInstructions(self,text):
""" Set the instructions of dataform. """
self.setTagData('instructions',text)
def addInstructions(self,text):
""" Add one more instruction to the dataform. """
self.addChild('instructions',{},[text])
def getField(self,name):
""" Return the datafield object with name 'name' (if exists). """
return self.getTag('field',attrs={'var':name})
def setField(self,name):
""" Create if nessessary or get the existing datafield object with name 'name' and return it. """
f=self.getField(name)
if f: return f
return self.addChild(node=DataField(name))
def asDict(self):
""" Represent dataform as simple dictionary mapping of datafield names to their values."""
ret={}
for field in self.getTags('field'):
name=field.getAttr('var')
typ=field.getType()
if isinstance(typ,(str,unicode)) and typ[-6:]=='-multi':
val=[]
for i in field.getTags('value'): val.append(i.getData())
else: val=field.getTagData('value')
ret[name]=val
if self.getTag('instructions'): ret['instructions']=self.getInstructions()
return ret
def __getitem__(self,name):
""" Simple dictionary interface for getting datafields values by their names."""
item=self.getField(name)
if item: return item.getValue()
raise IndexError('No such field')
def __setitem__(self,name,val):
""" Simple dictionary interface for setting datafields values by their names."""
return self.setField(name).setValue(val)
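# Sketch: filling in and submitting a data form (illustrative field names;
# 'iq' is an illustrative stanza to attach the form to):
#
#   form = DataForm(typ='submit')
#   form.setField('FORM_TYPE').setValue('jabber:iq:register')
#   form['username'] = 'alice'          # dict-style access via __setitem__
#   iq.addChild(node=form)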
|
windedge/odoo | refs/heads/8.0 | addons/account/account_move_line.py | 23 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
context = dict(context or {})
initial_bal = context.get('initial_bal', False)
company_clause = " "
query = ''
query_params = {}
if context.get('company_id'):
company_clause = " AND " +obj+".company_id = %(company_id)s"
query_params['company_id'] = context['company_id']
if not context.get('fiscalyear'):
if context.get('all_fiscalyear'):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = context['fiscalyear']
if isinstance(context['fiscalyear'], (int, long)):
fiscalyear_ids = [fiscalyear_ids]
query_params['fiscalyear_ids'] = tuple(fiscalyear_ids) or (0,)
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from') and context.get('date_to'):
query_params['date_from'] = context['date_from']
query_params['date_to'] = context['date_to']
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < %(date_from)s)"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= %(date_from)s AND date <= %(date_to)s)"
if state:
if state.lower() not in ['all']:
query_params['state'] = state
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = %(state)s)"
if context.get('period_from') and context.get('period_to') and not context.get('periods'):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods'):
query_params['period_ids'] = tuple(context['periods'])
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s)" + where_move_state + where_move_lines_by_date
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
query_params['date_start'] = first_period.date_start
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s AND date_start <= %(date_start)s AND id NOT IN %(period_ids)s)" + where_move_state + where_move_lines_by_date
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s AND id IN %(period_ids)s)" + where_move_state + where_move_lines_by_date
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN %(fiscalyear_ids)s)" + where_move_state + where_move_lines_by_date
if initial_bal and not context.get('periods') and not where_move_lines_by_date:
#we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
#so we have to invalidate this query
raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
if context.get('journal_ids'):
query_params['journal_ids'] = tuple(context['journal_ids'])
query += ' AND '+obj+'.journal_id IN %(journal_ids)s'
if context.get('chart_account_id'):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query_params['child_ids'] = tuple(child_ids)
query += ' AND '+obj+'.account_id IN %(child_ids)s'
query += company_clause
return cr.mogrify(query, query_params)
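    # Sketch of how the clause returned above is consumed by the accounting
    # reports (illustrative context values):
    #
    #   ctx = {'fiscalyear': fy_id, 'state': 'posted', 'journal_ids': [1, 2]}
    #   where = self._query_get(cr, uid, obj='l', context=ctx)
    #   cr.execute('SELECT l.account_id, SUM(l.debit - l.credit) '
    #              'FROM account_move_line l WHERE ' + where + ' GROUP BY l.account_id')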
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
By default, it returns an amount in the currency of this journal entry (maybe different
of the company currency), but if you pass 'residual_in_company_currency' = True in the
context then the returned amount will be in company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.reconcile:
                #this function does not support being used on move lines not related to a reconcilable account
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
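    # Illustrative reading: for a 100.0 receivable line partially reconciled
    # with a 40.0 payment, amount_residual comes out as 60.0 in company
    # currency, and amount_residual_currency as the equivalent amount in the
    # line's own currency when one is set.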
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
"""
Prepare the values given at the create() of account.analytic.line upon the validation of a journal item having
an analytic account. This method is intended to be extended in other modules.
:param obj_line: browse record of the account.move.line that triggered the analytic line creation
"""
return {'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': obj_line.invoice.user_id.id or uid,
}
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_lines:
acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
        '''Called at the end of the default_get method for manual entry in the account_move form'''
if data.has_key('analytic_account_id'):
del(data['analytic_account_id'])
if data.has_key('account_tax_id'):
del(data['account_tax_id'])
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context = dict(context, period_id=ids and ids[0] or False)
return context
def _default_get(self, cr, uid, fields, context=None):
#default_get should only do the following:
# -propose the next amount in debit/credit in order to balance the move
# -propose the next account from the journal (default debit/credit account) accordingly
context = dict(context or {})
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
if not context.get('journal_id', False):
context['journal_id'] = context.get('search_default_journal_id', False)
if not context.get('period_id', False):
context['period_id'] = context.get('search_default_period_id', False)
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
if context.get('journal_id'):
total = 0.0
#in account.move form view, it is not possible to compute total debit and credit using
#a browse record. So we must use the context to pass the whole one2many field and compute the total
if context.get('line_id'):
for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
data['name'] = data.get('name') or move_line_dict.get('name')
data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
elif context.get('period_id'):
#find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
move_id = False
cr.execute('''SELECT move_id, date FROM account_move_line
WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = res and res[0] or False
data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
data['move_id'] = move_id
if move_id:
#if there exist some unbalanced accounting entries that match the journal and the period,
#we propose to continue the same move by copying the ref, the name, the partner...
move = move_obj.browse(cr, uid, move_id, context=context)
data.setdefault('name', move.line_id[-1].name)
for l in move.line_id:
data['partner_id'] = data.get('partner_id') or l.partner_id.id
data['ref'] = data.get('ref') or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
#compute the total of current move
data['debit'] = total < 0 and -total or 0.0
data['credit'] = total > 0 and total or 0.0
            #pick the right account on the journal, according to whether the next proposed line will be a debit or a credit
journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
#map the account using the fiscal position of the partner, if needed
if isinstance(data.get('partner_id'), (int, long)):
part = partner_obj.browse(cr, uid, data['partner_id'], context=context)
elif isinstance(data.get('partner_id'), (tuple, list)):
part = partner_obj.browse(cr, uid, data['partner_id'][0], context=context)
else:
part = False
if account and part:
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account and account.id or False
#compute the amount in secondary currency of the account, if needed
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
#set the context for the multi currency change
compute_ctx = context.copy()
compute_ctx.update({
#the following 2 parameters are used to choose the currency rate, in case where the account
#doesn't work with an outgoing currency rate method 'at date' but 'average'
'res.currency.compute.account': account,
'res.currency.compute.account_invert': True,
})
if data.get('date'):
compute_ctx.update({'date': data['date']})
data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
data = self._default_get_move_form_hook(cr, uid, data)
return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
domain = (context or {}).get('on_write_domain', [])
return self.pool.get('account.move.line').search(cr, uid, domain + [['id', 'in', [l.id for l in ml.move_id.line_id]]], context=context)
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
        c['initial_bal'] = True
sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
FROM account_move_line l1 LEFT JOIN account_move_line l2
ON (l1.account_id = l2.account_id
AND l2.id <= l1.id
AND """ + \
self._query_get(cr, uid, obj='l2', context=c) + \
") WHERE l1.id IN %s GROUP BY l1.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = invoice_id and (invoice_id, invoice_names[invoice_id]) or False
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
def _get_reconcile(self, cr, uid, ids,name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('move_id','in',move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True, auto_join=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
        'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different from the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
        'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount. If the tax account is a base tax code, "\
                    "this field will contain the basic amount (without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
        if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_journal(self, cr, uid, context=None):
"""
Return journal based on the journal type
"""
context = context or {}
if context.get('journal_id', False):
return context['journal_id']
journal_id = False
journal_pool = self.pool.get('account.journal')
if context.get('journal_type', False):
jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
if not jids:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
msg = _("""Cannot find any account journal of "%s" type for this company, You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
journal_id = jids[0]
return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
res = super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index '
'ON account_move_line (journal_id, period_id, state, create_uid, id DESC)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
jt = False
if journal:
jt = journal_obj.browse(cr, uid, journal, context=context).type
part = partner_obj.browse(cr, uid, partner_id, context=context)
payment_term_id = False
if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
payment_term_id = part.property_supplier_payment_term.id
elif jt and part.property_payment_term:
payment_term_id = part.property_payment_term.id
if payment_term_id:
res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if jt:
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
elif jt in ('general', 'bank', 'cash'):
if part.customer:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif part.supplier:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
    #
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
    #
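    # A usage sketch for reconcile() below (the ids are hypothetical, not from
    # the original code): fully reconciling a 100.0 debit line against a 99.0
    # credit line needs a write-off account for the 1.0 difference, e.g.:
    #   self.reconcile(cr, uid, [debit_line_id, credit_line_id],
    #                  writeoff_acc_id=writeoff_account_id,
    #                  writeoff_period_id=period_id,
    #                  writeoff_journal_id=misc_journal_id)
    #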
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('fiscalyear'):
args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.list_partners_to_reconcile(cr, uid, context=context)
if partner:
partner = partner[0]
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
""" Returns move lines formatted for the manual/bank reconciliation widget
            :param target_currency: currency you want the move line debit/credit converted into
:param target_date: date to use for the monetary conversion
"""
if not lines:
return []
if context is None:
context = {}
ctx = context.copy()
currency_obj = self.pool.get('res.currency')
company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
ret = []
for line in lines:
partial_reconciliation_siblings_ids = []
if line.reconcile_partial_id:
partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
partial_reconciliation_siblings_ids.remove(line.id)
ret_line = {
'id': line.id,
'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
'ref': line.move_id.ref or '',
'account_code': line.account_id.code,
'account_name': line.account_id.name,
'account_type': line.account_id.type,
'date_maturity': line.date_maturity,
'date': line.date,
'period_name': line.period_id.name,
'journal_name': line.journal_id.name,
'partner_id': line.partner_id.id,
'partner_name': line.partner_id.name,
'is_partially_reconciled': bool(line.reconcile_partial_id),
'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
}
# Amount residual can be negative
debit = line.debit
credit = line.credit
amount = line.amount_residual
amount_currency = line.amount_residual_currency
if line.amount_residual < 0:
debit, credit = credit, debit
amount = -amount
amount_currency = -amount_currency
# Get right debit / credit:
target_currency = target_currency or company_currency
line_currency = line.currency_id or company_currency
amount_currency_str = ""
total_amount_currency_str = ""
if line_currency != company_currency:
total_amount = line.amount_currency
actual_debit = debit > 0 and amount_currency or 0.0
actual_credit = credit > 0 and amount_currency or 0.0
else:
total_amount = abs(debit - credit)
actual_debit = debit > 0 and amount or 0.0
actual_credit = credit > 0 and amount or 0.0
if line_currency != target_currency:
amount_currency_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=line_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
ret_line['credit_currency'] = actual_credit
ret_line['debit_currency'] = actual_debit
if target_currency == company_currency:
actual_debit = debit > 0 and amount or 0.0
actual_credit = credit > 0 and amount or 0.0
total_amount = abs(debit - credit)
else:
ctx = context.copy()
ctx.update({'date': line.date})
total_amount = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, total_amount, context=ctx)
actual_debit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_debit, context=ctx)
actual_credit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_credit, context=ctx)
amount_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=target_currency)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)
ret_line['debit'] = actual_debit
ret_line['credit'] = actual_credit
ret_line['amount_str'] = amount_str
ret_line['total_amount_str'] = total_amount_str
ret_line['amount_currency_str'] = amount_currency_str
ret_line['total_amount_currency_str'] = total_amount_currency_str
ret.append(ret_line)
return ret
def list_partners_to_reconcile(self, cr, uid, context=None, filter_domain=False):
line_ids = []
if filter_domain:
line_ids = self.search(cr, uid, filter_domain, context=context)
where_clause = filter_domain and "AND l.id = ANY(%s)" or ""
cr.execute(
"""SELECT partner_id FROM (
SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND l.state <> 'draft'
%s
GROUP BY l.partner_id, p.last_reconciliation_date
) AS s
WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
ORDER BY last_reconciliation_date"""
            % where_clause, filter_domain and (line_ids,) or None)  # no params when the query has no placeholder
ids = [x[0] for x in cr.fetchall()]
if not ids:
return []
# To apply the ir_rules
partner_obj = self.pool.get('res.partner')
ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
return partner_obj.name_get(cr, uid, ids, context=context)
def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
if company_list and not line.company_id.id in company_list:
raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in self.browse(cr, uid, ids, context=context):
if line.account_id.currency_id:
currency_id = line.account_id.currency_id
else:
currency_id = line.company_id.currency_id
if line.reconcile_id:
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if line2.state != 'valid':
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
if line2.account_id.currency_id:
total += line2.amount_currency
else:
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
return res
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
}, context=reconcile_context)
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
return r_id
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in lines:
if company_list and not line.company_id.id in company_list:
raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in unrec_lines:
if line.state <> 'valid':
raise osv.except_osv(_('Error!'),
_('Entry "%s" is not valid !') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
        # If date_p is in the context, take this date
        if context.get('date_p'):
            date = context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
if len(r) != 1:
raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
if not unrec_lines:
raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
account = account_obj.browse(cr, uid, account_id, context=context)
if not account.reconcile:
raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
if r[0][1] != None:
raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
# If comment exist in context, take it
if 'comment' in context and context['comment']:
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
}, context=reconcile_context)
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
workflow.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('User Error!'),
_('The account move (%s) for centralisation ' \
'has been confirmed.') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the move reconcile ids related to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
all_moves = list(set(all_moves) - set(move_ids))
if unlink_ids:
if opening_reconciliation:
raise osv.except_osv(_('Warning!'),
_('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
obj_move_rec.unlink(cr, uid, unlink_ids)
if len(all_moves) >= 2:
obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
localcontext = dict(context)
localcontext['journal_id'] = line.journal_id.id
localcontext['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=localcontext)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('account_tax_id', False):
raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
affects_move = any(f in vals for f in ('account_id', 'journal_id', 'period_id', 'move_id', 'debit', 'credit', 'date'))
if update_check and affects_move:
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if not ctx.get('journal_id'):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if not ctx.get('period_id'):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if affects_move and check and not context.get('novalidate'):
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
raise osv.except_osv(_('Error!'), _('You can not add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
if line.move_id.state <> 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
if line.reconcile_id:
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
context = dict(context or {})
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
        # We need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No Piece Number!'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
            # Automatically convert into the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
base_adjusted = False
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
#create the base movement
                if not base_adjusted:
base_adjusted = True
if tax_id.price_include:
total = tax['price_unit']
newvals = {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
}
if tax_id.price_include:
if tax['price_unit'] < 0:
newvals['credit'] = abs(tax['price_unit'])
else:
newvals['debit'] = tax['price_unit']
self.write(cr, uid, [result], newvals, context=context)
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id', False),
'ref': vals.get('ref', False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
self.create(cr, uid, data, context)
#create the Tax movement
if not tax['amount'] and not tax[tax_code]:
continue
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
self.create(cr, uid, data, context)
del vals['account_tax_id']
recompute = journal.env.recompute and context.get('recompute', True)
if check and not context.get('novalidate') and (recompute or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pwmarcz/django | refs/heads/master | django/utils/regex_helper.py | 94 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLS, however.
"""
from __future__ import unicode_literals
from django.utils import six
from django.utils.six.moves import zip
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
"""
Given a reg-exp pattern, normalizes it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
    (3) Select the first (essentially arbitrary) element from any character
        class. Select an arbitrary character for any unordered class (e.g. '.'
        or '\w') in the pattern.
    (4) Ignore comments and any of the reg-exp flags that won't change
        what we construct ("iLmsu"). "(?x)" is an error, however.
    (5) Raise an error on all other non-capturing (?...) forms (e.g.
        look-ahead and look-behind matches) and any disjunctive ('|')
        constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in "iLmsu#":
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name and
                        # then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
                # Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
                    # We had to look ahead, but it wasn't needed to compute the
                    # quantifier, so use this character next time around the
                    # main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
def next_char(input_iter):
"""
    An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
Returns True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, six.string_types):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
|
lsqtongxin/django | refs/heads/master | django/contrib/sessions/backends/base.py | 298 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=None):
self.modified = self.modified or key in self._session
return self._session.pop(key, default)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
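    # Schematically, the encoded payload is (assuming the configured
    # serializer): base64("<hmac hexdigest>:<serialized session dict>").
    # decode() below splits on the first ':' and verifies the digest before
    # deserializing.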
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
        Validate session key on assignment. Invalid values will be set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
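    # Example (illustrative, not part of this module): given a session on a
    # request, request.session.set_expiry(300) expires it after 300 seconds
    # of inactivity, set_expiry(0) expires it at browser close, and
    # set_expiry(None) falls back to the global SESSION_COOKIE_AGE policy.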
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
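# --- A minimal sketch (not part of Django): an in-memory store that fills
# in the abstract methods above, just to show the backend contract. It
# assumes this module's SessionBase and CreateError are in scope and that
# Django is configured with a SECRET_KEY (needed by encode()/decode()).
class InMemorySessionStore(SessionBase):
    _store = {}  # maps session_key -> encoded session data
    def exists(self, session_key):
        return session_key in self._store
    def create(self):
        self._session_key = self._get_new_session_key()
        self.save(must_create=True)
        self.modified = True
    def save(self, must_create=False):
        key = self._get_or_create_session_key()
        if must_create and key in self._store:
            raise CreateError
        self._store[key] = self.encode(self._get_session(no_load=must_create))
    def delete(self, session_key=None):
        self._store.pop(session_key or self.session_key, None)
    def load(self):
        try:
            return self.decode(self._store[self.session_key])
        except KeyError:
            self._session_key = None
            return {}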
|
openqt/algorithms | refs/heads/master | projecteuler/pe364-comfortable-distance.py | 1 | #!/usr/bin/env python
# coding=utf-8
"""364. Comfortable distance
https://projecteuler.net/problem=364
There are N seats in a row. N people come after each other to fill the seats
according to the following rules:
1. If there is any seat whose adjacent seat(s) are not occupied take such a seat.
2. If there is no such seat and there is any seat for which only one adjacent seat is occupied take such a seat.
3. Otherwise take one of the remaining available seats.
Let T(N) be the number of possibilities that N seats are occupied by N
people with the given rules.
A figure on the problem page illustrates T(4) = 8.
We can verify that T(10) = 61632 and T(1 000) mod 100 000 007 = 47255094.
Find T(1 000 000) mod 100 000 007.
"""
|
juananpe/django | refs/heads/master | hello/wsgi.py | 2 | """
WSGI config for hello project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
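# Example (illustrative): any WSGI server can serve this callable, e.g.
#   gunicorn hello.wsgi:application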
|
ruuk/script.bluray.com | refs/heads/master | lib/unidecode/x085.py | 252 | data = (
'Bu ', # 0x00
'Zhang ', # 0x01
'Luo ', # 0x02
'Jiang ', # 0x03
'Man ', # 0x04
'Yan ', # 0x05
'Ling ', # 0x06
'Ji ', # 0x07
'Piao ', # 0x08
'Gun ', # 0x09
'Han ', # 0x0a
'Di ', # 0x0b
'Su ', # 0x0c
'Lu ', # 0x0d
'She ', # 0x0e
'Shang ', # 0x0f
'Di ', # 0x10
'Mie ', # 0x11
'Xun ', # 0x12
'Man ', # 0x13
'Bo ', # 0x14
'Di ', # 0x15
'Cuo ', # 0x16
'Zhe ', # 0x17
'Sen ', # 0x18
'Xuan ', # 0x19
'Wei ', # 0x1a
'Hu ', # 0x1b
'Ao ', # 0x1c
'Mi ', # 0x1d
'Lou ', # 0x1e
'Cu ', # 0x1f
'Zhong ', # 0x20
'Cai ', # 0x21
'Po ', # 0x22
'Jiang ', # 0x23
'Mi ', # 0x24
'Cong ', # 0x25
'Niao ', # 0x26
'Hui ', # 0x27
'Jun ', # 0x28
'Yin ', # 0x29
'Jian ', # 0x2a
'Yan ', # 0x2b
'Shu ', # 0x2c
'Yin ', # 0x2d
'Kui ', # 0x2e
'Chen ', # 0x2f
'Hu ', # 0x30
'Sha ', # 0x31
'Kou ', # 0x32
'Qian ', # 0x33
'Ma ', # 0x34
'Zang ', # 0x35
'Sonoko ', # 0x36
'Qiang ', # 0x37
'Dou ', # 0x38
'Lian ', # 0x39
'Lin ', # 0x3a
'Kou ', # 0x3b
'Ai ', # 0x3c
'Bi ', # 0x3d
'Li ', # 0x3e
'Wei ', # 0x3f
'Ji ', # 0x40
'Xun ', # 0x41
'Sheng ', # 0x42
'Fan ', # 0x43
'Meng ', # 0x44
'Ou ', # 0x45
'Chan ', # 0x46
'Dian ', # 0x47
'Xun ', # 0x48
'Jiao ', # 0x49
'Rui ', # 0x4a
'Rui ', # 0x4b
'Lei ', # 0x4c
'Yu ', # 0x4d
'Qiao ', # 0x4e
'Chu ', # 0x4f
'Hua ', # 0x50
'Jian ', # 0x51
'Mai ', # 0x52
'Yun ', # 0x53
'Bao ', # 0x54
'You ', # 0x55
'Qu ', # 0x56
'Lu ', # 0x57
'Rao ', # 0x58
'Hui ', # 0x59
'E ', # 0x5a
'Teng ', # 0x5b
'Fei ', # 0x5c
'Jue ', # 0x5d
'Zui ', # 0x5e
'Fa ', # 0x5f
'Ru ', # 0x60
'Fen ', # 0x61
'Kui ', # 0x62
'Shun ', # 0x63
'Rui ', # 0x64
'Ya ', # 0x65
'Xu ', # 0x66
'Fu ', # 0x67
'Jue ', # 0x68
'Dang ', # 0x69
'Wu ', # 0x6a
'Tong ', # 0x6b
'Si ', # 0x6c
'Xiao ', # 0x6d
'Xi ', # 0x6e
'Long ', # 0x6f
'Yun ', # 0x70
'[?] ', # 0x71
'Qi ', # 0x72
'Jian ', # 0x73
'Yun ', # 0x74
'Sun ', # 0x75
'Ling ', # 0x76
'Yu ', # 0x77
'Xia ', # 0x78
'Yong ', # 0x79
'Ji ', # 0x7a
'Hong ', # 0x7b
'Si ', # 0x7c
'Nong ', # 0x7d
'Lei ', # 0x7e
'Xuan ', # 0x7f
'Yun ', # 0x80
'Yu ', # 0x81
'Xi ', # 0x82
'Hao ', # 0x83
'Bo ', # 0x84
'Hao ', # 0x85
'Ai ', # 0x86
'Wei ', # 0x87
'Hui ', # 0x88
'Wei ', # 0x89
'Ji ', # 0x8a
'Ci ', # 0x8b
'Xiang ', # 0x8c
'Luan ', # 0x8d
'Mie ', # 0x8e
'Yi ', # 0x8f
'Leng ', # 0x90
'Jiang ', # 0x91
'Can ', # 0x92
'Shen ', # 0x93
'Qiang ', # 0x94
'Lian ', # 0x95
'Ke ', # 0x96
'Yuan ', # 0x97
'Da ', # 0x98
'Ti ', # 0x99
'Tang ', # 0x9a
'Xie ', # 0x9b
'Bi ', # 0x9c
'Zhan ', # 0x9d
'Sun ', # 0x9e
'Lian ', # 0x9f
'Fan ', # 0xa0
'Ding ', # 0xa1
'Jie ', # 0xa2
'Gu ', # 0xa3
'Xie ', # 0xa4
'Shu ', # 0xa5
'Jian ', # 0xa6
'Kao ', # 0xa7
'Hong ', # 0xa8
'Sa ', # 0xa9
'Xin ', # 0xaa
'Xun ', # 0xab
'Yao ', # 0xac
'Hie ', # 0xad
'Sou ', # 0xae
'Shu ', # 0xaf
'Xun ', # 0xb0
'Dui ', # 0xb1
'Pin ', # 0xb2
'Wei ', # 0xb3
'Neng ', # 0xb4
'Chou ', # 0xb5
'Mai ', # 0xb6
'Ru ', # 0xb7
'Piao ', # 0xb8
'Tai ', # 0xb9
'Qi ', # 0xba
'Zao ', # 0xbb
'Chen ', # 0xbc
'Zhen ', # 0xbd
'Er ', # 0xbe
'Ni ', # 0xbf
'Ying ', # 0xc0
'Gao ', # 0xc1
'Cong ', # 0xc2
'Xiao ', # 0xc3
'Qi ', # 0xc4
'Fa ', # 0xc5
'Jian ', # 0xc6
'Xu ', # 0xc7
'Kui ', # 0xc8
'Jie ', # 0xc9
'Bian ', # 0xca
'Diao ', # 0xcb
'Mi ', # 0xcc
'Lan ', # 0xcd
'Jin ', # 0xce
'Cang ', # 0xcf
'Miao ', # 0xd0
'Qiong ', # 0xd1
'Qie ', # 0xd2
'Xian ', # 0xd3
'[?] ', # 0xd4
'Ou ', # 0xd5
'Xian ', # 0xd6
'Su ', # 0xd7
'Lu ', # 0xd8
'Yi ', # 0xd9
'Xu ', # 0xda
'Xie ', # 0xdb
'Li ', # 0xdc
'Yi ', # 0xdd
'La ', # 0xde
'Lei ', # 0xdf
'Xiao ', # 0xe0
'Di ', # 0xe1
'Zhi ', # 0xe2
'Bei ', # 0xe3
'Teng ', # 0xe4
'Yao ', # 0xe5
'Mo ', # 0xe6
'Huan ', # 0xe7
'Piao ', # 0xe8
'Fan ', # 0xe9
'Sou ', # 0xea
'Tan ', # 0xeb
'Tui ', # 0xec
'Qiong ', # 0xed
'Qiao ', # 0xee
'Wei ', # 0xef
'Liu ', # 0xf0
'Hui ', # 0xf1
'[?] ', # 0xf2
'Gao ', # 0xf3
'Yun ', # 0xf4
'[?] ', # 0xf5
'Li ', # 0xf6
'Shu ', # 0xf7
'Chu ', # 0xf8
'Ai ', # 0xf9
'Lin ', # 0xfa
'Zao ', # 0xfb
'Xuan ', # 0xfc
'Chen ', # 0xfd
'Lai ', # 0xfe
'Huo ', # 0xff
)
|
markflorisson/pystaging | refs/heads/master | pystaging/tests/test_quotation.py | 1 | import unittest
from pystaging import *
@staging
def make_expr(c):
return quote[a + b * escape[c]]
@staging(hygiene=True)
def make_stmt(c):
with quote as body:
result = a + b * escape[c]
return body
@staging
def square(x):
return quote[escape[x] * escape[x]]
@staging
def splice_expr(x):
return escape[square(quote[x])]
class TestQuotation(unittest.TestCase):
def test_quotation_expr(self):
expr = make_expr(10)
self.assertEqual(string(expr), "(a + (b * 10))")
def test_quotation_stmt(self):
s1 = make_stmt(10)
env = {'a': 2, 'b': 5}
run(s1, env)
self.assertNotIn('result', env)
self.assertEqual(env['staged.temp.result'], 2 + 5 * 10)
def test_inline_splice_expr(self):
self.assertEqual(splice_expr(10), 100) |
barma1309/Kalista | refs/heads/master | .virtualenvs/Kalista/lib/python3.4/site-packages/django/contrib/admin/options.py | 26 | import copy
import operator
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, flatten_fieldsets, get_deleted_objects,
lookup_needs_distinct, model_format_dict, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat, ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(returns None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
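    # Example override (illustrative; Book and Author are hypothetical
    # models, not part of Django):
    # class BookAdmin(ModelAdmin):
    #     def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
    #         if db_field.name == 'author':
    #             kwargs['queryset'] = Author.objects.filter(is_active=True)
    #         return super(BookAdmin, self).formfield_for_foreignkey(
    #             db_field, request, **kwargs)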
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(l):
l = l()
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
break
# It is allowed to filter on values that would be found from local
# model anyways. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (prev_field.concrete and
field not in prev_field.get_path_info()[-1].target_fields):
relation_parts.append(part)
if not getattr(field, 'get_path_info', None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.get_path_info()[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
clean_lookup = LOOKUP_SEP.join(relation_parts)
valid_lookups = [self.date_hierarchy]
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.append(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.append(filter_item[0])
else:
valid_lookups.append(filter_item)
return clean_lookup in valid_lookups
def to_field_allowed(self, request, to_field):
"""
Returns True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
remote_field = related_object.field.remote_field
if (any(issubclass(model, related_model) for model in registered_models) and
hasattr(remote_field, 'get_related_field') and
remote_field.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to delete the given
Django model instance. The default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/change/$', wrap(self.change_view), name='%s_%s_change' % info),
# For backwards compatibility (was the change url before 1.9)
url(r'^(.+)/$', wrap(RedirectView.as_view(
pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
))),
]
return urlpatterns
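    # Example override (illustrative; report_view is a hypothetical method):
    # class MyModelAdmin(ModelAdmin):
    #     def get_urls(self):
    #         from django.conf.urls import url
    #         extra = [
    #             url(r'^report/$',
    #                 self.admin_site.admin_view(self.report_view)),
    #         ]
    #         return extra + super(MyModelAdmin, self).get_urls()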
def urls(self):
return self.get_urls()
urls = property(urls)
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'admin/RelatedObjectLookups.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp.min.js',
]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = OrderedDict(
(f, None) for f in readonly_fields
if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__))
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
Returns an instance matching the field and value provided; the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if (defaults.get('fields') is None
and not modelform_defines_fields(defaults.get('form'))):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yields formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION,
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return OrderedDict()
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend(self.get_action(action) for action in class_actions)
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into an OrderedDict keyed by name.
actions = OrderedDict(
(name, (func, name, desc))
for func, name, desc in actions
)
return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
or the name of a method on the ModelAdmin. Return is a tuple of
(callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
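    # Example (illustrative; Article and its 'status' field are
    # hypothetical): a standalone action that get_actions()/get_action()
    # above will pick up:
    # def make_published(modeladmin, request, queryset):
    #     queryset.update(status='published')
    # make_published.short_description = "Mark selected articles as published"
    # class ArticleAdmin(ModelAdmin):
    #     actions = [make_published]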
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Returns a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Returns a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
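    # For example (illustrative): search_fields = ('^name', '=email', 'body')
    # turns each whitespace-separated word of the search term into
    # name__istartswith OR email__iexact OR body__icontains, with the
    # per-word querysets chained (ANDed) together.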
def get_preserved_filters(self, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a change message from a changed object.
"""
change_message = []
if add:
change_message.append(_('Added.'))
elif form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': force_text(added_object._meta.verbose_name),
'object': force_text(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': force_text(changed_object._meta.verbose_name),
'object': force_text(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': force_text(deleted_object._meta.verbose_name),
'object': force_text(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError('Bad message level string: `%s`. '
'Possible values are: %s' % (level, levels_repr))
messages.add_message(request, level, message, extra_tags=extra_tags,
fail_silently=fail_silently)
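    # Example (illustrative): self.message_user(request, "Archived.",
    # level=messages.WARNING) or, equivalently, level='WARNING' -- the
    # string is resolved via messages.constants as shown above.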
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'value': value,
'obj': obj,
})
elif "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name)
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'change',
'value': value,
'obj': obj,
'new_value': new_value,
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form, the POST data is
# invalid; leaving 'action' unset makes the form fail the validation
# check below, so nothing more needs to happen here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determines the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'delete',
'value': escape(obj_id),
})
self.message_user(request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display),
}, messages.SUCCESS)
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
)
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
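    # Example (illustrative; 'title' and the M2M 'tags' are hypothetical
    # fields): GET /admin/news/article/add/?title=Hi&tags=1,2 prefills
    # 'title' with "Hi" and splits the M2M value into ['1', '2'].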
@csrf_protect_m
@transaction.atomic
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
if request.method == 'POST' and '_saveasnew' in request.POST:
object_id = None
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
# Use the change template instead of the add template.
add = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
list_select_related = self.get_list_select_related(request)
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
# Add the action checkboxes if there are any actions available.
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
search_fields, list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if (request.method == "POST" and cl.list_editable and
'_save' in request.POST and not action_failed):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = dict(
self.admin_site.each_context(request),
module_name=force_text(opts.verbose_name_plural),
selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
selection_note_all=selection_note_all % {'total_count': cl.result_count},
title=cl.title,
is_popup=cl.is_popup,
to_field=cl.to_field,
cl=cl,
media=media,
has_add_permission=self.has_add_permission(request),
opts=cl.opts,
action_form=action_form,
actions_on_top=self.actions_on_top,
actions_on_bottom=self.actions_on_bottom,
actions_selection_counter=self.actions_selection_counter,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST,
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super(DeleteProtectedModelForm, self).is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super(InlineModelAdmin, self).get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super(InlineModelAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
# The model was auto-created as intermediary for a
# ManyToMany-relationship, find the target model
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
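# Hedged usage sketch (not part of this module): wiring the inline classes
# above into a ModelAdmin. ``Book`` and ``Chapter`` are hypothetical models
# with a ForeignKey from Chapter to Book.
#
#     from django.contrib import admin
#
#     class ChapterInline(admin.TabularInline):
#         model = Chapter
#         extra = 1
#         show_change_link = True
#
#     class BookAdmin(admin.ModelAdmin):
#         inlines = [ChapterInline]
#
#     admin.site.register(Book, BookAdmin)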
|
tushar7795/MicroBlog | refs/heads/master | flask/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py | 487 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
"""Lock access to a file using symlink(2)."""
def __init__(self, path, threaded=True, timeout=None):
        # super(SymlinkLockFile, self).__init__(...)
LockBase.__init__(self, path, threaded, timeout)
# split it back!
self.unique_name = os.path.split(self.unique_name)[1]
def acquire(self, timeout=None):
# Hopefully unnecessary for symlink.
#try:
# open(self.unique_name, "wb").close()
#except IOError:
# raise LockFailed("failed to create %s" % self.unique_name)
        # The old "a and b or c" idiom would silently drop an explicit
        # timeout of 0; a conditional expression keeps it.
        timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a symbolic link to it.
try:
os.symlink(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
if self.i_am_locking():
                    # Linked to our unique name. Proceed.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout/10 if timeout is not None else 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.islink(self.lock_file)
def i_am_locking(self):
return os.path.islink(self.lock_file) and \
os.readlink(self.lock_file) == self.unique_name
def break_lock(self):
if os.path.islink(self.lock_file): # exists && link
os.unlink(self.lock_file)
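# Hedged usage sketch (not part of the vendored module), assuming the public
# ``lockfile`` distribution this file is vendored from; the path below is
# illustrative only.
#
#     from lockfile.symlinklockfile import SymlinkLockFile
#
#     lock = SymlinkLockFile('/tmp/shared.txt', timeout=10)
#     lock.acquire()          # creates the symlink, or raises LockTimeout
#     try:
#         pass                # critical section
#     finally:
#         lock.release()      # removes the symlink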
|
styxit/CouchPotatoServer | refs/heads/master | libs/html5lib/treewalkers/__init__.py | 1229 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
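# Hedged usage sketch (not part of the original module):
#
#     import html5lib
#     from html5lib.treewalkers import getTreeWalker
#
#     doc = html5lib.parse('<p>Hello <b>world</b></p>')   # default: etree
#     TreeWalker = getTreeWalker('etree')
#     for token in TreeWalker(doc):
#         print(token['type'])    # StartTag, Characters, EndTag, ...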
|
TomBaxter/waterbutler | refs/heads/develop | waterbutler/tasks/settings.py | 2 | import os
from pkg_resources import iter_entry_points
from kombu import Queue, Exchange
from waterbutler import settings
config = settings.child('TASKS_CONFIG')
BROKER_URL = config.get(
'BROKER_URL',
'amqp://{}:{}//'.format(
os.environ.get('RABBITMQ_PORT_5672_TCP_ADDR', ''),
os.environ.get('RABBITMQ_PORT_5672_TCP_PORT', ''),
)
)
WAIT_TIMEOUT = int(config.get('WAIT_TIMEOUT', 15))
WAIT_INTERVAL = float(config.get('WAIT_INTERVAL', 0.5))
ADHOC_BACKEND_PATH = config.get('ADHOC_BACKEND_PATH', '/tmp')
CELERY_CREATE_MISSING_QUEUES = config.get_bool('CELERY_CREATE_MISSING_QUEUES', False)
CELERY_DEFAULT_QUEUE = config.get('CELERY_DEFAULT_QUEUE', 'waterbutler')
CELERY_QUEUES = (
Queue('waterbutler', Exchange('waterbutler'), routing_key='waterbutler'),
)
# CELERY_ALWAYS_EAGER = config.get('CELERY_ALWAYS_EAGER', True)
CELERY_ALWAYS_EAGER = config.get_bool('CELERY_ALWAYS_EAGER', False)
# CELERY_RESULT_BACKEND = config.get('CELERY_RESULT_BACKEND', 'redis://')
CELERY_RESULT_BACKEND = config.get_nullable('CELERY_RESULT_BACKEND', None)
CELERY_DISABLE_RATE_LIMITS = config.get_bool('CELERY_DISABLE_RATE_LIMITS', True)
CELERY_TASK_RESULT_EXPIRES = int(config.get('CELERY_TASK_RESULT_EXPIRES', 60))
CELERY_IMPORTS = [
entry.module_name
for entry in iter_entry_points(group='waterbutler.providers.tasks', name=None)
]
CELERY_IMPORTS.extend([
'waterbutler.tasks.move'
])
CELERY_ACKS_LATE = True
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
|
MindCookin/python-exercises | refs/heads/master | chapter01/basicMathAlgorythms.py | 1 | #!/usr/bin/env python
def main():
state = input("""
    What would you like to do:
    (1) Calculate the sum, difference and product of two numbers
(2) Calculate multiplication table of a given number
(3) Calculate factorial of an integer
""")
switcher = {
1: calcSumSubsMult,
2: calcMultTable,
3: calcFactorial
}
switcher[state]()
askForNext()
def askForNext():
shouldContinue = input("""
Want another try?
(0 to exit, any other number to continue)""")
if shouldContinue:
main()
def calcSumSubsMult():
x = input("enter first number ")
y = input("enter second number ")
print """Sum of {0} and {1} is {2}.
    Difference of {0} and {1} is {3}.
    Product of {0} and {1} is {4}.
""".format(x, y, x+y, x-y, x*y)
def calcMultTable():
integer = input("enter an integer ")
print "%d multiplication table is %d" % (integer, [integer * i for i in range(1, 10)])
def calcFactorial():
integer = input("enter an integer ")
print "%d factorial is %d" % (integer, reduce(lambda x, y: x*y, range(1, integer + 1)))
main()
|
caslei/TfModels | refs/heads/master | slim/nets/nets_factory_test.py | 37 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import nets_factory
slim = tf.contrib.slim
class NetworksTest(tf.test.TestCase):
def testGetNetworkFn(self):
batch_size = 5
num_classes = 1000
for net in nets_factory.networks_map:
with self.test_session():
net_fn = nets_factory.get_network_fn(net, num_classes)
# Most networks use 224 as their default_image_size
image_size = getattr(net_fn, 'default_image_size', 224)
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
logits, end_points = net_fn(inputs)
self.assertTrue(isinstance(logits, tf.Tensor))
self.assertTrue(isinstance(end_points, dict))
self.assertEqual(logits.get_shape().as_list()[0], batch_size)
self.assertEqual(logits.get_shape().as_list()[-1], num_classes)
def testGetNetworkFnArgScope(self):
batch_size = 5
num_classes = 10
net = 'cifarnet'
with self.test_session(use_gpu=True):
net_fn = nets_factory.get_network_fn(net, num_classes)
image_size = getattr(net_fn, 'default_image_size', 224)
with slim.arg_scope([slim.model_variable, slim.variable],
device='/CPU:0'):
inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
net_fn(inputs)
weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'CifarNet/conv1')[0]
self.assertDeviceEqual('/CPU:0', weights.device)
if __name__ == '__main__':
tf.test.main()
|
ansible/ansible-modules-extras | refs/heads/devel | cloud/cloudstack/cs_zone_facts.py | 48 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_zone_facts
short_description: Gathering facts of zones from Apache CloudStack based clouds.
description:
- Gathering facts from the API of a zone.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the zone.
required: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- cs_zone_facts:
name: ch-gva-1
delegate_to: localhost
- debug:
var: cloudstack_zone
'''
RETURN = '''
---
cloudstack_zone.id:
description: UUID of the zone.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
cloudstack_zone.name:
description: Name of the zone.
returned: success
type: string
sample: zone01
cloudstack_zone.dns1:
description: First DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
cloudstack_zone.dns2:
description: Second DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
cloudstack_zone.internal_dns1:
description: First internal DNS for the zone.
returned: success
type: string
sample: 8.8.8.8
cloudstack_zone.internal_dns2:
description: Second internal DNS for the zone.
returned: success
type: string
sample: 8.8.4.4
cloudstack_zone.dns1_ipv6:
description: First IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8888"
cloudstack_zone.dns2_ipv6:
description: Second IPv6 DNS for the zone.
returned: success
type: string
sample: "2001:4860:4860::8844"
cloudstack_zone.allocation_state:
description: State of the zone.
returned: success
type: string
sample: Enabled
cloudstack_zone.domain:
description: Domain the zone is related to.
returned: success
type: string
sample: ROOT
cloudstack_zone.network_domain:
description: Network domain for the zone.
returned: success
type: string
sample: example.com
cloudstack_zone.network_type:
description: Network type for the zone.
returned: success
type: string
sample: basic
cloudstack_zone.local_storage_enabled:
description: Local storage offering enabled.
returned: success
type: bool
sample: false
cloudstack_zone.securitygroups_enabled:
description: Security groups support is enabled.
returned: success
type: bool
sample: false
cloudstack_zone.guest_cidr_address:
description: Guest CIDR address for the zone
returned: success
type: string
sample: 10.1.1.0/24
cloudstack_zone.dhcp_provider:
description: DHCP provider for the zone
returned: success
type: string
sample: VirtualRouter
cloudstack_zone.zone_token:
description: Zone token
returned: success
type: string
sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
cloudstack_zone.tags:
description: List of resource tags associated with the zone.
returned: success
type: dict
sample: [ { "key": "foo", "value": "bar" } ]
'''
import base64
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackZoneFacts(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackZoneFacts, self).__init__(module)
self.returns = {
'dns1': 'dns1',
'dns2': 'dns2',
'internaldns1': 'internal_dns1',
'internaldns2': 'internal_dns2',
'ipv6dns1': 'dns1_ipv6',
'ipv6dns2': 'dns2_ipv6',
'domain': 'network_domain',
'networktype': 'network_type',
'securitygroupsenabled': 'securitygroups_enabled',
'localstorageenabled': 'local_storage_enabled',
'guestcidraddress': 'guest_cidr_address',
'dhcpprovider': 'dhcp_provider',
'allocationstate': 'allocation_state',
'zonetoken': 'zone_token',
}
self.facts = {
'cloudstack_zone': None,
}
def get_zone(self):
if not self.zone:
# TODO: add param key signature in get_zone()
self.module.params['zone'] = self.module.params.get('name')
super(AnsibleCloudStackZoneFacts, self).get_zone()
return self.zone
def run(self):
zone = self.get_zone()
self.facts['cloudstack_zone'] = self.get_result(zone)
return self.facts
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
)
cs_zone_facts = AnsibleCloudStackZoneFacts(module=module).run()
cs_facts_result = dict(changed=False, ansible_facts=cs_zone_facts)
module.exit_json(**cs_facts_result)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
sposs/DIRAC | refs/heads/integration | FrameworkSystem/private/monitoring/PlotCache.py | 3 | # $HeadURL$
__RCSID__ = "$Id$"
import os
import os.path
try:
import hashlib as md5
except:
import md5
import time
import threading
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.FrameworkSystem.private.monitoring.RRDManager import RRDManager
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
gSynchro = Synchronizer()
class PlotCache:
def __init__( self, RRDManager ):
self.rrdManager = RRDManager
self.plotsLocation = self.rrdManager.getGraphLocation()
for file in os.listdir( self.plotsLocation ):
if file.find( ".png" ) > 0:
os.unlink( "%s/%s" % ( self.plotsLocation, file ) )
self.cachedPlots = {}
self.alive = True
self.graceTime = 60
self.purgeThread = threading.Thread( target = self.purgeCached )
self.purgeThread.setDaemon( 1 )
self.purgeThread.start()
def __generateName( self, *args, **kwargs ):
m = md5.md5()
m.update( repr( args ) )
m.update( repr( kwargs ) )
return m.hexdigest()
def __isCurrentTime( self, toSecs ):
currentBucket = self.rrdManager.getCurrentBucketTime( self.graceTime )
return toSecs + self.graceTime > currentBucket
def purgeCached( self ):
while self.alive:
time.sleep( self.graceTime * 2 )
self.__purgeExpiredGraphs()
@gSynchro
def __refreshGraph( self, graphFile ):
self.cachedPlots[ graphFile ][1] = time.time()
@gSynchro
def __registerGraph( self, graphFile, fromSecs, toSecs ):
if self.__isCurrentTime( toSecs ):
cacheTime = self.graceTime * 2
else:
cacheTime = 3600
self.cachedPlots[ graphFile ] = [ cacheTime, time.time() ]
@gSynchro
def __isCacheGraph( self, graphFile ):
return graphFile in self.cachedPlots
@gSynchro
def __purgeExpiredGraphs( self ):
now = time.time()
graphsToDelete = []
for cachedFile in self.cachedPlots:
fileData = self.cachedPlots[ cachedFile ]
if fileData[0] + fileData[1] < now:
graphsToDelete.append( cachedFile )
for cachedFile in graphsToDelete:
try:
filePath = "%s/%s" % ( self.plotsLocation, cachedFile )
os.unlink( filePath )
except Exception, e:
gLogger.error( "Can't delete plot file %s: %s" % ( filePath, str( e ) ) )
del( self.cachedPlots[ cachedFile ] )
def groupPlot( self, *args ):
return self.__getGraph( getattr( self.rrdManager, "groupPlot" ) , args )
def plot( self, *args ):
return self.__getGraph( getattr( self.rrdManager, "plot" ) , args )
def __getGraph( self, plotFunc, args ):
fromSecs = args[0]
toSecs = args[1]
graphFile = "%s-%s-%s.png" % ( self.__generateName( *args[2:] ),
self.rrdManager.bucketize( fromSecs, self.graceTime ),
self.rrdManager.bucketize( toSecs, self.graceTime )
)
if self.__isCacheGraph( graphFile ):
self.__refreshGraph( graphFile )
gLogger.info( "Cached graph file %s" % graphFile )
gMonitor.addMark( "cachedplots" )
return S_OK( graphFile )
else:
gMonitor.addMark( "drawnplots" )
self.__registerGraph( graphFile, fromSecs, toSecs )
return plotFunc( graphFilename = graphFile, *args )
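# Hedged illustration (not part of the original source): repeated plot() calls
# with identical arguments inside the same grace-time bucket map to the same
# md5-based file name, so only the first call actually renders.
#
#     cache = PlotCache( rrdManager )         # rrdManager: a configured RRDManager
#     cache.plot( fromSecs, toSecs, 'cpu' )   # rendered via RRDManager.plot
#     cache.plot( fromSecs, toSecs, 'cpu' )   # served from the cache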
|
mishbahr/django-connected | refs/heads/master | connected_accounts/providers/mailchimp.py | 1 | import json
import logging
from django.utils.translation import ugettext_lazy as _
from requests.exceptions import RequestException
from connected_accounts.conf import settings
from connected_accounts.provider_pool import providers
from .base import OAuth2Provider, ProviderAccount
logger = logging.getLogger('connected_accounts')
class MailChimpAccount(ProviderAccount):
def get_avatar_url(self):
return settings.STATIC_URL + 'img/connected_accounts/providers/mailchimp.png'
def to_str(self):
default = super(MailChimpAccount, self).to_str()
return self.account.extra_data.get('accountname', default)
def get_token(self, token):
"""Returns oauth_token (OAuth1) or access token (OAuth2)"""
return '{0}-{1}'.format(token, self.account.extra_data.get('dc', 'us1'))
def extract_common_fields(self):
data = self.account.extra_data
return dict(name=data.get('accountname'))
class MailChimpProvider(OAuth2Provider):
id = 'mailchimp'
name = _('MailChimp')
account_class = MailChimpAccount
expires_in_key = ''
authorization_url = 'https://login.mailchimp.com/oauth2/authorize'
access_token_url = 'https://login.mailchimp.com/oauth2/token'
profile_url = 'https://login.mailchimp.com/oauth2/metadata'
consumer_key = settings.CONNECTED_ACCOUNTS_MAILCHIMP_CONSUMER_KEY
consumer_secret = settings.CONNECTED_ACCOUNTS_MAILCHIMP_CONSUMER_SECRET
def extract_uid(self, data):
"""Return unique identifier from the profile info."""
return data.get('user_id', None)
def get_profile_data(self, raw_token):
"""Fetch user profile information."""
token_data = json.loads(raw_token)
# This header is the 'magic' that makes this empty GET request work.
headers = {'Authorization': 'OAuth %s' % token_data['access_token']}
try:
response = self.request('get', self.profile_url, headers=headers)
response.raise_for_status()
except RequestException as e:
logger.error('Unable to fetch user profile: {0}'.format(e))
return None
else:
return response.json() or response.text
providers.register(MailChimpProvider)
|
kaiserroll14/301finalproject | refs/heads/master | main/win/main/requests/packages/urllib3/util/retry.py | 66 | import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
indempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist when the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
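# Hedged illustration (not part of urllib3): how get_backoff_time() grows as
# errors accumulate. _observed_errors is normally advanced by increment();
# it is set directly here only to show the schedule.
#
#     for n in range(1, 5):
#         r = Retry(backoff_factor=0.1, _observed_errors=n)
#         print(n, r.get_backoff_time())   # 1 -> 0, 2 -> 0.2, 3 -> 0.4, 4 -> 0.8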
|
kernelmode/kaggle-titanic | refs/heads/master | examples/myfirstforest.py | 26 | """ Writing my first randomforest code.
Author : AstroDave
Date : 23rd September 2012
Revised: 15 April 2014
please see packages.python.org/milk/randomforests.html for more
"""
import pandas as pd
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('train.csv', header=0) # Load the train file into a dataframe
# I need to convert all strings to integer classifiers.
# I need to fill in the missing values of the data and make it complete.
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc.
# All missing Embarked -> just make them embark from most common place
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('test.csv', header=0) # Load the test file into a dataframe
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
print 'Training...'
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
print 'Predicting...'
output = forest.predict(test_data).astype(int)
predictions_file = open("myfirstforest.csv", "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
predictions_file.close()
print 'Done.'
|
jounex/hue | refs/heads/master | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Random/OSRNG/test_generic.py | 131 | # -*- coding: utf-8 -*-
#
# SelfTest/Util/test_generic.py: Self-test for the OSRNG.new() function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Random.OSRNG"""
__revision__ = "$Id$"
import unittest
class SimpleTest(unittest.TestCase):
def runTest(self):
"""Crypto.Random.OSRNG.new()"""
# Import the OSRNG module and try to use it
import Crypto.Random.OSRNG
randobj = Crypto.Random.OSRNG.new()
x = randobj.read(16)
y = randobj.read(16)
self.assertNotEqual(x, y)
def get_tests(config={}):
return [SimpleTest()]
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
cristianquaglio/odoo | refs/heads/master | addons/resource/resource.py | 25 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from datetime import timedelta
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
import pytz
class resource_calendar(osv.osv):
""" Calendar model for a resource. It has
- attendance_ids: list of resource.calendar.attendance that are a working
interval in a given weekday.
- leave_ids: list of leaves linked to this calendar. A leave can be general
or linked to a specific resource, depending on its resource_id.
All methods in this class use intervals. An interval is a tuple holding
(begin_datetime, end_datetime). A list of intervals is therefore a list of
tuples, holding several intervals of work or leaves. """
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name': fields.char("Name", required=True),
'company_id': fields.many2one('res.company', 'Company', required=False),
'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time', copy=True),
'manager': fields.many2one('res.users', 'Workgroup Manager'),
'leave_ids': fields.one2many(
'resource.calendar.leaves', 'calendar_id', 'Leaves',
help=''
),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
# --------------------------------------------------
# Utility methods
# --------------------------------------------------
def interval_clean(self, intervals):
""" Utility method that sorts and removes overlapping inside datetime
intervals. The intervals are sorted based on increasing starting datetime.
Overlapping intervals are merged into a single one.
:param list intervals: list of intervals; each interval is a tuple
(datetime_from, datetime_to)
:return list cleaned: list of sorted intervals without overlap """
intervals = sorted(intervals, key=itemgetter(0)) # sort on first datetime
cleaned = []
working_interval = None
while intervals:
current_interval = intervals.pop(0)
if not working_interval: # init
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[0]: # interval is disjoint
cleaned.append(tuple(working_interval))
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[1]: # union of greater intervals
working_interval[1] = current_interval[1]
if working_interval: # handle void lists
cleaned.append(tuple(working_interval))
return cleaned
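    # Hedged worked example (not part of the original source): plain integers
    # stand in for datetimes, since interval_clean only relies on ordering.
    # Overlapping (8, 10) and (9, 12) are merged into one interval:
    #
    #     interval_clean([(9, 12), (8, 10), (14, 16)])
    #     # -> [(8, 12), (14, 16)]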
def interval_remove_leaves(self, interval, leave_intervals):
""" Utility method that remove leave intervals from a base interval:
- clean the leave intervals, to have an ordered list of not-overlapping
intervals
- initiate the current interval to be the base interval
- for each leave interval:
- finishing before the current interval: skip, go to next
- beginning after the current interval: skip and get out of the loop
because we are outside range (leaves are ordered)
- beginning within the current interval: close the current interval
and begin a new current interval that begins at the end of the leave
interval
- ending within the current interval: update the current interval begin
to match the leave interval ending
:param tuple interval: a tuple (beginning datetime, ending datetime) that
is the base interval from which the leave intervals
will be removed
:param list leave_intervals: a list of tuples (beginning datetime, ending datetime)
that are intervals to remove from the base interval
:return list intervals: a list of tuples (begin datetime, end datetime)
that are the remaining valid intervals """
if not interval:
return interval
if leave_intervals is None:
leave_intervals = []
intervals = []
leave_intervals = self.interval_clean(leave_intervals)
current_interval = [interval[0], interval[1]]
for leave in leave_intervals:
if leave[1] <= current_interval[0]:
continue
if leave[0] >= current_interval[1]:
break
if current_interval[0] < leave[0] < current_interval[1]:
current_interval[1] = leave[0]
intervals.append((current_interval[0], current_interval[1]))
current_interval = [leave[1], interval[1]]
# if current_interval[0] <= leave[1] <= current_interval[1]:
if current_interval[0] <= leave[1]:
current_interval[0] = leave[1]
if current_interval and current_interval[0] < interval[1]: # remove intervals moved outside base interval due to leaves
intervals.append((current_interval[0], current_interval[1]))
return intervals
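    # Hedged worked example (not part of the original source), again with
    # integers standing in for datetimes: a leave from 12 to 13 splits the
    # base working interval (8, 18) in two:
    #
    #     interval_remove_leaves((8, 18), [(12, 13)])
    #     # -> [(8, 12), (13, 18)]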
def interval_schedule_hours(self, intervals, hour, remove_at_end=True):
""" Schedule hours in intervals. The last matching interval is truncated
to match the specified hours.
It is possible to truncate the last interval at its beginning or ending.
        Truncation does not reorder the intervals, so they should already be
        submitted in the appropriate order.
:param list intervals: a list of tuples (beginning datetime, ending datetime)
        :param int/float hour: number of hours to schedule. It will be converted
into a timedelta, but should be submitted as an
int or float.
:param boolean remove_at_end: remove extra hours at the end of the last
matching interval. Otherwise, do it at the
beginning.
:return list results: a list of intervals. If the number of hours to schedule
is greater than the possible scheduling in the intervals, no extra-scheduling
is done, and results == intervals. """
results = []
res = datetime.timedelta()
limit = datetime.timedelta(hours=hour)
for interval in intervals:
res += interval[1] - interval[0]
if res > limit and remove_at_end:
interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res)))
elif res > limit:
interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1])
results.append(interval)
if res > limit:
break
return results
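    # Illustrative sketch (assumed datetimes): scheduling 6 hours over a
    # 4h + 4h day keeps the first interval and truncates the second at its end.
    #
    #   from datetime import datetime as dt
    #   calendar_obj.interval_schedule_hours(
    #       [(dt(2014, 1, 6, 8, 0), dt(2014, 1, 6, 12, 0)),
    #        (dt(2014, 1, 6, 13, 0), dt(2014, 1, 6, 17, 0))], 6)
    #   # -> [(dt(2014, 1, 6, 8, 0), dt(2014, 1, 6, 12, 0)),
    #   #     (dt(2014, 1, 6, 13, 0), dt(2014, 1, 6, 15, 0))]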
# --------------------------------------------------
# Date and hours computation
# --------------------------------------------------
def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None):
""" Given a list of weekdays, return matching resource.calendar.attendance"""
calendar = self.browse(cr, uid, id, context=None)
return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays]
def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None):
""" Return the list of weekdays that contain at least one working interval.
If no id is given (no calendar), return default weekdays. """
if not id:
return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4]
calendar = self.browse(cr, uid, id, context=None)
weekdays = set()
for attendance in calendar.attendance_ids:
weekdays.add(int(attendance.dayofweek))
return list(weekdays)
def get_next_day(self, cr, uid, id, day_date, context=None):
""" Get following date of day_date, based on resource.calendar. If no
calendar is provided, just return the next day.
:param int id: id of a resource.calendar. If not given, simply add one day
to the submitted date.
:param date day_date: current day as a date
:return date: next day of calendar, or just next day """
if not id:
return day_date + relativedelta(days=1)
weekdays = self.get_weekdays(cr, uid, id, context)
base_index = -1
for weekday in weekdays:
if weekday > day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days < 0:
days = 7 + days
return day_date + relativedelta(days=days)
def get_previous_day(self, cr, uid, id, day_date, context=None):
""" Get previous date of day_date, based on resource.calendar. If no
calendar is provided, just return the previous day.
:param int id: id of a resource.calendar. If not given, simply remove
one day from the submitted date.
:param date day_date: current day as a date
:return date: previous day of calendar, or just previous day """
if not id:
return day_date + relativedelta(days=-1)
weekdays = self.get_weekdays(cr, uid, id, context)
weekdays.reverse()
base_index = -1
for weekday in weekdays:
if weekday < day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days > 0:
days = days - 7
return day_date + relativedelta(days=days)
def get_leave_intervals(self, cr, uid, id, resource_id=None,
start_datetime=None, end_datetime=None,
context=None):
"""Get the leaves of the calendar. Leaves can be filtered on the resource,
the start datetime or the end datetime.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param datetime start_datetime: if provided, do not take into account leaves
ending before this date.
:param datetime end_datetime: if provided, do not take into account leaves
beginning after this date.
:return list leaves: list of tuples (start_datetime, end_datetime) of
leave intervals
"""
resource_calendar = self.browse(cr, uid, id, context=context)
leaves = []
for leave in resource_calendar.leave_ids:
            if leave.resource_id and resource_id != leave.resource_id.id:
continue
date_from = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if end_datetime and date_from > end_datetime:
continue
date_to = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if start_datetime and date_to < start_datetime:
continue
leaves.append((date_from, date_to))
return leaves
def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working intervals of the day based on calendar. This method
handle leaves that come directly from the leaves parameter or can be computed.
:param int id: resource.calendar id; take the first one if is a list
:param datetime start_dt: datetime object that is the beginning hours
for the working intervals computation; any
working interval beginning before start_dt
will be truncated. If not set, set to end_dt
or today() if no end_dt at 00.00.00.
:param datetime end_dt: datetime object that is the ending hour
for the working intervals computation; any
working interval ending after end_dt
will be truncated. If not set, set to start_dt()
at 23.59.59.
:param list leaves: a list of tuples(start_datetime, end_datetime) that
represent leaves.
:param boolean compute_leaves: if set and if leaves is None, compute the
leaves based on calendar and resource.
                                       If leaves is None and compute_leaves is
                                       False, no leaves are taken into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return list intervals: a list of tuples (start_datetime, end_datetime)
of work intervals """
if isinstance(id, (list, tuple)):
id = id[0]
# Computes start_dt, end_dt (with default values if not set) + off-interval work limits
work_limits = []
if start_dt is None and end_dt is not None:
start_dt = end_dt.replace(hour=0, minute=0, second=0)
elif start_dt is None:
start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0)
else:
work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt))
if end_dt is None:
end_dt = start_dt.replace(hour=23, minute=59, second=59)
else:
work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59)))
assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day'
intervals = []
work_dt = start_dt.replace(hour=0, minute=0, second=0)
# no calendar: try to use the default_interval, then return directly
if not id:
working_interval = []
if default_interval:
working_interval = (start_dt.replace(hour=default_interval[0], minute=0, second=0), start_dt.replace(hour=default_interval[1], minute=0, second=0))
intervals = self.interval_remove_leaves(working_interval, work_limits)
return intervals
working_intervals = []
tz_info = fields.datetime.context_timestamp(cr, uid, work_dt, context=context).tzinfo
for calendar_working_day in self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context):
x = work_dt.replace(hour=0, minute=0, second=0) + timedelta(seconds=(calendar_working_day.hour_from * 3600))
y = work_dt.replace(hour=0, minute=0, second=0) + timedelta(seconds=(calendar_working_day.hour_to * 3600))
x = x.replace(tzinfo=tz_info).astimezone(pytz.UTC).replace(tzinfo=None)
y = y.replace(tzinfo=tz_info).astimezone(pytz.UTC).replace(tzinfo=None)
working_interval = (x, y)
working_intervals += self.interval_remove_leaves(working_interval, work_limits)
# find leave intervals
if leaves is None and compute_leaves:
leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=None)
# filter according to leaves
for interval in working_intervals:
work_intervals = self.interval_remove_leaves(interval, leaves)
intervals += work_intervals
return intervals
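    # Illustrative sketch (assumed cr/uid/ids and resource_id, not part of the
    # original module): work intervals of one day for a calendar, with leaves
    # deducted.
    #
    #   calendar_pool = self.pool.get('resource.calendar')
    #   calendar_pool.get_working_intervals_of_day(
    #       cr, uid, calendar_id,
    #       start_dt=datetime.datetime(2014, 1, 6, 0, 0, 0),
    #       compute_leaves=True, resource_id=resource_id)
    #   # -> e.g. [(datetime(2014, 1, 6, 8, 0), datetime(2014, 1, 6, 12, 0)), ...]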
def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = datetime.timedelta()
intervals = self.get_working_intervals_of_day(
cr, uid, id,
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval, context)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
hours = 0.0
for day in rrule.rrule(rrule.DAILY, dtstart=start_dt,
until=(end_dt + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0),
byweekday=self.get_weekdays(cr, uid, id, context=context)):
day_start_dt = day.replace(hour=0, minute=0, second=0)
if start_dt and day.date() == start_dt.date():
day_start_dt = start_dt
day_end_dt = day.replace(hour=23, minute=59, second=59)
if end_dt and day.date() == end_dt.date():
day_end_dt = end_dt
hours += self.get_working_hours_of_date(
cr, uid, id, start_dt=day_start_dt, end_dt=day_end_dt,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
return hours
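    # Illustrative sketch (assumed cr/uid/ids): total working hours between two
    # datetimes; default_interval is the fallback working day used when no
    # calendar id is given.
    #
    #   hours = calendar_pool.get_working_hours(
    #       cr, uid, calendar_id,
    #       datetime.datetime(2014, 1, 6, 8, 0),
    #       datetime.datetime(2014, 1, 10, 17, 0),
    #       compute_leaves=True, default_interval=(8, 16))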
# --------------------------------------------------
# Hours scheduling
# --------------------------------------------------
def _schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Schedule hours of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int hours: number of hours to schedule. Use a negative number to
compute a backwards scheduling.
:param datetime day_dt: reference date to compute working days. If days is
> 0 date is the starting date. If days is < 0
date is the ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
        :return list intervals: list of tuples (start_datetime, end_datetime)
                                that are the working intervals of the scheduling;
                                the first (resp. last) interval bound gives the
                                beginning (resp. ending) datetime of the scheduling.
        Note: rrule.rrule is not used because it does not seem to allow going
        back in time.
"""
if day_dt is None:
day_dt = datetime.datetime.now()
backwards = (hours < 0)
hours = abs(hours)
intervals = []
remaining_hours = hours * 1.0
iterations = 0
current_datetime = day_dt
call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context)
while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000:
if backwards:
call_args['end_dt'] = current_datetime
else:
call_args['start_dt'] = current_datetime
working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args)
if not id and not working_intervals: # no calendar -> consider working 8 hours
remaining_hours -= 8.0
elif working_intervals:
if backwards:
working_intervals.reverse()
new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards)
if backwards:
new_working_intervals.reverse()
res = datetime.timedelta()
for interval in working_intervals:
res += interval[1] - interval[0]
remaining_hours -= (seconds(res) / 3600.0)
if backwards:
intervals = new_working_intervals + intervals
else:
intervals = intervals + new_working_intervals
# get next day
if backwards:
current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59))
else:
current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time())
# avoid infinite loops
iterations += 1
return intervals
def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the beginning/ending datetime of
an hours scheduling. """
res = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
return res and res[0][0] or False
def schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the working intervals of an hours
scheduling. """
return self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
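    # Illustrative sketch (assumed cr/uid/ids): a negative hour count schedules
    # backwards from day_dt, e.g. 16 hours of work before a deadline.
    #
    #   intervals = calendar_pool.schedule_hours(
    #       cr, uid, calendar_id, -16.0,
    #       day_dt=datetime.datetime(2014, 1, 10, 17, 0),
    #       compute_leaves=True)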
# --------------------------------------------------
# Days scheduling
# --------------------------------------------------
def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
"""Schedule days of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int days: number of days to schedule. Use a negative number to
compute a backwards scheduling.
:param date day_date: reference date to compute working days. If days is > 0
date is the starting date. If days is < 0 date is the
ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
        :return list intervals: list of tuples (start_datetime, end_datetime)
                                that are the working intervals of the scheduling;
                                the first (resp. last) interval bound gives the
                                beginning (resp. ending) datetime of the scheduling.
        Implementation note: rrule.rrule is not used because it does not seem
        to allow going back in time.
"""
if day_date is None:
day_date = datetime.datetime.now()
backwards = (days < 0)
days = abs(days)
intervals = []
planned_days = 0
iterations = 0
current_datetime = day_date.replace(hour=0, minute=0, second=0)
while planned_days < days and iterations < 1000:
working_intervals = self.get_working_intervals_of_day(
cr, uid, id, current_datetime,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
if not id or working_intervals: # no calendar -> no working hours, but day is considered as worked
planned_days += 1
intervals += working_intervals
# get next day
if backwards:
current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context)
else:
current_datetime = self.get_next_day(cr, uid, id, current_datetime, context)
# avoid infinite loops
iterations += 1
return intervals
def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the beginning/ending datetime of
a days scheduling. """
res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
return res and res[-1][1] or False
def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the working intervals of a days
scheduling. """
return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
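    # Illustrative sketch (assumed cr/uid/ids): date reached after scheduling
    # 5 working days from a starting date.
    #
    #   end_dt = calendar_pool.schedule_days_get_date(
    #       cr, uid, calendar_id, 5,
    #       day_date=datetime.datetime(2014, 1, 6))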
# --------------------------------------------------
# Compatibility / to clean / to remove
# --------------------------------------------------
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
""" Used in hr_payroll/hr_payroll.py
        :deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note:
        since saas-3, takes hours/minutes into account, not just the whole day."""
if isinstance(day, datetime.datetime):
day = day.replace(hour=0, minute=0)
return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=None)
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
""" Schedule hours backwards. Used in mrp_operations/mrp_operations.py.
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since
saas-3, counts leave hours instead of all-day leaves."""
return self.schedule_hours(
cr, uid, id, hours * -1.0,
day_dt=dt_from.replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
""" Used in mrp_operations/mrp_operations.py (default parameters) and in
interval_get()
        :deprecated: OpenERP saas-3. Use schedule_hours instead. Note:
        byday was not used. Since saas-3, counts leave hours instead of all-day leaves."""
res = {}
for dt_str, hours, calendar_id in date_and_hours_by_cal:
result = self.schedule_hours(
cr, uid, calendar_id, hours,
day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
res[(dt_str, hours, calendar_id)] = result
return res
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
""" Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py,
crm/crm_lead.py (res given).
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
res = self.interval_get_multi(
cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Unused wrapper.
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
class resource_calendar_attendance(osv.osv):
_name = "resource.calendar.attendance"
_description = "Work Detail"
_columns = {
'name' : fields.char("Name", required=True),
'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
'date_from' : fields.date('Starting Date'),
'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
'hour_to' : fields.float("Work to", required=True),
'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True, ondelete='cascade'),
}
_order = 'dayofweek, hour_from'
_defaults = {
'dayofweek' : '0'
}
def hours_time_string(hours):
""" convert a number of hours (float) into a string with format '%H:%M' """
minutes = int(round(hours * 60))
return "%02d:%02d" % divmod(minutes, 60)
class resource_resource(osv.osv):
_name = "resource.resource"
_description = "Resource Detail"
_columns = {
'name': fields.char("Name", required=True),
'code': fields.char('Code', size=16, copy=False),
'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
'company_id' : fields.many2one('res.company', 'Company'),
'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
        'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depicts the efficiency of the resource to complete tasks. e.g. a resource put alone on a phase of 5 days with 5 tasks assigned to him will show a load of 100% for this phase by default, but if we set an efficiency of 200%, his load will only be 50%."),
'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
}
_defaults = {
'resource_type' : 'user',
'time_efficiency' : 1,
'active' : True,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name', False):
default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
"""
Return a list of Resource Class objects for the resources allocated to the phase.
NOTE: Used in project/project.py
"""
resource_objs = {}
user_pool = self.pool.get('res.users')
for user in user_pool.browse(cr, uid, user_ids, context=context):
resource_objs[user.id] = {
'name' : user.name,
'vacation': [],
'efficiency': 1.0,
}
resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
if resource_ids:
for resource in self.browse(cr, uid, resource_ids, context=context):
resource_objs[user.id]['efficiency'] = resource.time_efficiency
resource_cal = resource.calendar_id.id
if resource_cal:
leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
resource_objs[user.id]['vacation'] += list(leaves)
return resource_objs
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
"""
Compute the vacation from the working calendar of the resource.
@param calendar_id : working calendar of the project
@param resource_id : resource working on phase/task
@param resource_calendar : working calendar of the resource
NOTE: used in project/project.py, and in generate_resources
"""
resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
leave_list = []
if resource_id:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
('calendar_id', '=', resource_calendar),
('resource_id', '=', resource_id)
], context=context)
else:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
('resource_id', '=', False)
], context=context)
leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
        for leave in leaves:
            dt_start = datetime.datetime.strptime(leave['date_from'], '%Y-%m-%d %H:%M:%S')
            dt_end = datetime.datetime.strptime(leave['date_to'], '%Y-%m-%d %H:%M:%S')
            delta = dt_end - dt_start
            for x in range(int(delta.days + 1)):
                leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d'))
leave_list.sort()
return leave_list
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
"""
        Convert the working calendar from the 'OpenERP' format into the 'Faces' format.
@param calendar_id : working calendar of the project
NOTE: used in project/project.py
"""
if not calendar_id:
            # Calendar is not specified: default to Monday-Friday, 8:00-12:00 and 13:00-17:00
return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
resource_attendance_pool = self.pool.get('resource.calendar.attendance')
time_range = "8:00-8:00"
non_working = ""
week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
wk_days = {}
wk_time = {}
wktime_list = []
wktime_cal = []
week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
# Convert time formats into appropriate format required
# and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
for week in weeks:
res_str = ""
day = None
if week_days.get(week['dayofweek'],False):
day = week_days[week['dayofweek']]
wk_days[week['dayofweek']] = week_days[week['dayofweek']]
else:
raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
hour_from_str = hours_time_string(week['hour_from'])
hour_to_str = hours_time_string(week['hour_to'])
res_str = hour_from_str + '-' + hour_to_str
wktime_list.append((day, res_str))
# Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
for item in wktime_list:
            if item[0] in wk_time:
                wk_time[item[0]].append(item[1])
            else:
                wk_time[item[0]] = [item[0], item[1]]
for k,v in wk_time.items():
wktime_cal.append(tuple(v))
        # Add the non-working days, e.g. [('sat,sun', '8:00-8:00')]
        for k, v in wk_days.items():
            if k in week_days:
week_days.pop(k)
for v in week_days.itervalues():
non_working += v + ','
if non_working:
wktime_cal.append((non_working[:-1], time_range))
return wktime_cal
class resource_calendar_leaves(osv.osv):
_name = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'name' : fields.char("Name"),
'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
'date_from' : fields.datetime('Start Date', required=True),
'date_to' : fields.datetime('End Date', required=True),
'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
}
def check_dates(self, cr, uid, ids, context=None):
for leave in self.browse(cr, uid, ids, context=context):
if leave.date_from and leave.date_to and leave.date_from > leave.date_to:
return False
return True
_constraints = [
        (check_dates, 'Error! Leave start-date must be earlier than leave end-date.', ['date_from', 'date_to'])
]
def onchange_resource(self, cr, uid, ids, resource, context=None):
result = {}
if resource:
resource_pool = self.pool.get('resource.resource')
result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
return {'value': result}
return {'value': {'calendar_id': []}}
def seconds(td):
assert isinstance(td, datetime.timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
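# Example (illustrative): equivalent to timedelta.total_seconds(), returned as
# a float.
#
#   seconds(datetime.timedelta(hours=1, minutes=30))  # -> 5400.0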
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
silvansky/pjsip_mod | refs/heads/master | tests/pjsua/scripts-pesq/200_codec_speex_8000.py | 3 | # $Id: 200_codec_speex_8000.py 3286 2010-08-18 14:30:15Z bennylp $
#
from inc_cfg import *
ADD_PARAM = ""
if (HAS_SND_DEV == 0):
ADD_PARAM += "--null-audio"
# Call with Speex/8000 codec
test_param = TestParam(
"PESQ codec Speex NB",
[
InstanceParam("UA1", ADD_PARAM + " --max-calls=1 --add-codec speex/8000 --clock-rate 8000 --play-file wavs/input.8.wav --no-vad"),
InstanceParam("UA2", "--null-audio --max-calls=1 --add-codec speex/8000 --clock-rate 8000 --rec-file wavs/tmp.8.wav --auto-answer 200")
]
)
pesq_threshold = 3.65
|
thomasgurry/amplicon_sequencing_pipeline | refs/heads/master | scripts/PipelineFilesInterface.py | 1 | """
Helper functions to move files around.
"""
import os
from string import ascii_lowercase
def split_file_and_return_names(raw_data_file):
"""
    Splits an input raw data file into smaller files with a fixed
    number of lines each, using the 'split' command. Returns the
    names of the created files.
"""
rawfilesize = os.path.getsize(raw_data_file)
    # Step 1.1 - split file into chunks: 100000 lines (~10Mb) for files under
    # 200Mb, 1000000 lines (~100Mb) otherwise
# Note: the 'split' command takes the raw_data_file, splits it into separate files
# and puts those files in the current directory (i.e. working_directory)
if(rawfilesize < 2e8):
os.system('split -l 100000 ' + raw_data_file)
else:
os.system('split -l 1000000 ' + raw_data_file)
# Step 1.2 - get split filenames
split_filenames = []
for c1 in ascii_lowercase:
for c2 in ascii_lowercase:
filename = 'x'+c1+c2
if(os.path.isfile(filename)):
split_filenames.append(filename)
'''
# Claire's note: I'm pretty sure split -l 10000 raw_data_file will always make
# at least xaa, and so len(split_filenames) should never be zero...
if len(split_filenames) == 0:
split_filenames = [raw_data_file]
raw_filenames = split_filenames
'''
return split_filenames
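# Illustrative sketch (assumed file path; actual chunk names depend on the
# 'split' command output):
#
#   chunks = split_file_and_return_names('/data/run1/raw_reads.fastq')
#   # -> e.g. ['xaa', 'xab', 'xac'], created in the current working directory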
##########################################################################
##% Functions to parse specific summary_file attributes for
##% different raw2otu.py processes
##########################################################################
def parse_input_files(options, summary_obj, amplicon_type):
"""
    Parses the raw data file input and returns the primers file, barcodes map,
    raw data file name(s), and file type.
Parameters
----------
options The options that were passed to raw2otu.py.
Should have options.input_dir
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
raw_data_file raw data file path (i.e. input_dir/raw_file)
raw_data_summary_file file path to file with raw data to sample ID map
raw_file_type 'FASTQ' or 'FASTA'
barcodes_map barcodes map file
primers_files primers file
"""
raw_data_file = None
raw_data_summary_file = None
# Extract file locations
if amplicon_type == '16S':
primers_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['PRIMERS_FILE'])
barcodes_map = os.path.join(options.input_dir, summary_obj.attribute_value_16S['BARCODES_MAP'])
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTQ_FILE'])
raw_file_type = 'FASTQ'
except:
print("No single raw FASTQ file found. Checking for raw FASTA.")
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTA_FILE'])
raw_file_type = 'FASTA'
except:
print("No single raw FASTA file found either. Checking for multiple files.")
                try:
                    raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTQ_FILES'])
                    raw_file_type = 'FASTQ'
                except:
                    print("No filename of multiple raw FASTQs map provided. Checking for multiple raw FASTAs.")
                    try:
                        raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTA_FILES'])
                        raw_file_type = 'FASTA'
                    except:
                        print("No filename of multiple raw FASTAs map provided. Check contents of your raw data and summary file.")
                        raise NameError("Unable to retrieve raw sequencing files.")
elif amplicon_type == 'ITS':
primers_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['PRIMERS_FILE'])
barcodes_map = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['BARCODES_MAP'])
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTQ_FILE'])
raw_file_type = 'FASTQ'
except:
print("No single raw FASTQ file found. Checking for raw FASTA.")
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTA_FILE'])
raw_file_type = 'FASTA'
except:
print("No single raw FASTA file found either. Checking for multiple files.")
                try:
                    raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTQ_FILES'])
                    raw_file_type = 'FASTQ'
                except:
                    print("No filename of multiple raw FASTQs map provided. Checking for multiple raw FASTAs.")
                    try:
                        raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTA_FILES'])
                        raw_file_type = 'FASTA'
                    except:
                        print("No filename of multiple raw FASTAs map provided. Check contents of your raw data and summary file.")
                        raise NameError("Unable to retrieve raw sequencing files.")
return primers_file, barcodes_map, raw_data_file, raw_data_summary_file, raw_file_type
def parse_barcodes_parameters(summary_obj, amplicon_type):
"""
Parses parameters used in splitting by barcodes.
Parameters
----------
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
mode str, either '1', '2', or '3' - indicating
barcodes mode to pass to
2.split_by_barcodes.py
index_file str, If mode '3' is given, the INDEX_FILE
location, otherwise returns None.
index_file_format str (default = 'fastq'). 'fastq', 'fasta',
or 'tab' - for use in 2.split_by_barcodes.py
"""
if amplicon_type == '16S':
if 'BARCODES_MODE' in summary_obj.attribute_value_16S:
mode = summary_obj.attribute_value_16S['BARCODES_MODE']
else:
mode = '2'
# If barcodes are in index file, get those parameters
if mode == '3':
try:
index_file = summary_obj.attribute_value_16S['INDEX_FILE']
except:
raise NameError("Barcodes mode 3 specified (barcodes are in index file), but no INDEX_FILE provided.")
try:
index_file_format = summary_obj.attribute_value_16S['INDEX_FORMAT']
except:
index_file_format = 'fastq'
else:
index_file = None
index_file_format = None
elif amplicon_type == 'ITS':
if 'BARCODES_MODE' in summary_obj.attribute_value_ITS:
mode = summary_obj.attribute_value_ITS['BARCODES_MODE']
else:
mode = '2'
# If barcodes are in index file, get those parameters
if mode == '3':
try:
index_file = summary_obj.attribute_value_ITS['INDEX_FILE']
except:
raise NameError("Barcodes mode 3 specified (barcodes are in index file), but no INDEX_FILE provided.")
try:
index_file_format = summary_obj.attribute_value_ITS['INDEX_FORMAT']
except:
index_file_format = 'fastq'
else:
index_file = None
index_file_format = None
return mode, index_file, index_file_format
def parse_dbotu_parameters(summary_obj, amplicon_type):
"""
Parses summary file for dbOTU options.
Parameters
----------
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
dist max sequence dissimilarity (default = 0.1)
abund minimum fold difference for comparing two OTUs (0=no abundance criterion; default 10.0)
pval minimum p-value for merging OTUs (default: 0.0005)
"""
if amplicon_type == "16S":
try:
dbotu_flag = summary_obj.attribute_value_16S['DBOTU']
except:
dbotu_flag = 'False'
try:
dist = summary_obj.attribute_value_16S['DISTANCE_CRITERIA']
except:
dist = 0.1
try:
abund = summary_obj.attribute_value_16S['ABUNDANCE_CRITERIA']
except:
abund = 10.0
try:
pval = summary_obj.attribute_value_16S['DBOTU_PVAL']
except:
pval = 0.0005
elif amplicon_type == "ITS":
try:
dbotu_flag = summary_obj.attribute_value_ITS['DBOTU']
except:
dbotu_flag = 'False'
try:
dist = summary_obj.attribute_value_ITS['DISTANCE_CRITERIA']
except:
dist = 0.1
try:
abund = summary_obj.attribute_value_ITS['ABUNDANCE_CRITERIA']
except:
abund = 10.0
try:
pval = summary_obj.attribute_value_ITS['DBOTU_PVAL']
except:
pval = 0.0005
else:
raise NameError("Incorrect amplicon type specified for dbOTU summary file parsing")
return dbotu_flag, dist, abund, pval
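# Illustrative sketch (assumed summary_obj, a SummaryParser instance): reading
# dbOTU settings, which fall back to the documented defaults.
#
#   dbotu_flag, dist, abund, pval = parse_dbotu_parameters(summary_obj, '16S')
#   # -> e.g. ('False', 0.1, 10.0, 0.0005) when nothing is set in the summary file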
|
AaronRanAn/Metrics | refs/heads/master | Python/ml_metrics/custom/kdd_average_precision.py | 8 | import numpy as np
def kdd_apk(actual, predicted, k=10):
"""
Computes the average precision at k for Track 1 of the 2012 KDD Cup.
This modified version uses the number of actual clicks as the denominator,
regardless of k.
    This function computes the average precision at k between two lists of
items.
Parameters
----------
actual : list
A list of elements that are to be predicted (order doesn't matter)
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return 0.0
return score / len(actual)
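# Example (illustrative): hits at ranks 1 and 3, divided by the number of
# actual items (3) rather than by min(k, len(actual)).
#
#   kdd_apk([1, 2, 3], [1, 4, 2], k=10)
#   # -> (1/1 + 2/3) / 3 ~= 0.5556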
def kdd_mapk(actual, predicted, k=10):
"""
Computes the mean average precision at k for Track 1 of the 2012 KDD Cup.
This modified version uses the number of actual clicks as the denominator,
regardless of k.
    This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
"""
return np.mean([kdd_apk(a,p,k) for a,p in zip(actual, predicted)])
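# Example (illustrative): the mean of the per-list average precisions.
#
#   kdd_mapk([[1, 2, 3], [1]], [[1, 4, 2], [2, 1]], k=10)
#   # -> mean(0.5556, 0.5) ~= 0.5278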
|
divio/django | refs/heads/master | django/template/loaders/app_directories.py | 635 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
from django.template.utils import get_app_template_dirs
from .filesystem import Loader as FilesystemLoader
class Loader(FilesystemLoader):
def get_dirs(self):
return get_app_template_dirs('templates')
|
kawamon/hue | refs/heads/master | apps/sqoop/src/sqoop/api/job.py | 2 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
import sys
from django.utils.encoding import smart_str
from django.views.decorators.cache import never_cache
from sqoop import client, conf
from sqoop.client.exception import SqoopException
from sqoop.api.decorators import get_job_or_exception
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from sqoop.api.exception import handle_rest_exception
from sqoop.api.utils import list_to_dict
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
__all__ = ['get_jobs', 'create_job', 'update_job', 'job', 'jobs', 'job_clone', 'job_delete', 'job_start', 'job_stop', 'job_status']
LOG = logging.getLogger(__name__)
@never_cache
def get_jobs(request):
response = {
'status': 0,
'errors': None,
'jobs': []
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
jobs = c.get_jobs()
response['jobs'] = list_to_dict(jobs)
except RestException as e:
response.update(handle_rest_exception(e, _('Could not get jobs.')))
return JsonResponse(response)
@never_cache
def create_job(request):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
if 'job' not in request.POST:
raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)
d = json.loads(smart_str(request.POST.get('job')))
job = client.Job.from_dict(d)
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['job'] = c.create_job(job).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not create job.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
def update_job(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
if 'job' not in request.POST:
raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400)
job.update_from_dict(json.loads(smart_str(request.POST.get('job'))))
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['job'] = c.update_job(job).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not update job.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
def jobs(request):
if request.method == 'GET':
return get_jobs(request)
elif request.method == 'POST':
return create_job(request)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
@never_cache
@get_job_or_exception()
def job(request, job):
response = {
'status': 0,
'errors': None,
'job': None
}
if request.method == 'GET':
response['job'] = job.to_dict()
return JsonResponse(response)
elif request.method == 'POST':
return update_job(request, job)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
@never_cache
@get_job_or_exception()
def job_clone(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
job.id = -1
job.name = '%s-copy' % job.name
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['job'] = c.create_job(job).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not clone job.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_delete(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'job': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
c.delete_job(job)
except RestException as e:
response.update(handle_rest_exception(e, _('Could not delete job.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_start(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'submission': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['submission'] = c.start_job(job).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not start job.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = [e.to_dict()]
return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_stop(request, job):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'submission': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['submission'] = c.stop_job(job).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not stop job.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
@get_job_or_exception()
def job_status(request, job):
if request.method != 'GET':
raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'submission': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['submission'] = c.get_job_status(job).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not get job status.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
|
gsnedders/presto-testo | refs/heads/master | wpt/websockets/websock_handlers/timeout_wsh.py | 4 | #!/usr/bin/python
from mod_pywebsocket import msgutil
import time
def web_socket_do_extra_handshake(request):
time.sleep(22)
def web_socket_transfer_data(request):
pass
|
haiiiiiyun/scrapy | refs/heads/master | scrapy/stats.py | 159 |
"""
Obsolete module, kept for giving a meaningful error message when trying to
import.
"""
raise ImportError("scrapy.stats usage has become obsolete, use "
"`crawler.stats` attribute instead")
|
Kalyzee/edx-platform | refs/heads/master | lms/djangoapps/lms_xblock/mixin.py | 37 | """
Namespace that defines fields common to all blocks used in the LMS
"""
from lazy import lazy
from xblock.fields import Boolean, Scope, String, XBlockMixin, Dict
from xblock.validation import ValidationMessage
from xmodule.modulestore.inheritance import UserPartitionList
from xmodule.partitions.partitions import NoSuchUserPartitionError, NoSuchUserPartitionGroupError
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class GroupAccessDict(Dict):
"""Special Dict class for serializing the group_access field"""
def from_json(self, access_dict):
if access_dict is not None:
return {int(k): access_dict[k] for k in access_dict}
def to_json(self, access_dict):
if access_dict is not None:
return {unicode(k): access_dict[k] for k in access_dict}
class LmsBlockMixin(XBlockMixin):
"""
Mixin that defines fields common to all blocks used in the LMS
"""
hide_from_toc = Boolean(
help=_("Whether to display this module in the table of contents"),
default=False,
scope=Scope.settings
)
format = String(
# Translators: "TOC" stands for "Table of Contents"
help=_("What format this module is in (used for deciding which "
"grader to apply, and what to show in the TOC)"),
scope=Scope.settings,
)
chrome = String(
display_name=_("Courseware Chrome"),
help=_("Enter the chrome, or navigation tools, to use for the XBlock in the LMS. Valid values are: \n"
"\"chromeless\" -- to not use tabs or the accordion; \n"
"\"tabs\" -- to use tabs only; \n"
"\"accordion\" -- to use the accordion only; or \n"
"\"tabs,accordion\" -- to use tabs and the accordion."),
scope=Scope.settings,
default=None,
)
default_tab = String(
display_name=_("Default Tab"),
help=_("Enter the tab that is selected in the XBlock. If not set, the Courseware tab is selected."),
scope=Scope.settings,
default=None,
)
source_file = String(
display_name=_("LaTeX Source File Name"),
help=_("Enter the source file name for LaTeX."),
scope=Scope.settings,
deprecated=True
)
ispublic = Boolean(
display_name=_("Course Is Public"),
help=_("Enter true or false. If true, the course is open to the public. If false, the course is open only to admins."),
scope=Scope.settings
)
visible_to_staff_only = Boolean(
help=_("If true, can be seen only by course staff, regardless of start date."),
default=False,
scope=Scope.settings,
)
group_access = GroupAccessDict(
help=_(
"A dictionary that maps which groups can be shown this block. The keys "
"are group configuration ids and the values are a list of group IDs. "
"If there is no key for a group configuration or if the set of group IDs "
"is empty then the block is considered visible to all. Note that this "
"field is ignored if the block is visible_to_staff_only."
),
default={},
scope=Scope.settings,
)
@lazy
def merged_group_access(self):
"""
This computes access to a block's group_access rules in the context of its position
within the courseware structure, in the form of a lazily-computed attribute.
Each block's group_access rule is merged recursively with its parent's, guaranteeing
that any rule in a parent block will be enforced on descendants, even if a descendant
also defined its own access rules. The return value is always a dict, with the same
structure as that of the group_access field.
When merging access rules results in a case where all groups are denied access in a
user partition (which effectively denies access to that block for all students),
the special value False will be returned for that user partition key.
"""
parent = self.get_parent()
if not parent:
return self.group_access or {}
merged_access = parent.merged_group_access.copy()
if self.group_access is not None:
for partition_id, group_ids in self.group_access.items():
if group_ids: # skip if the "local" group_access for this partition is None or empty.
if partition_id in merged_access:
if merged_access[partition_id] is False:
# special case - means somewhere up the hierarchy, merged access rules have eliminated
# all group_ids from this partition, so there's no possible intersection.
continue
# otherwise, if the parent defines group access rules for this partition,
# intersect with the local ones.
merged_access[partition_id] = list(
set(merged_access[partition_id]).intersection(group_ids)
) or False
else:
# add the group access rules for this partition to the merged set of rules.
merged_access[partition_id] = group_ids
return merged_access
# Specified here so we can see what the value set at the course-level is.
user_partitions = UserPartitionList(
help=_("The list of group configurations for partitioning students in content experiments."),
default=[],
scope=Scope.settings
)
def _get_user_partition(self, user_partition_id):
"""
Returns the user partition with the specified id. Raises
`NoSuchUserPartitionError` if the lookup fails.
"""
for user_partition in self.user_partitions:
if user_partition.id == user_partition_id:
return user_partition
raise NoSuchUserPartitionError("could not find a UserPartition with ID [{}]".format(user_partition_id))
def validate(self):
"""
Validates the state of this xblock instance.
"""
_ = self.runtime.service(self, "i18n").ugettext # pylint: disable=redefined-outer-name
validation = super(LmsBlockMixin, self).validate()
has_invalid_user_partitions = False
has_invalid_groups = False
for user_partition_id, group_ids in self.group_access.iteritems():
try:
user_partition = self._get_user_partition(user_partition_id)
except NoSuchUserPartitionError:
has_invalid_user_partitions = True
else:
# Skip the validation check if the partition has been disabled
if user_partition.active:
for group_id in group_ids:
try:
user_partition.get_group(group_id)
except NoSuchUserPartitionGroupError:
has_invalid_groups = True
if has_invalid_user_partitions:
validation.add(
ValidationMessage(
ValidationMessage.ERROR,
_(u"This component refers to deleted or invalid content group configurations.")
)
)
if has_invalid_groups:
validation.add(
ValidationMessage(
ValidationMessage.ERROR,
_(u"This component refers to deleted or invalid content groups.")
)
)
return validation
|
ryfeus/lambda-packs | refs/heads/master | Tensorflow_LightGBM_Scipy_nightly/source/scipy/_lib/_gcutils.py | 25 | """
Module for testing automatic garbage collection of objects
.. autosummary::
:toctree: generated/
set_gc_state - enable or disable garbage collection
gc_state - context manager for given state of garbage collector
assert_deallocated - context manager to check for circular references on object
"""
import weakref
import gc
from contextlib import contextmanager
__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
class ReferenceError(AssertionError):
pass
def set_gc_state(state):
""" Set status of garbage collector """
if gc.isenabled() == state:
return
if state:
gc.enable()
else:
gc.disable()
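# Example (illustrative): disable collection, then restore it.
#
#   set_gc_state(False)
#   assert not gc.isenabled()
#   set_gc_state(True)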
@contextmanager
def gc_state(state):
""" Context manager to set state of garbage collector to `state`
Parameters
----------
state : bool
True for gc enabled, False for disabled
Examples
--------
>>> with gc_state(False):
... assert not gc.isenabled()
>>> with gc_state(True):
... assert gc.isenabled()
"""
orig_state = gc.isenabled()
set_gc_state(state)
yield
set_gc_state(orig_state)
@contextmanager
def assert_deallocated(func, *args, **kwargs):
"""Context manager to check that object is deallocated
This is useful for checking that an object can be freed directly by
reference counting, without requiring gc to break reference cycles.
GC is disabled inside the context manager.
Parameters
----------
func : callable
Callable to create object to check
\\*args : sequence
positional arguments to `func` in order to create object to check
\\*\\*kwargs : dict
keyword arguments to `func` in order to create object to check
Examples
--------
>>> class C(object): pass
>>> with assert_deallocated(C) as c:
... # do something
... del c
>>> class C(object):
... def __init__(self):
... self._circular = self # Make circular reference
>>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
... # do something
... del c
Traceback (most recent call last):
...
ReferenceError: Remaining reference(s) to object
"""
with gc_state(False):
obj = func(*args, **kwargs)
ref = weakref.ref(obj)
yield obj
del obj
if ref() is not None:
raise ReferenceError("Remaining reference(s) to object")
|
sbx320/mtasa-blue-old | refs/heads/master | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-rebuild.py | 299 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are rebuilt correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'rebuild'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'test_app', chdir=CHDIR)
# Touch a source file, rebuild, and check that the app target is up-to-date.
test.touch('rebuild/main.c')
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.up_to_date('test.gyp', 'test_app', chdir=CHDIR)
# Xcode runs postbuilds on every build, so targets with postbuilds are
# never marked as up_to_date.
if test.format != 'xcode':
# Same for a framework bundle.
test.build('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
test.up_to_date('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
# Test that an app bundle with a postbuild that touches the app binary needs
# to be built only once.
test.build('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
test.up_to_date('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
test.pass_test()
|
sairajat/dealii | refs/heads/master | examples/step-55/reference.py | 20 | from sympy import *
from sympy.printing import print_ccode
from sympy.physics.vector import ReferenceFrame, gradient, divergence
from sympy.vector import CoordSysCartesian
R = ReferenceFrame('R');
x = R[0]; y = R[1];
a=-0.5; b=1.5;
visc=1e-1;
lambda_=(1/(2*visc)-sqrt(1/(4*visc**2)+4*pi**2));
print(" visc=%f" % visc)
u=[0,0]
u[0]=1-exp(lambda_*x)*cos(2*pi*y);
u[1]=lambda_/(2*pi)*exp(lambda_*x)*sin(2*pi*y);
p=(exp(3*lambda_)-exp(-lambda_))/(8*lambda_)-exp(2*lambda_*x)/2;
p=p - integrate(p, (x,a,b));
grad_p = gradient(p, R).to_matrix(R)
f0 = -divergence(visc*gradient(u[0], R), R) + grad_p[0];
f1 = -divergence(visc*gradient(u[1], R), R) + grad_p[1];
f2 = divergence(u[0]*R.x + u[1]*R.y, R);
print("\n * RHS:")
print(ccode(f0, assign_to = "values[0]"));
print(ccode(f1, assign_to = "values[1]"));
print(ccode(f2, assign_to = "values[2]"));
print("\n * ExactSolution:")
print(ccode(u[0], assign_to = "values[0]"));
print(ccode(u[1], assign_to = "values[1]"));
print(ccode(p, assign_to = "values[2]"));
print("")
print("pressure mean:", N(integrate(p,(x,a,b))))
|
eduNEXT/edunext-platform | refs/heads/master | cms/djangoapps/contentstore/views/session_kv_store.py | 4 | """
An :class:`~xblock.runtime.KeyValueStore` that stores data in the django session
"""
from xblock.runtime import KeyValueStore
def stringify(key):
    """Return a stable string form of `key` for use as a session dict key."""
    return repr(tuple(key))
class SessionKeyValueStore(KeyValueStore):
    """An xblock ``KeyValueStore`` that persists data in the Django session."""
def __init__(self, request):
self._session = request.session
def get(self, key):
return self._session[stringify(key)]
def set(self, key, value):
self._session[stringify(key)] = value
def delete(self, key):
del self._session[stringify(key)]
def has(self, key):
return stringify(key) in self._session
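# Minimal usage sketch (illustrative only; ``_FakeRequest`` and the key tuple
# below are hypothetical stand-ins for a real Django request and xblock key):
if __name__ == "__main__":
    class _FakeRequest(object):
        session = {}

    store = SessionKeyValueStore(_FakeRequest())
    key = ("usage", "student", "block", "field")
    store.set(key, 42)
    assert store.has(key) and store.get(key) == 42
    store.delete(key)
    assert not store.has(key)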
|
pavelchristof/gomoku-ai | refs/heads/master | tensorflow/python/ops/distributions/dirichlet_multinomial.py | 39 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The DirichletMultinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"DirichletMultinomial",
]
_dirichlet_multinomial_sample_note = """For each batch of counts,
`value = [n_0, ..., n_{K-1}]`, `P[value]` is the probability that after
sampling `self.total_count` draws from this Dirichlet-Multinomial distribution,
the number of draws falling in class `j` is `n_j`. Since this definition is
[exchangeable](https://en.wikipedia.org/wiki/Exchangeable_random_variables);
different sequences have the same counts so the probability includes a
combinatorial coefficient.
Note: `value` must be a non-negative tensor with dtype `self.dtype`, have no
fractional components, and such that
`tf.reduce_sum(value, -1) = self.total_count`. Its shape must be broadcastable
with `self.concentration` and `self.total_count`."""
class DirichletMultinomial(distribution.Distribution):
"""Dirichlet-Multinomial compound distribution.
The Dirichlet-Multinomial distribution is parameterized by a (batch of)
length-`K` `concentration` vectors (`K > 1`) and a `total_count` number of
trials, i.e., the number of trials per draw from the DirichletMultinomial. It
is defined over a (batch of) length-`K` vector `counts` such that
`tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is
identically the Beta-Binomial distribution when `K = 2`.
#### Mathematical Details
The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a
length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`.
The probability mass function (pmf) is,
```none
pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z
Z = Beta(alpha) / N!
```
where:
* `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`,
* `total_count = N`, `N` a positive integer,
* `N!` is `N` factorial, and,
* `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the
[multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
Dirichlet-Multinomial is a [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its
samples are generated as follows.
1. Choose class probabilities:
`probs = [p_0,...,p_{K-1}] ~ Dir(concentration)`
2. Draw integers:
`counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)`
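  For intuition, these two draws can be sketched with NumPy (an illustrative
  sketch only, not part of this class):

  ```python
  import numpy as np
  probs = np.random.dirichlet(concentration)           # step 1: probs ~ Dir
  counts = np.random.multinomial(total_count, probs)   # step 2: counts ~ Mult
  ```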
The last `concentration` dimension parametrizes a single Dirichlet-Multinomial
distribution. When calling distribution functions (e.g., `dist.prob(counts)`),
`concentration`, `total_count` and `counts` are broadcast to the same shape.
  The last dimension of `counts` corresponds to single Dirichlet-Multinomial
  distributions.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Pitfalls
The number of classes, `K`, must not exceed:
- the largest integer representable by `self.dtype`, i.e.,
    `2**(mantissa_bits+1)` (IEEE 754),
- the maximum `Tensor` index, i.e., `2**31-1`.
In other words,
```python
K <= min(2**31-1, {
tf.float16: 2**11,
tf.float32: 2**24,
tf.float64: 2**53 }[param.dtype])
```
Note: This condition is validated only when `self.validate_args = True`.
#### Examples
```python
alpha = [1, 2, 3]
n = 2
dist = DirichletMultinomial(n, alpha)
```
  Creates a 3-class distribution in which the 3rd class is the most likely to
  be drawn.
The distribution functions can be evaluated on counts.
```python
# counts same shape as alpha.
counts = [0, 0, 2]
dist.prob(counts) # Shape []
# alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match counts.
counts = [[1, 1, 0], [1, 0, 1]]
dist.prob(counts) # Shape [2]
# alpha will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7]
```
Creates a 2-batch of 3-class distributions.
```python
alpha = [[1, 2, 3], [4, 5, 6]] # Shape [2, 3]
n = [3, 3]
dist = DirichletMultinomial(n, alpha)
# counts will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.
counts = [2, 1, 0]
dist.prob(counts) # Shape [2]
```
"""
# TODO(b/27419586) Change docstring for dtype of concentration once int
# allowed.
def __init__(self,
total_count,
concentration,
validate_args=False,
allow_nan_stats=True,
name="DirichletMultinomial"):
"""Initialize a batch of DirichletMultinomial distributions.
Args:
total_count: Non-negative floating point tensor, whose dtype is the same
as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with
`m >= 0`. Defines this as a batch of `N1 x ... x Nm` different
Dirichlet multinomial distributions. Its components should be equal to
integer values.
concentration: Positive floating point tensor, whose dtype is the
same as `n` with shape broadcastable to `[N1,..., Nm, K]` `m >= 0`.
Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet
multinomial distributions.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[total_count, concentration]):
# Broadcasting works because:
# * The broadcasting convention is to prepend dimensions of size [1], and
# we use the last dimension for the distribution, whereas
# the batch dimensions are the leading dimensions, which forces the
# distribution dimension to be defined explicitly (i.e. it cannot be
# created automatically by prepending). This forces enough explicitness.
# * All calls involving `counts` eventually require a broadcast between
# `counts` and concentration.
self._total_count = ops.convert_to_tensor(total_count, name="total_count")
if validate_args:
self._total_count = (
distribution_util.embed_check_nonnegative_integer_form(
self._total_count))
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration,
name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(DirichletMultinomial, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._total_count,
self._concentration],
name=name)
@property
def total_count(self):
"""Number of trials used to construct a sample."""
return self._total_count
@property
def concentration(self):
"""Concentration parameter; expected prior counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
# Event shape depends only on total_concentration, not "n".
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
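    # Compound sampling: the logs of Gamma draws act as unnormalized
    # categorical logits (normalizing the Gammas would give a Dirichlet
    # sample), so drawing `total_count` categorical samples from them and
    # histogramming via one_hot + reduce_sum yields the per-class counts.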
n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
k = self.event_shape_tensor()[0]
unnormalized_logits = array_ops.reshape(
math_ops.log(random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)),
shape=[-1, k])
draws = random_ops.multinomial(
logits=unnormalized_logits,
num_samples=n_draws,
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k), -2)
final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
x = array_ops.reshape(x, final_shape)
return math_ops.cast(x, self.dtype)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _log_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
ordered_prob = (
special_math_ops.lbeta(self.concentration + counts)
- special_math_ops.lbeta(self.concentration))
return ordered_prob + distribution_util.log_combinations(
self.total_count, counts)
@distribution_util.AppendDocstring(_dirichlet_multinomial_sample_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _mean(self):
return self.total_count * (self.concentration /
self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""The covariance for each batch member is defined as the following:
```none
Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
(n + alpha_0) / (1 + alpha_0)
```
where `concentration = alpha` and
`total_concentration = alpha_0 = sum_j alpha_j`.
The covariance between elements in a batch is defined as:
```none
Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
(n + alpha_0) / (1 + alpha_0)
```
""")
def _covariance(self):
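    # Off-diagonal entries are -x_i * x_j with x = scale * mean, which matches
    # the Cov(X_i, X_j) formula above; the diagonal is then overwritten with
    # the variance via matrix_set_diag.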
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (self.total_count * scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
# We must take care to expand back the last dim whenever we use the
# total_concentration.
c0 = self.total_concentration[..., array_ops.newaxis]
return math_ops.sqrt((1. + c0 / self.total_count) / (1. + c0))
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
concentration = distribution_util.embed_check_categorical_event_shape(
concentration)
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
], concentration)
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_equal(
self.total_count, math_ops.reduce_sum(counts, -1),
message="counts last-dimension must sum to `self.total_count`"),
], counts)
|
ptitjano/bokeh | refs/heads/master | examples/models/sprint.py | 4 | from __future__ import print_function
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.util.browser import view
from bokeh.resources import INLINE
from bokeh.models.glyphs import Circle, Text
from bokeh.models import ColumnDataSource, Range1d, DataRange1d, Plot, LinearAxis, SingleIntervalTicker, Grid, HoverTool
from bokeh.sampledata.sprint import sprint
# Based on http://www.nytimes.com/interactive/2012/08/05/sports/olympics/the-100-meter-dash-one-race-every-medalist-ever.html
abbrev_to_country = {
"USA": "United States",
"GBR": "Britain",
"JAM": "Jamaica",
"CAN": "Canada",
"TRI": "Trinidad and Tobago",
"AUS": "Australia",
"GER": "Germany",
"CUB": "Cuba",
"NAM": "Namibia",
"URS": "Soviet Union",
"BAR": "Barbados",
"BUL": "Bulgaria",
"HUN": "Hungary",
"NED": "Netherlands",
"NZL": "New Zealand",
"PAN": "Panama",
"POR": "Portugal",
"RSA": "South Africa",
"EUA": "United Team of Germany",
}
gold_fill = "#efcf6d"
gold_line = "#c8a850"
silver_fill = "#cccccc"
silver_line = "#b0b0b1"
bronze_fill = "#c59e8a"
bronze_line = "#98715d"
fill_color = { "gold": gold_fill, "silver": silver_fill, "bronze": bronze_fill }
line_color = { "gold": gold_line, "silver": silver_line, "bronze": bronze_line }
def selected_name(name, medal, year):
return name if medal == "gold" and year in [1988, 1968, 1936, 1896] else None
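# Reference time for the "meters back" computation, taken from the first row
# of the sample data (expected to be Bolt's 2012 winning time).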
t0 = sprint.Time[0]
sprint["Abbrev"] = sprint.Country
sprint["Country"] = sprint.Abbrev.map(lambda abbr: abbrev_to_country[abbr])
sprint["Medal"] = sprint.Medal.map(lambda medal: medal.lower())
sprint["Speed"] = 100.0/sprint.Time
sprint["MetersBack"] = 100.0*(1.0 - t0/sprint.Time)
sprint["MedalFill"] = sprint.Medal.map(lambda medal: fill_color[medal])
sprint["MedalLine"] = sprint.Medal.map(lambda medal: line_color[medal])
sprint["SelectedName"] = sprint[["Name", "Medal", "Year"]].apply(tuple, axis=1).map(lambda args: selected_name(*args))
source = ColumnDataSource(sprint)
xdr = Range1d(start=sprint.MetersBack.max()+2, end=0) # XXX: +2 is poor-man's padding (otherwise misses last tick)
ydr = DataRange1d(range_padding=0.05) # XXX: should be 2 years (both sides)
plot = Plot(x_range=xdr, y_range=ydr, plot_width=1000, plot_height=600, toolbar_location=None, outline_line_color=None)
plot.title.text = "Usain Bolt vs. 116 years of Olympic sprinters"
xticker = SingleIntervalTicker(interval=5, num_minor_ticks=0)
xaxis = LinearAxis(ticker=xticker, axis_line_color=None, major_tick_line_color=None,
axis_label="Meters behind 2012 Bolt", axis_label_text_font_size="10pt", axis_label_text_font_style="bold")
plot.add_layout(xaxis, "below")
xgrid = Grid(dimension=0, ticker=xaxis.ticker, grid_line_dash="dashed")
plot.add_layout(xgrid)
yticker = SingleIntervalTicker(interval=12, num_minor_ticks=0)
yaxis = LinearAxis(ticker=yticker, major_tick_in=-5, major_tick_out=10)
plot.add_layout(yaxis, "right")
radius = dict(value=5, units="screen")
medal_glyph = Circle(x="MetersBack", y="Year", radius=radius, fill_color="MedalFill", line_color="MedalLine", fill_alpha=0.5)
medal = plot.add_glyph(source, medal_glyph)
athlete_glyph = Text(x="MetersBack", y="Year", x_offset=10, text="SelectedName",
text_align="left", text_baseline="middle", text_font_size="9pt")
athlete = plot.add_glyph(source, athlete_glyph)
no_olympics_glyph = Text(x=7.5, y=1942, text=["No Olympics in 1940 or 1944"],
text_align="center", text_baseline="middle",
text_font_size="9pt", text_font_style="italic", text_color="silver")
no_olympics = plot.add_glyph(no_olympics_glyph)
tooltips = """
<div>
<span style="font-size: 15px;">@Name</span>
<span style="font-size: 10px; color: #666;">(@Abbrev)</span>
</div>
<div>
<span style="font-size: 17px; font-weight: bold;">@Time{0.00}</span>
<span style="font-size: 10px; color: #666;">@Year</span>
</div>
<div style="font-size: 11px; color: #666;">@{MetersBack}{0.00} meters behind</div>
"""
hover = HoverTool(tooltips=tooltips, renderers=[medal])
plot.add_tools(hover)
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "sprint.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, plot.title.text))
print("Wrote %s" % filename)
view(filename)
|
santisiri/popego | refs/heads/master | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/lore/slides.py | 3 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Rudimentary slide support for Lore.
TODO:
- Complete mgp output target
- syntax highlighting
- saner font handling
- probably lots more
- Add HTML output targets
- one slides per page (with navigation links)
- all in one page
Example input file::
<html>
<head><title>Title of talk</title></head>
<body>
<h1>Title of talk</h1>
<h2>First Slide</h2>
<ul>
<li>Bullet point</li>
<li>Look ma, I'm <strong>bold</strong>!</li>
<li>... etc ...</li>
</ul>
<h2>Second Slide</h2>
<pre class="python">
# Sample code sample.
print "Hello, World!"
</pre>
</body>
</html>
"""
from __future__ import nested_scopes
from twisted.lore import default
from twisted.web import domhelpers, microdom
from twisted.python import text
# These should be factored out
from twisted.lore.latex import BaseLatexSpitter, LatexSpitter, processFile, \
getLatexText, HeadingLatexSpitter
from twisted.lore.tree import getHeaders
from tree import removeH1, fixAPI, fontifyPython, \
addPyListings, addHTMLListings, setTitle
import os, os.path, re
from cStringIO import StringIO
hacked_entities = { 'amp': ' &', 'gt': ' >', 'lt': ' <', 'quot': ' "',
'copy': ' (c)'}
entities = { 'amp': '&', 'gt': '>', 'lt': '<', 'quot': '"',
'copy': '(c)'}
class MagicpointOutput(BaseLatexSpitter):
bulletDepth = 0
def writeNodeData(self, node):
buf = StringIO()
getLatexText(node, buf.write, entities=hacked_entities)
data = buf.getvalue().rstrip().replace('\n', ' ')
self.writer(re.sub(' +', ' ', data))
def visitNode_title(self, node):
self.title = domhelpers.getNodeText(node)
def visitNode_body(self, node):
# Adapted from tree.generateToC
self.fontStack = [('standard', None)]
# Title slide
self.writer(self.start_h2)
self.writer(self.title)
self.writer(self.end_h2)
self.writer('%center\n\n\n\n\n')
for authorNode in domhelpers.findElementsWithAttribute(node, 'class', 'author'):
getLatexText(authorNode, self.writer, entities=entities)
self.writer('\n')
# Table of contents
self.writer(self.start_h2)
self.writer(self.title)
self.writer(self.end_h2)
for element in getHeaders(node):
level = int(element.tagName[1])-1
self.writer(level * '\t')
self.writer(domhelpers.getNodeText(element))
self.writer('\n')
self.visitNodeDefault(node)
def visitNode_div_author(self, node):
# Skip this node; it's already been used by visitNode_body
pass
def visitNode_div_pause(self, node):
self.writer('%pause\n')
def visitNode_pre(self, node):
# TODO: Syntax highlighting
buf = StringIO()
getLatexText(node, buf.write, entities=entities)
data = buf.getvalue()
data = text.removeLeadingTrailingBlanks(data)
lines = data.split('\n')
self.fontStack.append(('typewriter', 4))
self.writer('%' + self.fontName() + '\n')
for line in lines:
self.writer(' ' + line + '\n')
del self.fontStack[-1]
self.writer('%' + self.fontName() + '\n')
def visitNode_ul(self, node):
if self.bulletDepth > 0:
self.writer(self._start_ul)
self.bulletDepth += 1
self.start_li = self._start_li * self.bulletDepth
self.visitNodeDefault(node)
self.bulletDepth -= 1
self.start_li = self._start_li * self.bulletDepth
def visitNode_strong(self, node):
self.doFont(node, 'bold')
def visitNode_em(self, node):
self.doFont(node, 'italic')
def visitNode_code(self, node):
self.doFont(node, 'typewriter')
def doFont(self, node, style):
self.fontStack.append((style, None))
self.writer(' \n%cont, ' + self.fontName() + '\n')
self.visitNodeDefault(node)
del self.fontStack[-1]
self.writer('\n%cont, ' + self.fontName() + '\n')
def fontName(self):
names = [x[0] for x in self.fontStack]
if 'typewriter' in names:
name = 'typewriter'
else:
name = ''
if 'bold' in names:
name += 'bold'
if 'italic' in names:
name += 'italic'
if name == '':
name = 'standard'
sizes = [x[1] for x in self.fontStack]
sizes.reverse()
for size in sizes:
if size:
return 'font "%s", size %d' % (name, size)
return 'font "%s"' % name
start_h2 = "%page\n\n"
end_h2 = '\n\n\n'
_start_ul = '\n'
_start_li = "\t"
end_li = "\n"
def convertFile(filename, outputter, template, ext=".mgp"):
fout = open(os.path.splitext(filename)[0]+ext, 'w')
fout.write(open(template).read())
spitter = outputter(fout.write, os.path.dirname(filename), filename)
fin = open(filename)
processFile(spitter, fin)
fin.close()
fout.close()
# HTML DOM tree stuff
def splitIntoSlides(document):
body = domhelpers.findNodesNamed(document, 'body')[0]
slides = []
slide = []
title = '(unset)'
for child in body.childNodes:
if isinstance(child, microdom.Element) and child.tagName == 'h2':
if slide:
slides.append((title, slide))
slide = []
title = domhelpers.getNodeText(child)
else:
slide.append(child)
slides.append((title, slide))
return slides
def insertPrevNextLinks(slides, filename, ext):
for slide in slides:
for name, offset in (("previous", -1), ("next", +1)):
if (slide.pos > 0 and name == "previous") or \
(slide.pos < len(slides)-1 and name == "next"):
for node in domhelpers.findElementsWithAttribute(slide.dom, "class", name):
if node.tagName == 'a':
node.setAttribute('href', '%s-%d%s'
% (filename[0], slide.pos+offset, ext))
else:
node.appendChild(microdom.Text(slides[slide.pos+offset].title))
else:
for node in domhelpers.findElementsWithAttribute(slide.dom, "class", name):
pos = 0
for child in node.parentNode.childNodes:
if child is node:
del node.parentNode.childNodes[pos]
break
pos += 1
class HTMLSlide:
def __init__(self, dom, title, pos):
self.dom = dom
self.title = title
self.pos = pos
def munge(document, template, linkrel, d, fullpath, ext, url, config):
    # FIXME: This has *way* too much duplicated crap in common with tree.munge
#fixRelativeLinks(template, linkrel)
removeH1(document)
fixAPI(document, url)
fontifyPython(document)
addPyListings(document, d)
addHTMLListings(document, d)
#fixLinks(document, ext)
#putInToC(template, generateToC(document))
template = template.cloneNode(1)
# Insert the slides into the template
slides = []
pos = 0
for title, slide in splitIntoSlides(document):
t = template.cloneNode(1)
setTitle(t, [microdom.Text(title)])
tmplbody = domhelpers.findElementsWithAttribute(t, "class", "body")[0]
tmplbody.childNodes = slide
tmplbody.setAttribute("class", "content")
# FIXME: Next/Prev links
# FIXME: Perhaps there should be a "Template" class? (setTitle/setBody
# could be methods...)
slides.append(HTMLSlide(t, title, pos))
pos += 1
insertPrevNextLinks(slides, os.path.splitext(os.path.basename(fullpath)), ext)
return slides
from tree import makeSureDirectoryExists
def getOutputFileName(originalFileName, outputExtension, index):
return os.path.splitext(originalFileName)[0]+'-'+str(index) + outputExtension
def doFile(filename, linkrel, ext, url, templ, options={}, outfileGenerator=getOutputFileName):
from tree import parseFileAndReport
doc = parseFileAndReport(filename)
slides = munge(doc, templ, linkrel, os.path.dirname(filename), filename, ext, url, options)
for slide, index in zip(slides, range(len(slides))):
newFilename = outfileGenerator(filename, ext, index)
makeSureDirectoryExists(newFilename)
slide.dom.writexml(open(newFilename, 'wb'))
# Prosper output
class ProsperSlides(LatexSpitter):
firstSlide = 1
start_html = '\\documentclass[ps]{prosper}\n'
start_body = '\\begin{document}\n'
start_div_author = '\\author{'
end_div_author = '}'
def visitNode_h2(self, node):
if self.firstSlide:
self.firstSlide = 0
self.end_body = '\\end{slide}\n\n' + self.end_body
else:
self.writer('\\end{slide}\n\n')
self.writer('\\begin{slide}{')
spitter = HeadingLatexSpitter(self.writer, self.currDir, self.filename)
spitter.visitNodeDefault(node)
self.writer('}')
def _write_img(self, target):
self.writer('\\begin{center}\\includegraphics[%%\nwidth=1.0\n\\textwidth,'
'height=1.0\\textheight,\nkeepaspectratio]{%s}\\end{center}\n' % target)
class PagebreakLatex(LatexSpitter):
everyN = 1
currentN = 0
seenH2 = 0
start_html = LatexSpitter.start_html+"\\date{}\n"
start_body = '\\begin{document}\n\n'
def visitNode_h2(self, node):
if not self.seenH2:
self.currentN = 0
self.seenH2 = 1
else:
self.currentN += 1
self.currentN %= self.everyN
if not self.currentN:
self.writer('\\clearpage\n')
level = (int(node.tagName[1])-2)+self.baseLevel
self.writer('\n\n\\'+level*'sub'+'section*{')
spitter = HeadingLatexSpitter(self.writer, self.currDir, self.filename)
spitter.visitNodeDefault(node)
self.writer('}\n')
class TwoPagebreakLatex(PagebreakLatex):
everyN = 2
class SlidesProcessingFunctionFactory(default.ProcessingFunctionFactory):
latexSpitters = default.ProcessingFunctionFactory.latexSpitters.copy()
latexSpitters['prosper'] = ProsperSlides
latexSpitters['page'] = PagebreakLatex
latexSpitters['twopage'] = TwoPagebreakLatex
def getDoFile(self):
return doFile
def generate_mgp(self, d, fileNameGenerator=None):
template = d.get('template', 'template.mgp')
df = lambda file, linkrel: convertFile(file, MagicpointOutput, template, ext=".mgp")
return df
factory=SlidesProcessingFunctionFactory()
|
adrienbrault/home-assistant | refs/heads/dev | homeassistant/components/zha/core/channels/closures.py | 5 | """Closures channels module for Zigbee Home Automation."""
import zigpy.zcl.clusters.closures as closures
from homeassistant.core import callback
from .. import registries
from ..const import REPORT_CONFIG_IMMEDIATE, SIGNAL_ATTR_UPDATED
from .base import ClientChannel, ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(closures.DoorLock.cluster_id)
class DoorLockChannel(ZigbeeChannel):
"""Door lock channel."""
_value_attribute = 0
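    # Attribute id 0x0000 on the ZCL Door Lock cluster is ``lock_state``.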
REPORT_CONFIG = ({"attr": "lock_state", "config": REPORT_CONFIG_IMMEDIATE},)
async def async_update(self):
"""Retrieve latest state."""
result = await self.get_attribute_value("lock_state", from_cache=True)
if result is not None:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", 0, "lock_state", result
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if (
self._cluster.client_commands is None
or self._cluster.client_commands.get(command_id) is None
):
return
command_name = self._cluster.client_commands.get(command_id, [command_id])[0]
if command_name == "operation_event_notification":
self.zha_send_event(
command_name,
{
"source": args[0].name,
"operation": args[1].name,
"code_slot": (args[2] + 1), # start code slots at 1
},
)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute update from lock cluster."""
attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
self.debug(
"Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
)
if attrid == self._value_attribute:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, attr_name, value
)
async def async_set_user_code(self, code_slot: int, user_code: str) -> None:
"""Set the user code for the code slot."""
await self.set_pin_code(
code_slot - 1, # start code slots at 1, Zigbee internals use 0
closures.DoorLock.UserStatus.Enabled,
closures.DoorLock.UserType.Unrestricted,
user_code,
)
async def async_enable_user_code(self, code_slot: int) -> None:
"""Enable the code slot."""
await self.set_user_status(code_slot - 1, closures.DoorLock.UserStatus.Enabled)
async def async_disable_user_code(self, code_slot: int) -> None:
"""Disable the code slot."""
await self.set_user_status(code_slot - 1, closures.DoorLock.UserStatus.Disabled)
async def async_get_user_code(self, code_slot: int) -> int:
"""Get the user code from the code slot."""
result = await self.get_pin_code(code_slot - 1)
return result
async def async_clear_user_code(self, code_slot: int) -> None:
"""Clear the code slot."""
await self.clear_pin_code(code_slot - 1)
async def async_clear_all_user_codes(self) -> None:
"""Clear all code slots."""
await self.clear_all_pin_codes()
async def async_set_user_type(self, code_slot: int, user_type: str) -> None:
"""Set user type."""
await self.set_user_type(code_slot - 1, user_type)
async def async_get_user_type(self, code_slot: int) -> str:
"""Get user type."""
result = await self.get_user_type(code_slot - 1)
return result
@registries.ZIGBEE_CHANNEL_REGISTRY.register(closures.Shade.cluster_id)
class Shade(ZigbeeChannel):
"""Shade channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(closures.WindowCovering.cluster_id)
class WindowCoveringClient(ClientChannel):
"""Window client channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(closures.WindowCovering.cluster_id)
class WindowCovering(ZigbeeChannel):
"""Window channel."""
_value_attribute = 8
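    # Attribute id 0x0008 on the ZCL Window Covering cluster is
    # ``current_position_lift_percentage``.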
REPORT_CONFIG = (
{"attr": "current_position_lift_percentage", "config": REPORT_CONFIG_IMMEDIATE},
)
async def async_update(self):
"""Retrieve latest state."""
result = await self.get_attribute_value(
"current_position_lift_percentage", from_cache=False
)
self.debug("read current position: %s", result)
if result is not None:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
8,
"current_position_lift_percentage",
result,
)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute update from window_covering cluster."""
attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
self.debug(
"Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
)
if attrid == self._value_attribute:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, attr_name, value
)
|
GenericStudent/home-assistant | refs/heads/dev | homeassistant/components/vera/__init__.py | 8 | """Support for Vera devices."""
import asyncio
from collections import defaultdict
import logging
from typing import Any, Dict, Generic, List, Optional, Type, TypeVar
import pyvera as veraApi
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ARMED,
ATTR_BATTERY_LEVEL,
ATTR_LAST_TRIP_TIME,
ATTR_TRIPPED,
CONF_EXCLUDE,
CONF_LIGHTS,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
from homeassistant.util.dt import utc_from_timestamp
from .common import (
ControllerData,
SubscriptionRegistry,
get_configured_platforms,
get_controller_data,
set_controller_data,
)
from .config_flow import fix_device_id_list, new_options
from .const import (
ATTR_CURRENT_ENERGY_KWH,
ATTR_CURRENT_POWER_W,
CONF_CONTROLLER,
CONF_LEGACY_UNIQUE_ID,
DOMAIN,
VERA_ID_FORMAT,
)
_LOGGER = logging.getLogger(__name__)
VERA_ID_LIST_SCHEMA = vol.Schema([int])
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTROLLER): cv.url,
vol.Optional(CONF_EXCLUDE, default=[]): VERA_ID_LIST_SCHEMA,
vol.Optional(CONF_LIGHTS, default=[]): VERA_ID_LIST_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, base_config: dict) -> bool:
"""Set up for Vera controllers."""
hass.data[DOMAIN] = {}
config = base_config.get(DOMAIN)
if not config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=config,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Do setup of vera."""
    # Use options entered during initial config flow or provided from configuration.yaml
if config_entry.data.get(CONF_LIGHTS) or config_entry.data.get(CONF_EXCLUDE):
hass.config_entries.async_update_entry(
entry=config_entry,
data=config_entry.data,
options=new_options(
config_entry.data.get(CONF_LIGHTS, []),
config_entry.data.get(CONF_EXCLUDE, []),
),
)
saved_light_ids = config_entry.options.get(CONF_LIGHTS, [])
saved_exclude_ids = config_entry.options.get(CONF_EXCLUDE, [])
base_url = config_entry.data[CONF_CONTROLLER]
light_ids = fix_device_id_list(saved_light_ids)
exclude_ids = fix_device_id_list(saved_exclude_ids)
# If the ids were corrected. Update the config entry.
if light_ids != saved_light_ids or exclude_ids != saved_exclude_ids:
hass.config_entries.async_update_entry(
entry=config_entry, options=new_options(light_ids, exclude_ids)
)
# Initialize the Vera controller.
subscription_registry = SubscriptionRegistry(hass)
controller = veraApi.VeraController(base_url, subscription_registry)
await hass.async_add_executor_job(controller.start)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, controller.stop)
try:
all_devices = await hass.async_add_executor_job(controller.get_devices)
all_scenes = await hass.async_add_executor_job(controller.get_scenes)
except RequestException as exception:
# There was a network related error connecting to the Vera controller.
_LOGGER.exception("Error communicating with Vera API")
raise ConfigEntryNotReady from exception
# Exclude devices unwanted by user.
devices = [device for device in all_devices if device.device_id not in exclude_ids]
vera_devices = defaultdict(list)
for device in devices:
device_type = map_vera_device(device, light_ids)
if device_type is not None:
vera_devices[device_type].append(device)
vera_scenes = []
for scene in all_scenes:
vera_scenes.append(scene)
controller_data = ControllerData(
controller=controller,
devices=vera_devices,
scenes=vera_scenes,
config_entry=config_entry,
)
set_controller_data(hass, config_entry, controller_data)
# Forward the config data to the necessary platforms.
for platform in get_configured_platforms(controller_data):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload Withings config entry."""
controller_data: ControllerData = get_controller_data(hass, config_entry)
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in get_configured_platforms(controller_data)
]
tasks.append(hass.async_add_executor_job(controller_data.controller.stop))
await asyncio.gather(*tasks)
return True
def map_vera_device(vera_device: veraApi.VeraDevice, remap: List[int]) -> str:
"""Map vera classes to Home Assistant types."""
type_map = {
veraApi.VeraDimmer: "light",
veraApi.VeraBinarySensor: "binary_sensor",
veraApi.VeraSensor: "sensor",
veraApi.VeraArmableDevice: "switch",
veraApi.VeraLock: "lock",
veraApi.VeraThermostat: "climate",
veraApi.VeraCurtain: "cover",
veraApi.VeraSceneController: "sensor",
veraApi.VeraSwitch: "switch",
}
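    # Iteration order matters here: more specific pyvera classes (e.g.
    # VeraDimmer, which subclasses VeraSwitch) must appear before their base
    # classes, because the first isinstance() match below wins.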
def map_special_case(instance_class: Type, entity_type: str) -> str:
if instance_class is veraApi.VeraSwitch and vera_device.device_id in remap:
return "light"
return entity_type
return next(
iter(
map_special_case(instance_class, entity_type)
for instance_class, entity_type in type_map.items()
if isinstance(vera_device, instance_class)
),
None,
)
DeviceType = TypeVar("DeviceType", bound=veraApi.VeraDevice)
class VeraDevice(Generic[DeviceType], Entity):
"""Representation of a Vera device entity."""
def __init__(self, vera_device: DeviceType, controller_data: ControllerData):
"""Initialize the device."""
self.vera_device = vera_device
self.controller = controller_data.controller
self._name = self.vera_device.name
# Append device id to prevent name clashes in HA.
self.vera_id = VERA_ID_FORMAT.format(
slugify(vera_device.name), vera_device.vera_device_id
)
if controller_data.config_entry.data.get(CONF_LEGACY_UNIQUE_ID):
self._unique_id = str(self.vera_device.vera_device_id)
else:
self._unique_id = f"vera_{controller_data.config_entry.unique_id}_{self.vera_device.vera_device_id}"
async def async_added_to_hass(self) -> None:
"""Subscribe to updates."""
self.controller.register(self.vera_device, self._update_callback)
def _update_callback(self, _device: DeviceType) -> None:
"""Update the state."""
self.schedule_update_ha_state(True)
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def should_poll(self) -> bool:
"""Get polling requirement from vera device."""
return self.vera_device.should_poll
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the device."""
attr = {}
if self.vera_device.has_battery:
attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level
if self.vera_device.is_armable:
armed = self.vera_device.is_armed
attr[ATTR_ARMED] = "True" if armed else "False"
if self.vera_device.is_trippable:
last_tripped = self.vera_device.last_trip
if last_tripped is not None:
utc_time = utc_from_timestamp(int(last_tripped))
attr[ATTR_LAST_TRIP_TIME] = utc_time.isoformat()
else:
attr[ATTR_LAST_TRIP_TIME] = None
tripped = self.vera_device.is_tripped
attr[ATTR_TRIPPED] = "True" if tripped else "False"
power = self.vera_device.power
if power:
attr[ATTR_CURRENT_POWER_W] = convert(power, float, 0.0)
energy = self.vera_device.energy
if energy:
attr[ATTR_CURRENT_ENERGY_KWH] = convert(energy, float, 0.0)
attr["Vera Device Id"] = self.vera_device.vera_device_id
return attr
@property
def unique_id(self) -> str:
"""Return a unique ID.
The Vera assigns a unique and immutable ID number to each device.
"""
return self._unique_id
|
srluge/SickRage | refs/heads/master | lib/sqlalchemy/dialects/mysql/oursql.py | 79 | # mysql/oursql.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
Unicode
-------
oursql defaults to using ``utf8`` as the connection charset, but other
encodings may be used instead. Like the MySQL-Python driver, unicode support
can be completely disabled::
# oursql sets the connection charset to utf8 automatically; all strings come
# back as utf8 str
create_engine('mysql+oursql:///mydb?use_unicode=0')
To not automatically use ``utf8`` and instead use whatever the connection
defaults to, there is a separate parameter::
# use the default connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?default_charset=1')
# use latin1 as the connection charset; all strings come back as unicode
create_engine('mysql+oursql:///mydb?charset=latin1')
"""
import re
from .base import (BIT, MySQLDialect, MySQLExecutionContext)
from ... import types as sqltypes, util
class _oursqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""oursql already converts mysql bits, so."""
return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
return self.execution_options.get('_oursql_plain_query', False)
class MySQLDialect_oursql(MySQLDialect):
driver = 'oursql'
if util.py2k:
supports_unicode_binds = True
supports_unicode_statements = True
supports_native_decimal = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
execution_ctx_cls = MySQLExecutionContext_oursql
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
sqltypes.Time: sqltypes.Time,
BIT: _oursqlBIT,
}
)
@classmethod
def dbapi(cls):
return __import__('oursql')
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of *cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
else:
cursor.execute(statement, parameters)
def do_begin(self, connection):
connection.cursor().execute('BEGIN', plain_query=True)
def _xa_query(self, connection, query, xid):
if util.py2k:
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
arg = connection.connection._escape_string(xid.encode(charset)).decode(charset)
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(query % arg)
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
self._xa_query(connection, 'XA BEGIN %s', xid)
def do_prepare_twophase(self, connection, xid):
self._xa_query(connection, 'XA END %s', xid)
self._xa_query(connection, 'XA PREPARE %s', xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self._xa_query(connection, 'XA END %s', xid)
self._xa_query(connection, 'XA ROLLBACK %s', xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
self._xa_query(connection, 'XA COMMIT %s', xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema
)
def get_table_options(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_table_options(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_columns(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_columns(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_view_names(self, connection, schema=None, **kw):
return MySQLDialect.get_view_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema=schema,
**kw
)
def get_table_names(self, connection, schema=None, **kw):
return MySQLDialect.get_table_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema
)
def get_schema_names(self, connection, **kw):
return MySQLDialect.get_schema_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
**kw
)
def initialize(self, connection):
return MySQLDialect.initialize(
self,
connection.execution_options(_oursql_plain_query=True)
)
def _show_create_table(self, connection, table, charset=None,
full_name=None):
return MySQLDialect._show_create_table(
self,
connection.contextual_connect(close_with_result=True).
execution_options(_oursql_plain_query=True),
table, charset, full_name
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'port', int)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'autoping', bool)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
util.coerce_kw_type(opts, 'default_charset', bool)
if opts.pop('default_charset', False):
opts['charset'] = None
else:
util.coerce_kw_type(opts, 'charset', str)
opts['use_unicode'] = opts.get('use_unicode', True)
util.coerce_kw_type(opts, 'use_unicode', bool)
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
opts.setdefault('found_rows', True)
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
return [[], opts]
def _get_server_version_info(self, connection):
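        # dbapi_con.server_info is a version string such as "5.5.40-0ubuntu1";
        # split on "." and "-", keeping the numeric components as ints.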
dbapi_con = connection.connection
version = []
        r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.server_info):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _extract_error_code(self, exception):
return exception.errno
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchone()
def _compat_first(self, rp, charset=None):
return rp.first()
dialect = MySQLDialect_oursql
|
GunoH/intellij-community | refs/heads/master | python/testData/inspections/PyRelativeImportInspection/PlainDirectoryDottedImportFromDotTwoElementsWithAs/plainDirectory/script.py | 10 | <weak_warning descr="Relative import outside of a package">from . import foo, bar as b</weak_warning> |
thiagopena/djangoSIGE | refs/heads/master | djangosige/apps/compras/apps.py | 1 | from __future__ import unicode_literals
from django.apps import AppConfig
class ComprasConfig(AppConfig):
name = 'compras'
|
BruceDai/web-testing-service | refs/heads/master | tools/pywebsocket/src/mod_pywebsocket/_stream_base.py | 652 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# the mod_python documentation says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import socket
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
"""This exception will be raised when a connection is terminated
unexpectedly.
"""
pass
class InvalidFrameException(ConnectionTerminatedException):
"""This exception will be raised when we received an invalid frame we
cannot parse.
"""
pass
class BadOperationException(Exception):
"""This exception will be raised when send_message() is called on
server-terminated connection or receive_message() is called on
client-terminated connection.
"""
pass
class UnsupportedFrameException(Exception):
"""This exception will be raised when we receive a frame with flag, opcode
we cannot handle. Handlers can just catch and ignore this exception and
call receive_message() again to continue processing the next frame.
"""
pass
class InvalidUTF8Exception(Exception):
"""This exception will be raised when we receive a text frame which
contains invalid UTF-8 strings.
"""
pass
class StreamBase(object):
"""Base stream class."""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
self._logger = util.get_class_logger(self)
self._request = request
def _read(self, length):
"""Reads length bytes from connection. In case we catch any exception,
        prepends the remote address to the exception message and raises again.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
try:
read_bytes = self._request.connection.read(length)
if not read_bytes:
raise ConnectionTerminatedException(
'Receiving %d byte failed. Peer (%r) closed connection' %
(length, (self._request.connection.remote_addr,)))
return read_bytes
except socket.error, e:
# Catch a socket.error. Because it's not a child class of the
# IOError prior to Python 2.6, we cannot omit this except clause.
# Use %s rather than %r for the exception to use human friendly
# format.
raise ConnectionTerminatedException(
'Receiving %d byte failed. socket.error (%s) occurred' %
(length, e))
except IOError, e:
# Also catch an IOError because mod_python throws it.
raise ConnectionTerminatedException(
'Receiving %d byte failed. IOError (%s) occurred' %
(length, e))
def _write(self, bytes_to_write):
"""Writes given bytes to connection. In case we catch any exception,
        prepends the remote address to the exception message and raises again.
"""
try:
self._request.connection.write(bytes_to_write)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._request.connection.remote_addr,),
e)
raise
def receive_bytes(self, length):
"""Receives multiple bytes. Retries read when we couldn't receive the
specified amount.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
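        # A single read may return fewer bytes than requested, so keep reading
        # until the full length has been accumulated.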
while length > 0:
new_read_bytes = self._read(length)
read_bytes.append(new_read_bytes)
length -= len(new_read_bytes)
return ''.join(read_bytes)
def _read_until(self, delim_char):
"""Reads bytes until we encounter delim_char. The result will not
contain delim_char.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while True:
ch = self._read(1)
if ch == delim_char:
break
read_bytes.append(ch)
return ''.join(read_bytes)
# vi:sts=4 sw=4 et
|
ninowalker/mapnik-trunk | refs/heads/master | scons/scons-local-1.2.0/SCons/Tool/PharLapCommon.py | 12 | """SCons.Tool.PharLapCommon
This module contains common code used by all Tools for the
Phar Lap ETS tool chain. Right now, this is linkloc and
386asm.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/PharLapCommon.py 3842 2008/12/20 22:59:52 scons"
import os
import os.path
import SCons.Errors
import SCons.Util
import re
import string
def getPharLapPath():
"""Reads the registry to find the installed path of the Phar Lap ETS
development kit.
Raises UserError if no installed version of Phar Lap can
be found."""
if not SCons.Util.can_read_reg:
raise SCons.Errors.InternalError, "No Windows registry module was found"
try:
k=SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Pharlap\\ETS')
val, type = SCons.Util.RegQueryValueEx(k, 'BaseDir')
# The following is a hack...there is (not surprisingly)
# an odd issue in the Phar Lap plug in that inserts
# a bunch of junk data after the phar lap path in the
# registry. We must trim it.
idx=val.find('\0')
if idx >= 0:
val = val[:idx]
return os.path.normpath(val)
except SCons.Util.RegError:
raise SCons.Errors.UserError, "Cannot find Phar Lap ETS path in the registry. Is it installed properly?"
REGEX_ETS_VER = re.compile(r'#define\s+ETS_VER\s+([0-9]+)')
def getPharLapVersion():
"""Returns the version of the installed ETS Tool Suite as a
decimal number. This version comes from the ETS_VER #define in
the embkern.h header. For example, '#define ETS_VER 1010' (which
is what Phar Lap 10.1 defines) would cause this method to return
1010. Phar Lap 9.1 does not have such a #define, but this method
will return 910 as a default.
Raises UserError if no installed version of Phar Lap can
be found."""
include_path = os.path.join(getPharLapPath(), os.path.normpath("include/embkern.h"))
if not os.path.exists(include_path):
raise SCons.Errors.UserError, "Cannot find embkern.h in ETS include directory.\nIs Phar Lap ETS installed properly?"
mo = REGEX_ETS_VER.search(open(include_path, 'r').read())
if mo:
return int(mo.group(1))
# Default return for Phar Lap 9.1
return 910
def addPathIfNotExists(env_dict, key, path, sep=os.pathsep):
"""This function will take 'key' out of the dictionary
'env_dict', then add the path 'path' to that key if it is not
already there. This treats the value of env_dict[key] as if it
has a similar format to the PATH variable...a list of paths
separated by tokens. The 'path' will get added to the list if it
is not already there."""
try:
is_list = 1
paths = env_dict[key]
if not SCons.Util.is_List(env_dict[key]):
paths = string.split(paths, sep)
is_list = 0
        if os.path.normcase(path) not in map(os.path.normcase, paths):
paths = [ path ] + paths
if is_list:
env_dict[key] = paths
else:
env_dict[key] = string.join(paths, sep)
except KeyError:
env_dict[key] = path
def addPharLapPaths(env):
"""This function adds the path to the Phar Lap binaries, includes,
and libraries, if they are not already there."""
ph_path = getPharLapPath()
try:
env_dict = env['ENV']
except KeyError:
env_dict = {}
env['ENV'] = env_dict
addPathIfNotExists(env_dict, 'PATH',
os.path.join(ph_path, 'bin'))
addPathIfNotExists(env_dict, 'INCLUDE',
os.path.join(ph_path, 'include'))
addPathIfNotExists(env_dict, 'LIB',
os.path.join(ph_path, 'lib'))
addPathIfNotExists(env_dict, 'LIB',
os.path.join(ph_path, os.path.normpath('lib/vclib')))
env['PHARLAP_PATH'] = getPharLapPath()
env['PHARLAP_VERSION'] = str(getPharLapVersion())
|
mlperf/training_results_v0.5 | refs/heads/master | v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/t2t/tensor2tensor/data_generators/wnli.py | 3 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for the Winograd NLI dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import zipfile
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow as tf
EOS = text_encoder.EOS
@registry.register_problem
class WinogradNLI(text_problems.TextConcat2ClassProblem):
"""Winograd NLI classification problems."""
# Link to data from GLUE: https://gluebenchmark.com/tasks
_WNLI_URL = ("https://firebasestorage.googleapis.com/v0/b/"
"mtl-sentence-representations.appspot.com/o/"
"data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-"
"4bd7-99a5-5e00222e0faf")
@property
def is_generate_per_split(self):
return True
@property
def dataset_splits(self):
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 1,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}]
@property
def approx_vocab_size(self):
return 2**13 # 8k vocab suffices for this small dataset.
@property
def vocab_filename(self):
return "vocab.wnli.%d" % self.approx_vocab_size
@property
def num_classes(self):
return 2
def class_labels(self, data_dir):
del data_dir
# Note this binary classification is different from usual MNLI.
return ["not_entailment", "entailment"]
def _maybe_download_corpora(self, tmp_dir):
wnli_filename = "WNLI.zip"
wnli_finalpath = os.path.join(tmp_dir, "WNLI")
if not tf.gfile.Exists(wnli_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, wnli_filename, self._WNLI_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return wnli_finalpath
def example_generator(self, filename):
for idx, line in enumerate(tf.gfile.Open(filename, "rb")):
if idx == 0: continue # skip header
if six.PY2:
line = unicode(line.strip(), "utf-8")
else:
line = line.strip().decode("utf-8")
_, s1, s2, l = line.split("\t")
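      # Each data row is "index<TAB>sentence1<TAB>sentence2<TAB>label",
      # e.g. (hypothetical): "0\tThe cup fell.\tThe cup broke.\t1"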
inputs = [s1, s2]
yield {
"inputs": inputs,
"label": int(l)
}
def generate_samples(self, data_dir, tmp_dir, dataset_split):
wnli_dir = self._maybe_download_corpora(tmp_dir)
if dataset_split == problem.DatasetSplit.TRAIN:
filesplit = "train.tsv"
else:
filesplit = "dev.tsv"
filename = os.path.join(wnli_dir, filesplit)
for example in self.example_generator(filename):
yield example
@registry.register_problem
class WinogradNLICharacters(WinogradNLI):
"""Winograd NLI classification problems, character level"""
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
def global_task_id(self):
return problem.TaskID.EN_NLI
|
bansalvarun/Drome | refs/heads/master | allauth/socialaccount/providers/openid/admin.py | 76 | from django.contrib import admin
from .models import OpenIDStore, OpenIDNonce
class OpenIDStoreAdmin(admin.ModelAdmin):
pass
class OpenIDNonceAdmin(admin.ModelAdmin):
pass
admin.site.register(OpenIDStore, OpenIDStoreAdmin)
admin.site.register(OpenIDNonce, OpenIDNonceAdmin)
|
RPGOne/Skynet | refs/heads/Miho | node-master/deps/v8/tools/gen-inlining-tests.py | 7 | #!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import textwrap
import sys
SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
# Generates 2 files. Found by trial and error.
SHARD_SIZE = 97
PREAMBLE = """
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --turbo --no-always-opt
// This test file was generated by tools/gen-inlining-tests.py .
// Global variables
var deopt = undefined; // either true or false
var counter = 0;
function resetState() {
counter = 0;
}
function warmUp(f) {
try {
f();
} catch (ex) {
// ok
}
try {
f();
} catch (ex) {
// ok
}
}
function resetOptAndAssertResultEquals(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
assertEquals(expected, f());
}
function resetOptAndAssertThrowsWith(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
try {
var result = f();
fail("resetOptAndAssertThrowsWith",
"exception: " + expected,
"result: " + result);
} catch (ex) {
assertEquals(expected, ex);
}
}
function increaseAndReturn15() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
function increaseAndThrow42() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
function increaseAndReturn15_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
function increaseAndThrow42_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
// Alternative 1
function returnOrThrow(doReturn) {
if (doReturn) {
return increaseAndReturn15();
} else {
return increaseAndThrow42();
}
}
// Alternative 2
function increaseAndReturn15_calls_noopt() {
return increaseAndReturn15_noopt_inner();
}
function increaseAndThrow42_calls_noopt() {
return increaseAndThrow42_noopt_inner();
}
// Alternative 3.
// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
// as the other one.
function invertFunctionCall(f) {
var result;
try {
result = f();
} catch (ex) {
return ex - 27;
}
throw result + 27;
}
// Alternative 4: constructor
function increaseAndStore15Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 15;
}
function increaseAndThrow42Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 42;
throw this.x;
}
// Alternative 5: property
var magic = {};
Object.defineProperty(magic, 'prop', {
get: function () {
if (deopt) %DeoptimizeFunction(f);
return 15 + 0 * ++counter;
},
set: function(x) {
// argument should be 37
if (deopt) %DeoptimizeFunction(f);
counter -= 36 - x; // increments counter
throw 42;
}
})
// Generate type feedback.
assertEquals(15, increaseAndReturn15_calls_noopt());
assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
assertEquals(15, (new increaseAndStore15Constructor()).x);
assertThrowsEquals(function() {
return (new increaseAndThrow42Constructor()).x;
},
42);
function runThisShard() {
""".strip()
def booltuples(n):
"""booltuples(2) yields 4 tuples: (False, False), (False, True),
(True, False), (True, True)."""
assert isinstance(n, int)
if n <= 0:
yield ()
else:
for initial in booltuples(n-1):
yield initial + (False,)
yield initial + (True,)
def fnname(flags):
assert len(FLAGLETTERS) == len(flags)
return "f_" + ''.join(
FLAGLETTERS[i] if b else '_'
for (i, b) in enumerate(flags))
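# For example, the all-False tuple maps to "f_" followed by 18 underscores,
# while a tuple with only the last flag (deopt) set yields "f_" plus 17
# underscores plus "d" (see FLAGLETTERS near the end of this file).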
NUM_TESTS_PRINTED = 0
NUM_TESTS_IN_SHARD = 0
def printtest(flags):
"""Print a test case. Takes a couple of boolean flags, on which the
printed Javascript code depends."""
assert all(isinstance(flag, bool) for flag in flags)
# The alternative flags are in reverse order so that if we take all possible
# tuples, ordered lexicographically from false to true, we get first the
# default, then alternative 1, then 2, etc.
(
alternativeFn5, # use alternative #5 for returning/throwing:
# return/throw using property
alternativeFn4, # use alternative #4 for returning/throwing:
# return/throw using constructor
alternativeFn3, # use alternative #3 for returning/throwing:
# return/throw indirectly, based on function argument
alternativeFn2, # use alternative #2 for returning/throwing:
# return/throw indirectly in unoptimized code,
# no branching
alternativeFn1, # use alternative #1 for returning/throwing:
# return/throw indirectly, based on boolean arg
tryThrows, # in try block, call throwing function
tryReturns, # in try block, call returning function
tryFirstReturns, # in try block, returning goes before throwing
tryResultToLocal, # in try block, result goes to local variable
doCatch, # include catch block
catchReturns, # in catch block, return
catchWithLocal, # in catch block, modify or return the local variable
catchThrows, # in catch block, throw
doFinally, # include finally block
finallyReturns, # in finally block, return local variable
finallyThrows, # in finally block, throw
endReturnLocal, # at very end, return variable local
deopt, # deopt inside inlined function
) = flags
# BASIC RULES
# Only one alternative can be applied at any time.
if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
+ alternativeFn5 > 1):
return
# In try, return or throw, or both.
if not (tryReturns or tryThrows): return
# Either doCatch or doFinally.
if not doCatch and not doFinally: return
# Catch flags only make sense when catching
if not doCatch and (catchReturns or catchWithLocal or catchThrows):
return
# Finally flags only make sense when finallying
if not doFinally and (finallyReturns or finallyThrows):
return
# tryFirstReturns is only relevant when both tryReturns and tryThrows are
# true.
if tryFirstReturns and not (tryReturns and tryThrows): return
# From the try and finally block, we can return or throw, but not both.
if catchReturns and catchThrows: return
if finallyReturns and finallyThrows: return
# If at the end we return the local, we need to have touched it.
if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
# PRUNING
anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
alternativeFn4, alternativeFn5])
specificAlternative = any([alternativeFn2, alternativeFn3])
rareAlternative = not specificAlternative
# If try returns and throws, then don't catchWithLocal, endReturnLocal, or
# deopt, or do any alternative.
if (tryReturns and tryThrows and
(catchWithLocal or endReturnLocal or deopt or anyAlternative)):
return
# We don't do any alternative if we do a finally.
if doFinally and anyAlternative: return
# We only use the local variable if we do alternative #2 or #3.
if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
not specificAlternative):
return
# We don't need to test deopting into a finally.
if doFinally and deopt: return
# We're only interested in alternative #2 if we have endReturnLocal, no
# catchReturns, and no catchThrows, and deopt.
if (alternativeFn2 and
(not endReturnLocal or catchReturns or catchThrows or not deopt)):
return
# Flag check succeeded.
trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames))
write(textwrap.fill(flagsMsgLine, subsequent_indent=' // '))
write("")
if not anyAlternative:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15()',
'increaseAndThrow42': 'increaseAndThrow42()',
}
elif alternativeFn1:
fragments = {
'increaseAndReturn15': 'returnOrThrow(true)',
'increaseAndThrow42': 'returnOrThrow(false)',
}
elif alternativeFn2:
fragments = {
'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
}
elif alternativeFn3:
fragments = {
'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
}
elif alternativeFn4:
fragments = {
'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
}
else:
assert alternativeFn5
fragments = {
'increaseAndReturn15': 'magic.prop /* returns 15 */',
'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
}
# As we print code, we also maintain what the result should be. Variable
# {result} can be one of three things:
#
# - None, indicating returning JS null
# - ("return", n) with n an integer
# - ("throw", n), with n an integer
result = None
# We also maintain what the counter should be at the end.
# The counter is reset just before f is called.
counter = 0
write( " f = function {} () {{".format(fnname(flags)))
write( " var local = 888;")
write( " deopt = {};".format("true" if deopt else "false"))
local = 888
write( " try {")
write( " counter++;")
counter += 1
resultTo = "local +=" if tryResultToLocal else "return"
if tryReturns and not (tryThrows and not tryFirstReturns):
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
if tryThrows:
write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
if result == None:
counter += 1
result = ("throw", 42)
if tryReturns and tryThrows and not tryFirstReturns:
write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
if result == None:
counter += 1
if tryResultToLocal:
local += 19
else:
result = ("return", 19)
write( " counter++;")
if result == None:
counter += 1
if doCatch:
write( " } catch (ex) {")
write( " counter++;")
if isinstance(result, tuple) and result[0] == 'throw':
counter += 1
if catchThrows:
write(" throw 2 + ex;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('throw', 2 + result[1])
elif catchReturns and catchWithLocal:
write(" return 2 + local;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + local)
elif catchReturns and not catchWithLocal:
write(" return 2 + ex;");
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + result[1])
elif catchWithLocal:
write(" local += ex;");
if isinstance(result, tuple) and result[0] == "throw":
local += result[1]
result = None
counter += 1
else:
if isinstance(result, tuple) and result[0] == "throw":
result = None
counter += 1
write( " counter++;")
if doFinally:
write( " } finally {")
write( " counter++;")
counter += 1
if finallyThrows:
write(" throw 25;")
result = ('throw', 25)
elif finallyReturns:
write(" return 3 + local;")
result = ('return', 3 + local)
elif not finallyReturns and not finallyThrows:
write(" local += 2;")
local += 2
counter += 1
else: assert False # unreachable
write( " counter++;")
write( " }")
write( " counter++;")
if result == None:
counter += 1
if endReturnLocal:
write( " return 5 + local;")
if result == None:
result = ('return', 5 + local)
write( " }")
if result == None:
write( " resetOptAndAssertResultEquals(undefined, f);")
else:
tag, value = result
if tag == "return":
write( " resetOptAndAssertResultEquals({}, f);".format(value))
else:
assert tag == "throw"
write( " resetOptAndAssertThrowsWith({}, f);".format(value))
write( " assertEquals({}, counter);".format(counter))
write( "")
global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
NUM_TESTS_PRINTED += 1
NUM_TESTS_IN_SHARD += 1
FILE = None # to be initialised to an open file
SHARD_NUM = 1
def write(*args):
return print(*args, file=FILE)
def rotateshard():
global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
if MODE != 'shard':
return
if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
return
if FILE != None:
finishshard()
assert FILE == None
FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
write_shard_header()
NUM_TESTS_IN_SHARD = 0
def finishshard():
global FILE, SHARD_NUM, MODE
assert FILE
write_shard_footer()
if MODE == 'shard':
print("Wrote shard {}.".format(SHARD_NUM))
FILE.close()
FILE = None
SHARD_NUM += 1
def write_shard_header():
if MODE == 'shard':
write("// Shard {}.".format(SHARD_NUM))
write("")
write(PREAMBLE)
write("")
def write_shard_footer():
write("}")
write("%NeverOptimizeFunction(runThisShard);")
write("")
write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
write("")
write("runThisShard();")
FLAGLETTERS="54321trflcrltfrtld"
flagtuple = namedtuple('flagtuple', (
"alternativeFn5",
"alternativeFn4",
"alternativeFn3",
"alternativeFn2",
"alternativeFn1",
"tryThrows",
"tryReturns",
"tryFirstReturns",
"tryResultToLocal",
"doCatch",
"catchReturns",
"catchWithLocal",
"catchThrows",
"doFinally",
"finallyReturns",
"finallyThrows",
"endReturnLocal",
"deopt"
))
emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
f1 = emptyflags._replace(tryReturns=True, doCatch=True)
# You can test function printtest with f1.
allFlagCombinations = [
flagtuple(*bools)
for bools in booltuples(len(flagtuple._fields))
]
if __name__ == '__main__':
global MODE
if sys.argv[1:] == []:
MODE = 'stdout'
print("// Printing all shards together to stdout.")
print("")
write_shard_header()
FILE = sys.stdout
elif sys.argv[1:] == ['--shard-and-overwrite']:
MODE = 'shard'
else:
print("Usage:")
print("")
print(" python {}".format(sys.argv[0]))
print(" print all tests to standard output")
print(" python {} --shard-and-overwrite".format(sys.argv[0]))
print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
print("")
print(sys.argv[1:])
print("")
sys.exit(1)
rotateshard()
for flags in allFlagCombinations:
printtest(flags)
rotateshard()
finishshard()
if MODE == 'shard':
print("Total: {} tests.".format(NUM_TESTS_PRINTED))
|
EinsteinsWorkshop/BlocksCAD | refs/heads/development | blockly/i18n/js_to_json.py | 177 | #!/usr/bin/python
# Extracts messages from .js files into .json files for translation.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extracts messages from .js files into .json files for translation.
Specifically, lines with the following formats are extracted:
/// Here is a description of the following message.
Blockly.SOME_KEY = 'Some value';
Adjacent "///" lines are concatenated.
There are two output files, each of which is proper JSON. For each key, the
file en.json would get an entry of the form:
"Blockly.SOME_KEY", "Some value",
The file qqq.json would get:
"Blockly.SOME_KEY", "Here is a description of the following message.",
Commas would of course be omitted for the final entry of each value.
@author Ellen Spertus (ellen.spertus@gmail.com)
"""
import argparse
import codecs
import json
import os
import re
from common import write_files
_INPUT_DEF_PATTERN = re.compile("""Blockly.Msg.(\w*)\s*=\s*'([^']*)';?$""")
_INPUT_SYN_PATTERN = re.compile(
"""Blockly.Msg.(\w*)\s*=\s*Blockly.Msg.(\w*);""")
def main():
# Set up argument parser.
parser = argparse.ArgumentParser(description='Create translation files.')
parser.add_argument(
'--author',
default='Ellen Spertus <ellen.spertus@gmail.com>',
help='name and email address of contact for translators')
parser.add_argument('--lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--output_dir', default='json',
help='relative directory for output files')
parser.add_argument('--input_file', default='messages.js',
help='input file')
parser.add_argument('--quiet', action='store_true', default=False,
help='only display warnings, not routine info')
args = parser.parse_args()
if (not args.output_dir.endswith(os.path.sep)):
args.output_dir += os.path.sep
# Read and parse input file.
results = []
synonyms = {}
description = ''
infile = codecs.open(args.input_file, 'r', 'utf-8')
for line in infile:
if line.startswith('///'):
if description:
description = description + ' ' + line[3:].strip()
else:
description = line[3:].strip()
else:
match = _INPUT_DEF_PATTERN.match(line)
if match:
result = {}
result['meaning'] = match.group(1)
result['source'] = match.group(2)
if not description:
print('Warning: No description for ' + result['meaning'])
result['description'] = description
description = ''
results.append(result)
else:
match = _INPUT_SYN_PATTERN.match(line)
if match:
if description:
print('Warning: Description preceding definition of synonym {0}.'.
format(match.group(1)))
description = ''
synonyms[match.group(1)] = match.group(2)
infile.close()
# Create <lang_file>.json, keys.json, and qqq.json.
write_files(args.author, args.lang, args.output_dir, results, False)
# Create synonyms.json.
synonym_file_name = os.path.join(os.curdir, args.output_dir, 'synonyms.json')
with open(synonym_file_name, 'w') as outfile:
json.dump(synonyms, outfile)
if not args.quiet:
print("Wrote {0} synonym pairs to {1}.".format(
len(synonyms), synonym_file_name))
if __name__ == '__main__':
main()
|
SangDeukLee/futsal | refs/heads/develop | public/assets/node_js/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
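# Example (hypothetical): with deps == {'app': ['base'], 'base': []} and
# built == [], BuildProject('app', built, projects, deps) prints 'base'
# before 'app', i.e. a dependency-respecting build order.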
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # check that we have at least 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
marhar/cx_OracleTools | refs/heads/master | cx_PyOracleLib/cx_ImportData.py | 1 | """Defines class for importing data from an export file."""
import cPickle
import cx_Logging
import cx_Oracle
import os
import sys
class Importer:
"""Handles importing data from the file."""
def __init__(self, connection):
self.connection = connection
self.cursor = connection.cursor()
self.inFile = None
self.reportPoint = None
self.reportFunc = self.ReportProgress
self.commitPoint = None
def __iter__(self):
return self
def DataInTable(self):
"""Return a list of the data stored in the table."""
rows = []
while True:
row = cPickle.load(self.inFile)
if row is None:
break
rows.append(row)
return rows
def ImportTable(self):
"""Import the data into the table and return the number of rows
imported."""
numRows = 0
rowsToInsert = []
while True:
row = cPickle.load(self.inFile)
if row is None:
break
rowsToInsert.append(row)
numRows += 1
commit = (self.commitPoint is not None \
and numRows % self.commitPoint == 0)
if commit or len(rowsToInsert) == self.cursor.arraysize:
self.cursor.executemany(None, rowsToInsert)
rowsToInsert = []
if commit:
self.connection.commit()
if self.reportPoint and numRows % self.reportPoint == 0:
self.reportFunc(numRows)
if rowsToInsert:
self.cursor.executemany(None, rowsToInsert)
self.connection.commit()
if not self.reportPoint or numRows == 0 \
or numRows % self.reportPoint != 0:
self.reportFunc(numRows)
return numRows
def next(self):
"""Return the next table name to process."""
tableName = cPickle.load(self.inFile)
if tableName is None:
raise StopIteration
columnNames = []
bindVarNames = []
bindVars = []
columns = cPickle.load(self.inFile)
for name, dataType in columns:
dataSize = 0
if "," in dataType:
dataType, dataSize = dataType.split(",")
dataSize = int(dataSize)
dataType = getattr(cx_Oracle, dataType)
columnNames.append(name)
bindVars.append(self.cursor.var(dataType, dataSize))
bindVarNames.append(":%s" % len(bindVars))
sql = "insert into %s (%s) values (%s)" % \
(tableName, ",".join(columnNames), ",".join(bindVarNames))
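        # e.g. for a hypothetical table EMP with columns ID and NAME, the
        # prepared statement is: insert into EMP (ID,NAME) values (:1,:2)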
self.cursor.prepare(sql)
self.cursor.setinputsizes(*bindVars)
return tableName, columnNames
def OpenFile(self, fileName):
"""Open the file for importing."""
if fileName == "-":
self.inFile = sys.stdin
self.inFileSize = None
else:
self.inFile = file(fileName, "rb")
self.inFileSize = os.stat(fileName).st_size * 1.0
def ReportProgress(self, numRows):
"""Report progress on the import."""
if self.inFileSize is not None:
percent = (self.inFile.tell() / self.inFileSize) * 100
cx_Logging.Trace(" %d rows imported (%.0f%% of file).",
numRows, percent)
else:
cx_Logging.Trace(" %d rows imported.", numRows)
def SkipTable(self):
"""Skip the import of the table."""
while cPickle.load(self.inFile):
pass
|
Onirik79/aaritmud | refs/heads/master | data/proto_mobs/accademia-drago/accademia-drago_mob_ragazza.py | 1 | # -*- coding: utf-8 -*-
#= IMPORT ======================================================================
import random
from src.log import log
from src.database import database
from src.enums import TO, RACE, FLAG, ENTITYPE
from src.commands.command_say import command_say
from src.commands.command_eat import command_eat
#= CONSTANTS ==================================================================
TROLL_PROTO_CODE = "accademia-drago_mob_immagine-grark"
#= FUNCTIONS ==================================================================
def before_incoming(entity, from_room, direction, to_room, ragazza, running, behavioured):
change_descr(ragazza)
#- End of Function -
def before_intuited(entity, ragazza, descr, detail, behavioured):
change_descr(ragazza)
#- End of Function -
def before_looked(entity, ragazza, descr, detail, use_examine, behavioured):
change_descr(ragazza)
#- End of Function -
def change_descr(ragazza):
if not ragazza:
return
troll = cerca_il_troll(ragazza.location, TROLL_PROTO_CODE)
if not troll:
ragazza.long = "$N appare preoccupata."
ragazza.descr_sixth = "Ne deve aver passate di brutte esperienze..."
else:
ragazza.long = "$N piange disperata per le sevizie del troll"
ragazza.descr_sixth = "#looker is SEX.MALE#Mhhhh... un'umana di un certa sua bellezza... almeno una volta tolti i vari strati di sudiciume.... potresti guardare senza intervenire, oppure potresti salvarla, oppure ancora potresti salvarla per vedere quanto vale la sua riconoscenza...#Mhhhh... Porre fine a questa situazione? Approfittarne? Tirar dritto?#"
def before_die(ragazza, opponent):
troll = cerca_il_troll(ragazza.location, TROLL_PROTO_CODE)
if troll:
troll.long = "$N grugnisce soddisfatto"
#- End of Function -
def cerca_il_troll(location, troll_proto_code):
for en in location.iter_contains():
if not en.IS_PLAYER and en.prototype.code == troll_proto_code:
return en
return None
#- End of Function -
|
farragar/game-engine | refs/heads/master | src/engine.py | 1 | import copy
import sys
import pickle
import writer
from player import Player
"""
Simple engine for text-based adventure games
laurie@farragar.com
Big TODOs:
Implement items, stats & transition dependencies
Improve IO - print and raw_input doesn't cut it.
Improve state key/transition usability
Option to disable magic 0.2s wait
Print CMDs
Pass game state not game object to achievement unlock fns
DOCUMENTATION
"""
class Achievement:
def __init__(self, title, msg, should_unlock):
self.title = title
self.msg = msg
self.should_unlock = should_unlock
class State:
def __init__(self, key, msg, transitions, entry_hook=lambda x: None):
self.key = key
self.msg = msg
self.transitions = transitions
self.__entry_hook = entry_hook
def entry_hook(self, game):
self.init(game)
self.__entry_hook(game)
def init(self, game):
if callable(self.msg):
self.msg = self.msg(game)
if callable(self.transitions):
self.transitions = self.transitions(game)
self.transitions_map = {
transition.key: transition
for transition in self.transitions
if transition.is_active(game)
}
class Transition:
def __init__(self, key, msg, target_state_key, is_active=lambda x: True):
self.key = key
self.msg = msg
self.target_state_key = target_state_key
self.is_active = is_active
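# Minimal usage sketch (hypothetical keys and messages, not part of any game):
#   states = [
#       State('MENU', 'Welcome!', [Transition('N', 'New game', 'CAVE')]),
#       State('CAVE', 'You are in a cave.', []),
#   ]
#   Game(states).step()  # prints the MENU text and choices, then reads input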
class Game:
unlocked_achievements = set([])
def __init__(self, states, achievements=[]):
self.cmds = {
'X' : self.save_and_quit,
'Q' : self.quit
}
        self.achievements = achievements
        self.states = {
            state.key: state for state in states
        }
self.has_save = self.load_persistent()
self.reset_state()
self.game_state = {
'path': self.path,
'stats': self.player.stats,
'items': self.player.items
}
@property
def current_state(self):
return self.states[self.path[-1]]
def load_persistent(self):
try:
state_file = open('save/state.sav', 'rb')
game_state = pickle.load(state_file)
state_file.close()
self.unlocked_achievements = game_state['unlocked_achievements']
except (FileNotFoundError, EOFError):
return False
return True
def load_game(self):
state_file = open('save/state.sav', 'rb')
game_state = pickle.load(state_file)
state_file.close()
self.path = game_state['path']
self.stats = game_state['stats']
self.items = game_state['items']
def get_game_state(self):
return {
'unlocked_achievements': self.unlocked_achievements,
'path': self.path,
'stats': [],
'items': []
}
def reset_state(self):
self.over = False
self.path = ['MENU']
self.current_state.init(self)
self.player = Player('TODO')
def save(self):
state_file = open('save/state.sav', 'wb+')
pickle.dump(self.get_game_state(), state_file)
state_file.close()
def save_and_quit(self):
self.save()
self.quit()
def test_achievements(self):
for a in self.achievements:
            if a.should_unlock(self) and a.title not in self.unlocked_achievements:
self.unlock(a)
def unlock(self, achievement):
writer.print_unlock_msg(achievement)
self.unlocked_achievements.add(achievement.title)
def quit(self):
sys.exit()
def step(self):
self.over = not self.current_state.transitions
writer.print_state_text(self.current_state)
choices = [x for x in self.current_state.transitions
if x.is_active(self)
]
writer.print_choices(choices)
new_state = None
while new_state is None and not self.over:
try:
choice = input('--> ').upper()
if choice in self.cmds:
self.cmds[choice]()
new_state = self.states[self.current_state.transitions_map[choice].target_state_key]
new_state.entry_hook(self)
new_state.init(self)
if new_state.key != 'CONTINUE':
self.path.append(new_state.key)
except KeyError:
pass
|
Stavitsky/nova | refs/heads/master | nova/tests/unit/fake_server_actions.py | 97 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import db
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
FAKE_REQUEST_ID1 = 'req-3293a3f1-b44c-4609-b8d2-d81b105636b8'
FAKE_REQUEST_ID2 = 'req-25517360-b757-47d3-be45-0e8d2a01b36a'
FAKE_ACTION_ID1 = 123
FAKE_ACTION_ID2 = 456
FAKE_ACTIONS = {
FAKE_UUID: {
FAKE_REQUEST_ID1: {'id': FAKE_ACTION_ID1,
'action': 'reboot',
'instance_uuid': FAKE_UUID,
'request_id': FAKE_REQUEST_ID1,
'project_id': '147',
'user_id': '789',
'start_time': datetime.datetime(
2012, 12, 5, 0, 0, 0, 0),
'finish_time': None,
'message': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
},
FAKE_REQUEST_ID2: {'id': FAKE_ACTION_ID2,
'action': 'resize',
'instance_uuid': FAKE_UUID,
'request_id': FAKE_REQUEST_ID2,
'user_id': '789',
'project_id': '842',
'start_time': datetime.datetime(
2012, 12, 5, 1, 0, 0, 0),
'finish_time': None,
'message': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
}
}
FAKE_EVENTS = {
FAKE_ACTION_ID1: [{'id': 1,
'action_id': FAKE_ACTION_ID1,
'event': 'schedule',
'start_time': datetime.datetime(
2012, 12, 5, 1, 0, 2, 0),
'finish_time': datetime.datetime(
2012, 12, 5, 1, 2, 0, 0),
'result': 'Success',
'traceback': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
},
{'id': 2,
'action_id': FAKE_ACTION_ID1,
'event': 'compute_create',
'start_time': datetime.datetime(
2012, 12, 5, 1, 3, 0, 0),
'finish_time': datetime.datetime(
2012, 12, 5, 1, 4, 0, 0),
'result': 'Success',
'traceback': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
],
FAKE_ACTION_ID2: [{'id': 3,
'action_id': FAKE_ACTION_ID2,
'event': 'schedule',
'start_time': datetime.datetime(
2012, 12, 5, 3, 0, 0, 0),
'finish_time': datetime.datetime(
2012, 12, 5, 3, 2, 0, 0),
'result': 'Error',
'traceback': '',
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
]
}
def fake_action_event_start(*args):
return FAKE_EVENTS[FAKE_ACTION_ID1][0]
def fake_action_event_finish(*args):
return FAKE_EVENTS[FAKE_ACTION_ID1][0]
def stub_out_action_events(stubs):
stubs.Set(db, 'action_event_start', fake_action_event_start)
stubs.Set(db, 'action_event_finish', fake_action_event_finish)
|
acsone/stock-logistics-workflow | refs/heads/8.0 | procurement_jit_assign_move/__init__.py | 8 | # -*- coding: utf-8 -*-
# © 2015 Alexandre Fayolle <alexandre.fayolle@camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import models
|
mgamer/gyp | refs/heads/master | test/generator-output/gyptest-rules.py | 198 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
test.writable(test.workpath('rules'), False)
test.run_gyp('rules.gyp',
'--generator-output=' + test.workpath('gypfiles'),
chdir='rules')
test.writable(test.workpath('rules'), True)
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""
if test.format == 'xcode':
chdir = 'relocate/rules/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
"Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
"Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
"Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
"Hello from file4.in1\n")
test.pass_test()
|
kalahbrown/HueBigSQL | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/string_lookup/models.py | 113 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __str__(self):
return "Foo %s" % self.name
@python_2_unicode_compatible
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, related_name='normal_foo')
fwd = models.ForeignKey("Whiz")
back = models.ForeignKey("Foo")
def __str__(self):
return "Bar %s" % self.place.name
@python_2_unicode_compatible
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Whiz %s" % self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.OneToOneField('Base')
name = models.CharField(max_length=50)
def __str__(self):
return "Child %s" % self.name
@python_2_unicode_compatible
class Base(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Base %s" % self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.IPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
|
dennis-sheil/commandergenius | refs/heads/sdl_android | project/jni/python/src/Lib/plat-irix6/GET.py | 132 | # Symbols from <gl/get.h>
from warnings import warnpy3k
warnpy3k("the GET module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
BCKBUFFER = 0x1
FRNTBUFFER = 0x2
DRAWZBUFFER = 0x4
DMRGB = 0
DMSINGLE = 1
DMDOUBLE = 2
DMRGBDOUBLE = 5
HZ30 = 0
HZ60 = 1
NTSC = 2
HDTV = 3
VGA = 4
IRIS3K = 5
PR60 = 6
PAL = 9
HZ30_SG = 11
A343 = 14
STR_RECT = 15
VOF0 = 16
VOF1 = 17
VOF2 = 18
VOF3 = 19
SGI0 = 20
SGI1 = 21
SGI2 = 22
HZ72 = 23
GL_VIDEO_REG = 0x00800000
GLV_GENLOCK = 0x00000001
GLV_UNBLANK = 0x00000002
GLV_SRED = 0x00000004
GLV_SGREEN = 0x00000008
GLV_SBLUE = 0x00000010
GLV_SALPHA = 0x00000020
GLV_TTLGENLOCK = 0x00000080
GLV_TTLSYNC = GLV_TTLGENLOCK
GLV_GREENGENLOCK = 0x0000100
LEFTPLANE = 0x0001
RIGHTPLANE = 0x0002
BOTTOMPLANE = 0x0004
TOPPLANE = 0x0008
NEARPLANE = 0x0010
FARPLANE = 0x0020
## GETDEF = __GL_GET_H__
NOBUFFER = 0x0
BOTHBUFFERS = 0x3
DMINTENSITYSINGLE = 3
DMINTENSITYDOUBLE = 4
MONSPECIAL = 0x20
HZ50 = 3
MONA = 5
MONB = 6
MONC = 7
MOND = 8
MON_ALL = 12
MON_GEN_ALL = 13
CMAPMULTI = 0
CMAPONE = 1
|
zombi-x/android_kernel_lge_mako | refs/heads/mm6.0 | tools/perf/util/setup.py | 4998 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
UCNA/main | refs/heads/master | Scripts/CombineCorrelated.py | 1 | #!/usr/bin/python
# implementation of eLog 396 correlated error combination formalism
from math import *
from numpy import zeros,matrix
class MeasCombiner:
def __init__(self):
self.mu = []
self.err = []
self.corrs = {}
def new_meas_mu(self,m):
self.mu.append(m)
self.err.append([])
return len(self.mu)-1
def new_meas_err(self,meas_id,err):
self.err[meas_id].append(err)
err_id = (meas_id,len(self.err[meas_id])-1)
self.corrs[(err_id,err_id)] = 1.0
return err_id
def add_correlation(self,err_id_1,err_id_2,corr):
self.corrs[(err_id_1,err_id_2)] = corr
self.corrs[(err_id_2,err_id_1)] = corr
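    # calc_combo below implements inverse-covariance weighting: with M the
    # covariance matrix assembled from the correlated error components, the
    # weights are w = M^-1 * 1 / (1^T * M^-1 * 1), the combined value is
    # sum_i w_i * mu_i, and its uncertainty is sqrt(w^T * M * w).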
def calc_combo(self):
print "Corrs = ",self.corrs
self.ep_i = []
n = len(self.mu)
for i in range(n):
self.ep_i.append(0)
for j in range(len(self.err[i])):
for k in range(len(self.err[i])):
self.ep_i[-1] += self.err[i][j]*self.err[i][k]*self.corrs.get(((i,j),(i,k)),0)
self.ep_i[-1] = sqrt(self.ep_i[-1])
print "ep_i =",self.ep_i
self.C_ij = matrix(zeros((n,n)))
self.M_ij = matrix(zeros((n,n)))
for i in range(n):
for j in range(n):
for k in range(len(self.err[i])):
for l in range(len(self.err[j])):
self.M_ij[i,j] += self.err[i][k]*self.err[j][l]*self.corrs.get(((i,k),(j,l)),0)
self.C_ij[i,j] = self.M_ij[i,j] / (self.ep_i[i]*self.ep_i[j])
print "C_ij =",self.C_ij
print "M_ij =",self.M_ij
self.W = self.M_ij.getI() * matrix([[1] for i in range(n)])
self.W /= sum(self.W)
print "weight =",self.W
err = 0
combo = 0
for i in range(n):
combo += self.mu[i]*self.W[i,0]
for j in range(n):
err += self.W[i,0]*self.ep_i[i]*self.C_ij[i,j]*self.ep_i[j]*self.W[j,0]
err = sqrt(err)
print
print "err =",err
print "combo =",combo
def errcombo(self,errlist):
err = 0
n = len(self.mu)
for i in range(n):
for j in range(n):
for k in range(len(self.err[i])):
for l in range(len(self.err[j])):
if (i,k) in errlist and (j,l) in errlist:
err += self.W[i,0]*self.err[i][k]*self.W[j,0]*self.err[j][l]*self.corrs.get(((i,k),(j,l)),0)
err = sqrt(err)
return err
if __name__=="__main__":
# eLog 396 example
if 0:
MC = MeasCombiner()
mu0 = MC.new_meas_mu(100.0)
MC.new_meas_err(mu0,0.6)
err01 = MC.new_meas_err(mu0,0.3)
err02 = MC.new_meas_err(mu0,0.2)
mu1 = MC.new_meas_mu(100.0)
MC.new_meas_err(mu1,0.6)
err11 = MC.new_meas_err(mu1,0.3)
err12 = MC.new_meas_err(mu1,0.6)
MC.add_correlation(err01,err11,1.0)
MC.add_correlation(err02,err12,1.0)
MC.calc_combo()
# 2012 Before + After
if 1:
MC = MeasCombiner()
mu0 = MC.new_meas_mu(-0.11851)
stat0 = MC.new_meas_err(mu0,0.00075) # stat
err01 = MC.new_meas_err(mu0,0.000712) # syst
dst0 = MC.new_meas_err(mu0,0.000119) # depol stat
err02 = MC.new_meas_err(mu0,0.000640) # depol syst
mu1 = MC.new_meas_mu(-0.12100)
stat1 = MC.new_meas_err(mu1,0.00078) # stat
err11 = MC.new_meas_err(mu1,0.000726) # syst
dst1 = MC.new_meas_err(mu1,0.000387) # depol stat
err12 = MC.new_meas_err(mu1,0.000666) # depol syst
MC.add_correlation(err01,err11,1.0)
MC.add_correlation(err02,err12,1.0)
MC.calc_combo()
print "stat =",MC.errcombo([stat0,stat1])
print "syst =",MC.errcombo([err01,err11])
print "depol =",MC.errcombo([dst0,dst1,err02,err12])
print "systot =",MC.errcombo([err01,err11,dst0,dst1,err02,err12])
print
# 2010 + 2012 results, + side
if 1:
MC = MeasCombiner()
mu0 = MC.new_meas_mu(0.11954)
MC.new_meas_err(mu0,0.00055) # stat
err01 = MC.new_meas_err(mu0,0.00072) # syst
err02 = MC.new_meas_err(mu0,0.00067) # depol
mu1 = MC.new_meas_mu(0.11966)
MC.new_meas_err(mu1,0.00089) # stat
err11 = MC.new_meas_err(mu1,0.001254) # syst
err12 = MC.new_meas_err(mu1,0.000622) # depol
MC.add_correlation(err01,err11,1.0)
MC.calc_combo()
print
# 2010 + 2012 results, - side
if 1:
MC = MeasCombiner()
mu0 = MC.new_meas_mu(0.11954)
MC.new_meas_err(mu0,0.00055) # stat
err01 = MC.new_meas_err(mu0,0.00072) # syst
err02 = MC.new_meas_err(mu0,0.00067) # depol
mu1 = MC.new_meas_mu(0.11966)
MC.new_meas_err(mu1,0.00089) # stat
err11 = MC.new_meas_err(mu1,0.00123) # syst
MC.add_correlation(err01,err11,1.0)
MC.calc_combo()
print
|
ColOfAbRiX/ansible | refs/heads/devel | test/sanity/validate-modules/test_validate_modules_regex.py | 19 | #!/usr/bin/env python
# This is a standalone test for the regex inside validate-modules
# It is not suitable to add to the make tests target because the
# file under test is outside the test's sys.path AND has a hyphen
# in the name making it unimportable.
#
# To execute this by hand:
# 1) cd <checkoutdir>
# 2) source hacking/env-setup
# 3) PYTHONPATH=./lib nosetests -d -w test -v --nocapture sanity/validate-modules
import os
import re
from ansible.compat.tests import unittest
#TYPE_REGEX = re.compile(r'.*\stype\(.*')
#TYPE_REGEX = re.compile(r'.*(if|or)\stype\(.*')
#TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
#TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
#TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)type\(.*')
#TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)(?<!_)type\(.*')
TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)(?<!_)(?<!str\()type\(.*')
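# The negative lookbehinds exclude identifiers that merely end in "type(",
# e.g. get_interface_type(...), as well as the str(type(...)) idiom, so only
# direct comparisons such as "if type(foo) == Bar" are matched.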
class TestValidateModulesRegex(unittest.TestCase):
def test_type_regex(self):
# each of these examples needs to be matched or not matched
checks = [
['if type(foo) is Bar', True],
['if Bar is type(foo)', True],
['if type(foo) is not Bar', True],
['if Bar is not type(foo)', True],
['if type(foo) == Bar', True],
['if Bar == type(foo)', True],
['if type(foo)==Bar', True],
['if Bar==type(foo)', True],
['if type(foo) != Bar', True],
['if Bar != type(foo)', True],
['if type(foo)!=Bar', True],
['if Bar!=type(foo)', True],
['if foo or type(bar) != Bar', True],
['x = type(foo)', False],
["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False],
#cloud/amazon/ec2_group.py
["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False],
#files/patch.py
["p = type('Params', (), module.params)", False], #files/patch.py
#system/osx_defaults.py
["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True],
#system/osx_defaults.py
['raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)', False],
#network/nxos/nxos_interface.py
["if get_interface_type(interface) == 'svi':", False],
]
for idc,check in enumerate(checks):
cstring = check[0]
cexpected = check[1]
match = TYPE_REGEX.match(cstring)
if cexpected and not match:
assert False, "%s should have matched" % cstring
elif not cexpected and match:
assert False, "%s should not have matched" % cstring
|
girlygiggle/girlygiggle | refs/heads/master | contrib/wallettools/walletunlock.py | 782 | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:9332")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
TextusData/Mover | refs/heads/master | thirdparty/protobuf-2.4.1/gtest/test/gtest_xml_test_utils.py | 306 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
An exception is any attribute named "time", which needs only be
convertible to a floating-point number.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
    actual_attributes = actual_node.attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
"attribute numbers differ in element " + actual_node.tagName)
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
"expected attribute %s not found in element %s" %
(expected_attr.name, actual_node.tagName))
self.assertEquals(expected_attr.value, actual_attr.value,
" values of attribute %s in element %s differ" %
(expected_attr.name, actual_node.tagName))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
"number of child elements differ in element " + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
"testsuites": "name",
"testsuite": "name",
"testcase": "name",
"failure": "message",
}
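  # With these attributes, a <testcase name="Foo"> child, for example, is
  # keyed as children["Foo"] by _GetChildren below, while CDATA and text
  # content is merged under the key "detail".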
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
"Encountered unknown element <%s>" % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if "detail" not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children["detail"] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children["detail"].nodeValue += child.nodeValue
else:
self.fail("Encountered unexpected node type %d" % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The line number reported in the first line of the "message"
attribute of <failure> elements is replaced with a single asterisk.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName in ("testsuites", "testsuite", "testcase"):
time = element.getAttributeNode("time")
time.value = re.sub(r"^\d+(\.\d+)?$", "*", time.value)
elif element.tagName == "failure":
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Removes the source line number.
cdata = re.sub(r"^.*[/\\](.*:)\d+\n", "\\1*\n", child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r"\nStack trace:\n(.|\n)*",
"", cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
|
projectjamjar/masonjar | refs/heads/master | jamjar/jamjar/videos/migrations/0006_video_concert.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('concerts', '0001_initial'),
('videos', '0005_auto_20160118_0340'),
]
operations = [
migrations.AddField(
model_name='video',
name='concert',
field=models.ForeignKey(related_name='concert', default=None, to='concerts.Concert'),
preserve_default=False,
),
]
|
stacywsmith/ansible | refs/heads/devel | lib/ansible/plugins/lookup/etcd.py | 36 | # (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
author:
- Jan-Piet Mens (@jpmens)
lookup: etcd
version_added: "2.1"
short_description: get info from etcd server
description:
- Retrieves data from an etcd server
options:
_raw:
description:
- the list of keys to lookup on the etcd server
type: list
element_type: string
required: True
_etcd_url:
description:
- Environment variable with the url for the etcd server
default: 'http://127.0.0.1:4001'
env_vars:
- name: ANSIBLE_ETCD_URL
_etcd_version:
description:
- Environment variable with the etcd protocol version
default: 'v1'
env_vars:
- name: ANSIBLE_ETCD_VERSION
EXAMPLES:
- name: "a value from a locally running etcd"
debug: msg={{ lookup('etcd', 'foo') }}
RETURN:
_list:
description:
- list of values associated with input keys
type: strings
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
try:
import json
except ImportError:
import simplejson as json
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url
# this can be made configurable, but should not use ansible.cfg
ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
if os.getenv('ANSIBLE_ETCD_URL') is not None:
ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']
ANSIBLE_ETCD_VERSION = 'v1'
if os.getenv('ANSIBLE_ETCD_VERSION') is not None:
ANSIBLE_ETCD_VERSION = os.environ['ANSIBLE_ETCD_VERSION']
class Etcd:
def __init__(self, url=ANSIBLE_ETCD_URL, version=ANSIBLE_ETCD_VERSION,
validate_certs=True):
self.url = url
self.version = version
self.baseurl = '%s/%s/keys' % (self.url,self.version)
self.validate_certs = validate_certs
def get(self, key):
url = "%s/%s" % (self.baseurl, key)
data = None
value = ""
try:
r = open_url(url, validate_certs=self.validate_certs)
data = r.read()
        except Exception:
            # treat any fetch error as an empty value
            return value
try:
# {"action":"get","key":"/name","value":"Jane Jolie","index":5}
item = json.loads(data)
if self.version == 'v1':
if 'value' in item:
value = item['value']
else:
if 'node' in item:
value = item['node']['value']
if 'errorCode' in item:
value = "ENOENT"
        except Exception:
            # let JSON parsing errors propagate
            raise
return value
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
validate_certs = kwargs.get('validate_certs', True)
etcd = Etcd(validate_certs=validate_certs)
ret = []
for term in terms:
key = term.split()[0]
value = etcd.get(key)
ret.append(value)
return ret
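# Illustrative local check (assumed; not part of the plugin, and the key
# name 'message' is hypothetical): the Etcd helper can be exercised on its
# own, without the Ansible lookup machinery.
if __name__ == '__main__':
    print(Etcd(validate_certs=False).get('message'))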
|
acsone/social | refs/heads/8.0 | mail_follower_custom_notification/models/mail_notification.py | 2 | # -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models
class MailNotification(models.Model):
_inherit = 'mail.notification'
@api.multi
def get_partners_to_email(self, message):
partner_ids = super(MailNotification, self)\
.get_partners_to_email(message)
for this in self:
follower = self.env['mail.followers'].search([
('res_model', '=', message.model),
('res_id', '=', message.res_id),
('partner_id', '=', this.partner_id.id),
'|', '|',
('force_nomail_subtype_ids', '=', message.subtype_id.id),
('force_mail_subtype_ids', '=', message.subtype_id.id),
('force_own_subtype_ids', '=', message.subtype_id.id),
])
if not follower:
continue
if (message.subtype_id in follower.force_mail_subtype_ids or
message.subtype_id in follower.force_own_subtype_ids) and\
this.partner_id.id not in partner_ids:
partner_ids.append(this.partner_id.id)
if message.subtype_id in follower.force_nomail_subtype_ids and\
this.partner_id.id in partner_ids:
partner_ids.remove(this.partner_id.id)
return partner_ids
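# Illustrative sketch (assumed; requires a running Odoo registry, so shown
# as a comment only): a follower whose subtype is listed in
# force_mail_subtype_ids or force_own_subtype_ids is appended to the e-mail
# recipients even when the standard logic skipped it, while a subtype in
# force_nomail_subtype_ids removes the partner from the recipients.
#
#   partner_ids = notification.get_partners_to_email(message)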
|
thnee/ansible | refs/heads/devel | lib/ansible/modules/remote_management/manageiq/manageiq_alerts.py | 11 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_alerts
short_description: Configuration of alerts in ManageIQ
extends_documentation_fragment: manageiq
version_added: '2.5'
author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
description:
- The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
options:
state:
description:
- absent - alert should not exist,
- present - alert should exist,
required: False
choices: ['absent', 'present']
default: 'present'
description:
description:
- The unique alert description in ManageIQ.
- Required when state is "absent" or "present".
resource_type:
description:
- The entity type for the alert in ManageIQ. Required when state is "present".
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
'ExtManagementSystem', 'MiddlewareServer']
expression_type:
description:
- Expression type.
default: hash
choices: ["hash", "miq"]
expression:
description:
- The alert expression for ManageIQ.
      - Can either be in the "Miq Expression" format or the "Hash Expression" format.
- Required if state is "present".
enabled:
description:
- Enable or disable the alert. Required if state is "present".
type: bool
options:
description:
- Additional alert options, such as notification type and frequency
'''
EXAMPLES = '''
- name: Add an alert with a "hash expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 01
options:
notifications:
email:
to: ["example@example.com"]
from: "example@example.com"
resource_type: ContainerNode
expression:
eval_method: hostd_log_threshold
mode: internal
options: {}
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Add an alert with a "miq expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 02
options:
notifications:
email:
to: ["example@example.com"]
from: "example@example.com"
resource_type: Vm
expression_type: miq
expression:
and:
- CONTAINS:
tag: Vm.managed-environment
value: prod
- not:
CONTAINS:
tag: Vm.host.managed-environment
value: prod
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete an alert from ManageIQ
manageiq_alerts:
state: absent
description: Test Alert 01
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQAlert(object):
""" Represent a ManageIQ alert. Can be initialized with both the format
we receive from the server and the format we get from the user.
"""
def __init__(self, alert):
self.description = alert['description']
self.db = alert['db']
self.enabled = alert['enabled']
self.options = alert['options']
self.hash_expression = None
        self.miq_expression = None
if 'hash_expression' in alert:
self.hash_expression = alert['hash_expression']
if 'miq_expression' in alert:
self.miq_expression = alert['miq_expression']
if 'exp' in self.miq_expression:
# miq_expression is a field that needs a special case, because
# it's returned surrounded by a dict named exp even though we don't
# send it with that dict.
self.miq_expression = self.miq_expression['exp']
def __eq__(self, other):
""" Compare two ManageIQAlert objects
"""
return self.__dict__ == other.__dict__
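# Illustrative sketch (field values are made up): two alerts compare equal
# only when every attribute matches, which is how update_alert() below
# detects that no change is needed.
#
#   a = ManageIQAlert(dict(description='x', db='Vm', enabled=True, options={}))
#   b = ManageIQAlert(dict(description='x', db='Vm', enabled=True, options={}))
#   assert a == b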
class ManageIQAlerts(object):
""" Object to execute alert management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
def get_alerts(self):
""" Get all alerts from ManageIQ
"""
try:
response = self.client.get(self.alerts_url + '?expand=resources')
except Exception as e:
self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
return response.get('resources', [])
def validate_hash_expression(self, expression):
""" Validate a 'hash expression' alert definition
"""
# hash expressions must have the following fields
for key in ['options', 'eval_method', 'mode']:
if key not in expression:
msg = "Hash expression is missing required field {key}".format(key=key)
self.module.fail_json(msg)
def create_alert_dict(self, params):
""" Create a dict representing an alert
"""
if params['expression_type'] == 'hash':
            # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
self.validate_hash_expression(params['expression'])
expression_type = 'hash_expression'
else:
# actually miq_expression, but we call it "expression" for backwards-compatibility
expression_type = 'expression'
        # build the alert
alert = dict(description=params['description'],
db=params['resource_type'],
options=params['options'],
enabled=params['enabled'])
# add the actual expression.
alert.update({expression_type: params['expression']})
return alert
def add_alert(self, alert):
""" Add a new alert to ManageIQ
"""
try:
result = self.client.post(self.alerts_url, action='create', resource=alert)
msg = "Alert {description} created successfully: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Creating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to create a hash expression
msg = msg.format(description=alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def delete_alert(self, alert):
""" Delete an alert
"""
try:
result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
id=alert['id']),
action="delete")
msg = "Alert {description} deleted: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Deleting alert {description} failed: {error}"
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def update_alert(self, existing_alert, new_alert):
""" Update an existing alert with the values from `new_alert`
"""
new_alert_obj = ManageIQAlert(new_alert)
if new_alert_obj == ManageIQAlert(existing_alert):
# no change needed - alerts are identical
return dict(changed=False, msg="No update needed")
else:
try:
url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
result = self.client.post(url, action="edit", resource=new_alert)
# make sure that the update was indeed successful by comparing
# the result to the expected result.
if new_alert_obj == ManageIQAlert(result):
# success!
msg = "Alert {description} updated successfully: {details}"
msg = msg.format(description=existing_alert['description'], details=result)
return dict(changed=True, msg=msg)
else:
# unexpected result
msg = "Updating alert {description} failed, unexpected result {details}"
msg = msg.format(description=existing_alert['description'], details=result)
self.module.fail_json(msg=msg)
except Exception as e:
msg = "Updating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to update a hash expression
msg = msg.format(description=existing_alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=existing_alert['description'], error=e)
self.module.fail_json(msg=msg)
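# Illustrative shape (values are made up) of the dict built by
# create_alert_dict() for a hash expression:
#
#   {'description': 'Test Alert 01',
#    'db': 'ContainerNode',
#    'options': {'notifications': {...}},
#    'enabled': True,
#    'hash_expression': {'eval_method': 'hostd_log_threshold',
#                        'mode': 'internal', 'options': {}}}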
def main():
argument_spec = dict(
description=dict(type='str'),
resource_type=dict(type='str', choices=['Vm',
'ContainerNode',
'MiqServer',
'Host',
'Storage',
'EmsCluster',
'ExtManagementSystem',
'MiddlewareServer']),
expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
expression=dict(type='dict'),
options=dict(type='dict'),
enabled=dict(type='bool'),
state=dict(required=False, default='present',
choices=['present', 'absent']),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(argument_spec=argument_spec,
required_if=[('state', 'present', ['description',
'resource_type',
'expression',
'enabled',
'options']),
('state', 'absent', ['description'])])
state = module.params['state']
description = module.params['description']
manageiq = ManageIQ(module)
manageiq_alerts = ManageIQAlerts(manageiq)
existing_alert = manageiq.find_collection_resource_by("alert_definitions",
description=description)
# we need to add or update the alert
if state == "present":
alert = manageiq_alerts.create_alert_dict(module.params)
if not existing_alert:
# an alert with this description doesn't exist yet, let's create it
res_args = manageiq_alerts.add_alert(alert)
else:
# an alert with this description exists, we might need to update it
res_args = manageiq_alerts.update_alert(existing_alert, alert)
# this alert should not exist
elif state == "absent":
# if we have an alert with this description, delete it
if existing_alert:
res_args = manageiq_alerts.delete_alert(existing_alert)
else:
# it doesn't exist, and that's okay
msg = "Alert '{description}' does not exist in ManageIQ"
msg = msg.format(description=description)
res_args = dict(changed=False, msg=msg)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
|
tornadozou/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/argmax_op_test.py | 56 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.test_session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = ans.eval()
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.asarray(100 * np.random.randn(200), dtype=dtype)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testDim(self, dtype):
x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
# Check that argmin and argmax match numpy along all axes
for axis in range(-5, 5):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
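  # Sanity reference in plain numpy (values are made up): argmax along
  # axis 1 of a (2, 3) array yields a (2,) array of indices, which is the
  # behaviour the TensorFlow ops are checked against above.
  #   np.argmax([[1, 5, 2], [7, 0, 3]], axis=1)  # -> array([1, 0])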
def testFloat(self):
self._testBasic(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.test_session(use_gpu=True):
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = ans.eval()
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.test_session(use_gpu=True):
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = ans.eval()
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testDim(np.int64)
def testEmpty(self):
with self.test_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
def testDefaultAxis(self):
with self.test_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
if __name__ == "__main__":
test.main()
|
stefanv/aandete | refs/heads/master | app/lib/pygments/scanner.py | 31 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
    them just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner(object):
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
eos = property(eos, eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
"""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolen that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
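# Minimal usage sketch (the input text is made up): scanning token by token
# and inspecting the last match.
if __name__ == '__main__':
    s = Scanner('begin end')
    assert s.scan(r'\w+') and s.match == 'begin'
    assert s.scan(r'\s+')
    assert s.scan(r'\w+') and s.match == 'end'
    assert s.eos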
|
mglukhikh/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/core/files/utils.py | 901 | class FileProxyMixin(object):
"""
    A mixin class used to forward file methods to an underlying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
def __iter__(self):
return iter(self.file)
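# Illustrative sketch (assumed; Python 2 era, matching this module): the
# mixin forwards file methods to the wrapped object.
if __name__ == '__main__':
    from StringIO import StringIO

    class FileProxy(FileProxyMixin):
        def __init__(self, file):
            self.file = file

    print(FileProxy(StringIO('hello')).read())  # -> hello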
|
ospaceteam/outerspace | refs/heads/master | server/lib/pyasn1/codec/der/decoder.py | 37 | # DER decoder
from pyasn1.type import univ
from pyasn1.codec.cer import decoder
decode = decoder.Decoder(decoder.tagMap, decoder.typeMap)
|
rgom/Pydev | refs/heads/development | plugins/org.python.pydev.jython/Lib/xml/etree/ElementTree.py | 22 | #
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys
import re
import warnings
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
from . import ElementPath
except ImportError:
ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this; might be a better idea to look
# for tag/attrib/text attributes
return isinstance(element, Element) or hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None. Note that if there was no text, this
# attribute may be either None or an empty string, depending on
# the parser.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None. Note that if there was no text, this attribute
# may be either None or an empty string, depending on the parser.
tail = None # text after end tag, if any
# constructor
def __init__(self, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
return self.__class__(tag, attrib)
##
# (Experimental) Copies the current element. This creates a
# shallow copy; subelements will be shared with the original tree.
#
# @return A new element instance.
def copy(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
##
# Returns the number of subelements. Note that this only counts
# full elements; to check if there's any content in an element, you
# have to check both the length and the <b>text</b> attribute.
#
# @return The number of subelements.
def __len__(self):
return len(self._children)
def __nonzero__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
##
# Returns the given subelement, by index.
#
# @param index What subelement to return.
# @return The given subelement.
# @exception IndexError If the given element does not exist.
def __getitem__(self, index):
return self._children[index]
##
# Replaces the given subelement, by index.
#
# @param index What subelement to replace.
# @param element The new element value.
# @exception IndexError If the given element does not exist.
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
##
# Deletes the given subelement, by index.
#
# @param index What subelement to delete.
# @exception IndexError If the given element does not exist.
def __delitem__(self, index):
del self._children[index]
##
# Adds a subelement to the end of this element. In document order,
# the new element will appear after the last existing subelement (or
# directly after the text, if it's the first subelement), but before
# the end tag for this element.
#
# @param element The element to add.
def append(self, element):
# assert iselement(element)
self._children.append(element)
##
# Appends subelements from a sequence.
#
# @param elements A sequence object with zero or more elements.
# @since 1.3
def extend(self, elements):
# for element in elements:
# assert iselement(element)
self._children.extend(elements)
##
# Inserts a subelement at the given position in this element.
#
# @param index Where to insert the new subelement.
def insert(self, index, element):
# assert iselement(element)
self._children.insert(index, element)
##
# Removes a matching subelement. Unlike the <b>find</b> methods,
# this method compares elements based on identity, not on tag
# value or contents. To remove subelements by other means, the
# easiest way is often to use a list comprehension to select what
# elements to keep, and use slice assignment to update the parent
# element.
#
# @param element What element to remove.
# @exception ValueError If a matching element could not be found.
def remove(self, element):
# assert iselement(element)
self._children.remove(element)
##
# (Deprecated) Returns all subelements. The elements are returned
# in document order.
#
# @return A list of subelements.
# @defreturn list of Element instances
def getchildren(self):
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
##
# Finds the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
return ElementPath.find(self, path, namespaces)
##
# Finds text for the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
return ElementPath.findtext(self, path, default, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or other sequence containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
return ElementPath.findall(self, path, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
return ElementPath.iterfind(self, path, namespaces)
##
# Resets an element. This function removes all subelements, clears
# all attributes, and sets the <b>text</b> and <b>tail</b> attributes
# to None.
def clear(self):
self.attrib.clear()
self._children = []
self.text = self.tail = None
##
# Gets an element attribute. Equivalent to <b>attrib.get</b>, but
# some implementations may handle this a bit more efficiently.
#
# @param key What attribute to look for.
# @param default What to return if the attribute was not found.
# @return The attribute value, or the default value, if the
# attribute was not found.
# @defreturn string or None
def get(self, key, default=None):
return self.attrib.get(key, default)
##
# Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
# but some implementations may handle this a bit more efficiently.
#
# @param key What attribute to set.
# @param value The attribute value.
def set(self, key, value):
self.attrib[key] = value
##
# Gets a list of attribute names. The names are returned in an
# arbitrary order (just like for an ordinary Python dictionary).
# Equivalent to <b>attrib.keys()</b>.
#
# @return A list of element attribute names.
# @defreturn list of strings
def keys(self):
return self.attrib.keys()
##
# Gets element attributes, as a sequence. The attributes are
# returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
#
# @return A list of (name, value) tuples for all attributes.
# @defreturn list of (string, string) tuples
def items(self):
return self.attrib.items()
##
# Creates a tree iterator. The iterator loops over this element
# and all subelements, in document order, and returns all elements
# with a matching tag.
# <p>
# If the tree structure is modified during iteration, new or removed
# elements may or may not be included. To get a stable set, use the
# list() function on the iterator, and loop over the resulting list.
#
# @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements.
# @defreturn iterator
def iter(self, tag=None):
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
for e in e.iter(tag):
yield e
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Creates a text iterator. The iterator loops over this element
# and all subelements, in document order, and returns all inner
# text.
#
# @return An iterator containing all inner text.
# @defreturn iterator
def itertext(self):
tag = self.tag
if not isinstance(tag, basestring) and tag is not None:
return
if self.text:
yield self.text
for e in self:
for s in e.itertext():
yield s
if e.tail:
yield e.tail
# compatibility
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
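# Illustrative usage (tag names are made up): building a two-level tree.
#
#   root = Element("root")
#   child = SubElement(root, "child", id="1")
#   child.text = "hello"
#   # serializes as <root><child id="1">hello</child></root>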
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
element = Element(Comment)
element.text = text
return element
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __hash__(self):
return hash(self.text)
def __cmp__(self, other):
if isinstance(other, QName):
return cmp(self.text, other.text)
return cmp(self.text, other)
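# Illustrative usage (the URI is made up): both constructor forms wrap the
# same value.
#
#   QName("{http://example.com/ns}tag").text
#   QName("http://example.com/ns", "tag").text  # -> '{http://example.com/ns}tag'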
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree(object):
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
# assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object. If a file object is
# given, it only has to implement a <b>read(n)</b> method.
# @keyparam parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return The document root element.
# @defreturn Element
# @exception ParseError If the parser fails to parse the document.
def parse(self, source, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if not parser:
parser = XMLParser(target=TreeBuilder())
while 1:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def iter(self, tag=None):
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Finds the first toplevel element with given tag.
# Same as getroot().find(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
##
# Finds the element text for the first toplevel element with given
# tag. Same as getroot().findtext(path).
#
# @param path What toplevel element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
##
# Finds all toplevel elements with the given tag.
# Same as getroot().findall(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
##
# Finds all matching subelements, by tag name or path.
# Same as getroot().iterfind(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
##
# Writes the element tree to a file, as XML.
#
# @def write(file, **options)
# @param file A file name, or a file object opened for writing.
# @param **options Options, given as keyword arguments.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8. None is default.
# @keyparam default_namespace Sets the default XML namespace (for "xmlns").
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
def write(self, file_or_filename,
# keyword arguments
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None):
# assert self._root is not None
if not method:
method = "xml"
elif method not in _serialize:
# FIXME: raise an ImportError for c14n if ElementC14N is missing?
raise ValueError("unknown method %r" % method)
if hasattr(file_or_filename, "write"):
file = file_or_filename
else:
file = open(file_or_filename, "wb")
write = file.write
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
elif xml_declaration or (xml_declaration is None and
encoding not in ("utf-8", "us-ascii")):
if method == "xml":
write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
if method == "text":
_serialize_text(write, self._root, encoding)
else:
qnames, namespaces = _namespaces(
self._root, encoding, default_namespace
)
serialize = _serialize[method]
serialize(write, self._root, encoding, qnames, namespaces)
if file_or_filename is not file:
file.close()
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def encode(text):
return text.encode(encoding)
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = encode("%s:%s" % (prefix, tag))
else:
qnames[qname] = encode(tag) # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = encode(qname)
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, basestring):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _encode(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _encode(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_xml(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v, encoding)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem):
write(">")
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_xml(write, e, encoding, qnames, None)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, encoding, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text, encoding))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text, encoding))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k.encode(encoding),
_escape_attrib(v, encoding)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v, encoding)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(_encode(text, encoding))
else:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
def _serialize_text(write, elem, encoding):
for part in elem.itertext():
write(part.encode(encoding))
if elem.tail:
write(elem.tail.encode(encoding))
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in _namespace_map.items():
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
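# Illustrative usage (prefix and URI are made up): after
#   register_namespace("ex", "http://example.com/ns")
# elements in that namespace serialize as <ex:tag .../> instead of the
# generated <ns0:tag .../> form.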
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
return "".join(data)
##
# Generates a string representation of an XML element, including all
# subelements. The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
def tostringlist(element, encoding=None, method=None):
class dummy:
pass
data = []
file = dummy()
file.write = data.append
ElementTree(element).write(file, encoding, method=method)
# FIXME: merge small fragments into larger parts
return data
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout)
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
tree = ElementTree()
tree.parse(source, parser)
return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
if not parser:
parser = XMLParser(target=TreeBuilder())
return _IterParseIterator(source, events, parser, close_source)
class _IterParseIterator(object):
def __init__(self, source, events, parser, close_source=False):
self._file = source
self._close_file = close_source
self._events = []
self._index = 0
self._error = None
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except AttributeError:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
try:
uri = (uri or "").encode("ascii")
except UnicodeError:
pass
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event)
def next(self):
while 1:
try:
item = self._events[self._index]
self._index += 1
return item
except IndexError:
pass
if self._error:
e = self._error
self._error = None
raise e
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._error = exc
else:
self._root = self._parser.close()
self._parser = None
def __iter__(self):
return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
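# Example (added for illustration, not part of the original module):
# embedding an "XML literal" directly in Python code.
def _example_xml_literal():
    elem = XML("<root><child>text</child></root>")
    return elem.find("child").text  # -> "text"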
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
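# Example (added for illustration, not part of the original module):
# assembling a document from string fragments, e.g. chunks read from a
# socket or produced by a generator.
def _example_fromstringlist_usage():
    fragments = ["<root>", "<item>1</item>", "</root>"]
    return fromstringlist(fragments).find("item").text  # -> "1"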
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder(object):
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
##
# Flushes the builder buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
def close(self):
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
def data(self, data):
self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
def start(self, tag, attrs):
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
def end(self, tag):
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
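# Example (added for illustration, not part of the original module):
# driving a TreeBuilder by hand, the way a custom non-XML parser would.
# Each start/data/end call below mirrors one event from such a parser.
def _example_treebuilder_usage():
    builder = TreeBuilder()
    builder.start("root", {})
    builder.start("child", {"id": "1"})
    builder.data("hello")
    builder.end("child")
    builder.end("root")
    return builder.close()  # the assembled <root> Element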
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser(object):
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# callbacks
parser.DefaultHandlerExpand = self._default
parser.StartElementHandler = self._start
parser.EndElementHandler = self._end
parser.CharacterDataHandler = self._data
# optional callbacks
parser.CommentHandler = self._comment
parser.ProcessingInstructionHandler = self._pi
# let expat do the buffering, if supported
try:
self._parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
self._parser.ordered_attributes = 1
self._parser.specified_attributes = 1
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixtext(self, text):
# convert text string to ascii, if possible
try:
return text.encode("ascii")
except UnicodeError:
return text
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name = self._fixtext(name)
return name
def _start(self, tag, attrib_in):
fixname = self._fixname
fixtext = self._fixtext
tag = fixname(tag)
attrib = {}
for key, value in attrib_in.items():
attrib[fixname(key)] = fixtext(value)
return self.target.start(tag, attrib)
def _start_list(self, tag, attrib_in):
fixname = self._fixname
fixtext = self._fixtext
tag = fixname(tag)
attrib = {}
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
return self.target.start(tag, attrib)
def _data(self, text):
return self.target.data(self._fixtext(text))
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _comment(self, data):
try:
comment = self.target.comment
except AttributeError:
pass
else:
return comment(self._fixtext(data))
def _pi(self, target, data):
try:
pi = self.target.pi
except AttributeError:
pass
else:
return pi(self._fixtext(target), self._fixtext(data))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
self.target.data(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self._parser.ErrorLineNumber,
self._parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self._parser.ErrorLineNumber
err.offset = self._parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if pubid:
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype is not self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# (Deprecated) Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
def doctype(self, name, pubid, system):
"""This method of XMLParser is deprecated."""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
try:
self._parser.Parse(data, 0)
except self._error, v:
self._raiseerror(v)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
try:
self._parser.Parse("", 1) # end of data
except self._error, v:
self._raiseerror(v)
tree = self.target.close()
del self.target, self._parser # get rid of circular references
return tree
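# Example (added for illustration, not part of the original module): the
# feed()/close() protocol used to parse data that arrives in chunks.
def _example_xmlparser_usage(chunks=("<root><a>", "1</a></root>")):
    parser = XMLParser()
    for chunk in chunks:
        parser.feed(chunk)
    return parser.close()  # the root Element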
# compatibility
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
from ElementC14N import _serialize_c14n
_serialize["c14n"] = _serialize_c14n
except ImportError:
pass
|
axthorpe/cs3240-labdemo | refs/heads/master | helper.py | 2 | def greeting(msg):
print(msg) |
hunch/hunch-gift-app | refs/heads/master | django/db/backends/mysql/creation.py | 12 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
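    # Worked example (illustrative): with TEST_CHARSET='utf8' and
    # TEST_COLLATION='utf8_general_ci' in the database settings, this
    # returns "CHARACTER SET utf8 COLLATE utf8_general_ci"; with neither
    # set, it returns an empty string.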
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
|
shiquanwang/pylearn2 | refs/heads/master | pylearn2/utils/mnist_ubyte.py | 5 | """
Low-level utilities for reading in raw MNIST files.
"""
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__email__ = "wardefar@iro"
__maintainer__ = "David Warde-Farley"
import struct
import numpy
MNIST_IMAGE_MAGIC = 2051
MNIST_LABEL_MAGIC = 2049
class open_if_filename(object):
"""
.. todo::
WRITEME
Parameters
----------
f : WRITEME
mode : WRITEME
buffering : WRITEME
"""
def __init__(self, f, mode='r', buffering=-1):
self._f = f
self._mode = mode
self._buffering = buffering
self._handle = None
def __enter__(self):
"""
.. todo::
WRITEME
"""
if isinstance(self._f, basestring):
self._handle = open(self._f, self._mode, self._buffering)
else:
self._handle = self._f
return self._handle
def __exit__(self, exc_type, exc_value, traceback):
"""
.. todo::
WRITEME
"""
if self._handle is not self._f:
self._handle.close()
def read_mnist_images(fn, dtype=None):
"""
Read MNIST images from the original ubyte file format.
Parameters
----------
fn : str or object
Filename/path from which to read labels, or an open file
object for the same (will not be closed for you).
dtype : str or object, optional
A NumPy dtype or string that can be converted to one.
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : ndarray, shape (n_images, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was boolean, the resulting array will
be boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float or complex dtype, the values
will be mapped to the unit interval [0, 1], with pixel values
that were 255 in the original unsigned byte representation
equal to 1.0.
"""
with open_if_filename(fn, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError('wrong magic number reading MNIST image file: ' +
fn)
array = numpy.fromfile(f, dtype='uint8').reshape((number, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
# If the user wants booleans, threshold at half the range.
        if dtype.kind == 'b':
array = array >= 128
else:
# Otherwise, just convert.
array = array.astype(dtype)
# I don't know why you'd ever turn MNIST into complex,
# but just in case, check for float *or* complex dtypes.
# Either way, map to the unit interval.
if dtype.kind in ('f', 'c'):
array /= 255.
return array
def read_mnist_labels(fn):
"""
Read MNIST labels from the original ubyte file format.
Parameters
----------
fn : str or object
Filename/path from which to read labels, or an open file
object for the same (will not be closed for you).
Returns
-------
labels : ndarray, shape (nlabels,)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with open_if_filename(fn, 'rb') as f:
magic, number = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError('wrong magic number reading MNIST label file: ' +
fn)
array = numpy.fromfile(f, dtype='uint8')
return array
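# Example (added for illustration, not part of the original module; the
# file names below are hypothetical placeholders): load the training set
# as floats in [0, 1] together with the integer labels.
def _example_load_mnist(images_fn="train-images-idx3-ubyte",
                        labels_fn="train-labels-idx1-ubyte"):
    images = read_mnist_images(images_fn, dtype='float32')
    labels = read_mnist_labels(labels_fn)
    return images, labels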
|
xkortex/SigFlux | refs/heads/master | sigflux/wave/custom_cwt.py | 1 | import numpy as np
from scipy.signal import fftconvolve
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
        length = min(10 * widths[ii], len(data))
        cwt[ii,:] = fftconvolve(data, wavelet(length,
                                widths[ii]), mode='same')
"""
wavelet_data = wavelet(10, 2)
output = np.zeros([len(widths), len(data)], dtype=wavelet_data.dtype)
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = fftconvolve(data, wavelet_data,
mode='same')
return output |
tmpgit/intellij-community | refs/heads/master | python/testData/MockSdk3.2/python_stubs/sys.py | 100 | # encoding: utf-8
# module sys
# from (built-in)
# by generator 1.135
"""
This module provides access to some objects used or maintained by the
interpreter and to functions that interact strongly with the interpreter.
Dynamic objects:
argv -- command line arguments; argv[0] is the script pathname if known
path -- module search path; path[0] is the script directory, else ''
modules -- dictionary of loaded modules
displayhook -- called to show results in an interactive session
excepthook -- called to handle any uncaught exception other than SystemExit
To customize printing in an interactive session or to install a custom
top-level exception handler, assign other functions to replace these.
stdin -- standard input file object; used by input()
stdout -- standard output file object; used by print()
stderr -- standard error object; used for error messages
By assigning other file objects (or objects that behave like files)
to these, it is possible to redirect all of the interpreter's I/O.
last_type -- type of last uncaught exception
last_value -- value of last uncaught exception
last_traceback -- traceback of last uncaught exception
These three are only available in an interactive session after a
traceback has been printed.
Static objects:
builtin_module_names -- tuple of module names built into this interpreter
copyright -- copyright notice pertaining to this interpreter
exec_prefix -- prefix used to find the machine-specific Python library
executable -- absolute path of the executable binary of the Python interpreter
float_info -- a struct sequence with information about the float implementation.
float_repr_style -- string indicating the style of repr() output for floats
hash_info -- a struct sequence with information about the hash algorithm.
hexversion -- version information encoded as a single integer
implementation -- Python implementation information.
int_info -- a struct sequence with information about the int implementation.
maxsize -- the largest supported length of containers.
maxunicode -- the value of the largest Unicode codepoint
platform -- platform identifier
prefix -- prefix used to find the Python library
thread_info -- a struct sequence with information about the thread implementation.
version -- the version of this interpreter as a string
version_info -- version information as a named tuple
dllhandle -- [Windows only] integer handle of the Python DLL
winver -- [Windows only] version number of the Python DLL
__stdin__ -- the original stdin; don't touch!
__stdout__ -- the original stdout; don't touch!
__stderr__ -- the original stderr; don't touch!
__displayhook__ -- the original displayhook; don't touch!
__excepthook__ -- the original excepthook; don't touch!
Functions:
displayhook() -- print an object to the screen, and save it in builtins._
excepthook() -- print an exception and its traceback to sys.stderr
exc_info() -- return thread-safe information about the current exception
exit() -- exit the interpreter by raising SystemExit
getdlopenflags() -- returns flags to be used for dlopen() calls
getprofile() -- get the global profiling function
getrefcount() -- return the reference count for an object (plus one :-)
getrecursionlimit() -- return the max recursion depth for the interpreter
getsizeof() -- return the size of an object in bytes
gettrace() -- get the global debug tracing function
setcheckinterval() -- control how often the interpreter checks for events
setdlopenflags() -- set the flags to be used for dlopen() calls
setprofile() -- set the global profiling function
setrecursionlimit() -- set the max recursion depth for the interpreter
settrace() -- set the global debug tracing function
"""
# no imports
# Variables with simple values
api_version = 1013
base_exec_prefix = 'C:\\Python34'
base_prefix = 'C:\\Python34'
byteorder = 'little'
copyright = 'Copyright (c) 2001-2014 Python Software Foundation.\nAll Rights Reserved.\n\nCopyright (c) 2000 BeOpen.com.\nAll Rights Reserved.\n\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\nAll Rights Reserved.\n\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\nAll Rights Reserved.'
dllhandle = 1705771008
dont_write_bytecode = False
executable = 'C:\\Python34\\python.exe'
exec_prefix = 'C:\\Python34'
float_repr_style = 'short'
hexversion = 50594288
maxsize = 2147483647
maxunicode = 1114111
platform = 'win32'
prefix = 'C:\\Python34'
version = '3.4.1 (v3.4.1:c0e311e010fc, May 18 2014, 10:38:22) [MSC v.1600 32 bit (Intel)]'
winver = '3.4'
_home = None
__egginsert = 1
__plen = 5
# functions
def callstats(): # real signature unknown; restored from __doc__
"""
callstats() -> tuple of integers
Return a tuple of function call statistics, if CALL_PROFILE was defined
when Python was built. Otherwise, return None.
When enabled, this function returns detailed, implementation-specific
details about the number of function calls executed. The return value is
a 11-tuple where the entries in the tuple are counts of:
0. all function calls
1. calls to PyFunction_Type objects
2. PyFunction calls that do not create an argument tuple
3. PyFunction calls that do not create an argument tuple
and bypass PyEval_EvalCodeEx()
4. PyMethod calls
5. PyMethod calls on bound methods
6. PyType calls
7. PyCFunction calls
8. generator calls
9. All other calls
10. Number of stack pops performed by call_function()
"""
return ()
def call_tracing(func, args): # real signature unknown; restored from __doc__
"""
call_tracing(func, args) -> object
Call func(*args), while tracing is enabled. The tracing state is
saved, and restored afterwards. This is intended to be called from
a debugger from a checkpoint, to recursively debug some other code.
"""
return object()
def displayhook(p_object): # real signature unknown; restored from __doc__
"""
displayhook(object) -> None
Print an object to sys.stdout and also save it in builtins._
"""
pass
def excepthook(exctype, value, traceback): # real signature unknown; restored from __doc__
"""
excepthook(exctype, value, traceback) -> None
Handle an exception by displaying it with a traceback on sys.stderr.
"""
pass
def exc_info(): # real signature unknown; restored from __doc__
"""
exc_info() -> (type, value, traceback)
Return information about the most recent exception caught by an except
clause in the current stack frame or in an older stack frame.
"""
pass
def exit(status=None): # real signature unknown; restored from __doc__
"""
exit([status])
Exit the interpreter by raising SystemExit(status).
If the status is omitted or None, it defaults to zero (i.e., success).
If the status is an integer, it will be used as the system exit status.
If it is another kind of object, it will be printed and the system
exit status will be one (i.e., failure).
"""
pass
def getallocatedblocks(): # real signature unknown; restored from __doc__
"""
getallocatedblocks() -> integer
Return the number of memory blocks currently allocated, regardless of their
size.
"""
return 0
def getcheckinterval(): # real signature unknown; restored from __doc__
""" getcheckinterval() -> current check interval; see setcheckinterval(). """
pass
def getdefaultencoding(): # real signature unknown; restored from __doc__
"""
getdefaultencoding() -> string
Return the current default string encoding used by the Unicode
implementation.
"""
return ""
def getfilesystemencoding(): # real signature unknown; restored from __doc__
"""
getfilesystemencoding() -> string
Return the encoding used to convert Unicode filenames in
operating system filenames.
"""
return ""
def getprofile(): # real signature unknown; restored from __doc__
"""
getprofile()
Return the profiling function set with sys.setprofile.
See the profiler chapter in the library manual.
"""
pass
def getrecursionlimit(): # real signature unknown; restored from __doc__
"""
getrecursionlimit()
Return the current value of the recursion limit, the maximum depth
of the Python interpreter stack. This limit prevents infinite
recursion from causing an overflow of the C stack and crashing Python.
"""
pass
def getrefcount(p_object): # real signature unknown; restored from __doc__
"""
getrefcount(object) -> integer
Return the reference count of object. The count returned is generally
one higher than you might expect, because it includes the (temporary)
reference as an argument to getrefcount().
"""
return 0
def getsizeof(p_object, default): # real signature unknown; restored from __doc__
"""
getsizeof(object, default) -> int
Return the size of object in bytes.
"""
return 0
def getswitchinterval(): # real signature unknown; restored from __doc__
""" getswitchinterval() -> current thread switch interval; see setswitchinterval(). """
pass
def gettrace(): # real signature unknown; restored from __doc__
"""
gettrace()
Return the global debug tracing function set with sys.settrace.
See the debugger chapter in the library manual.
"""
pass
def getwindowsversion(): # real signature unknown; restored from __doc__
"""
getwindowsversion()
Return information about the running version of Windows as a named tuple.
The members are named: major, minor, build, platform, service_pack,
service_pack_major, service_pack_minor, suite_mask, and product_type. For
backward compatibility, only the first 5 items are available by indexing.
All elements are numbers, except service_pack which is a string. Platform
may be 0 for win32s, 1 for Windows 9x/ME, 2 for Windows NT/2000/XP/Vista/7,
3 for Windows CE. Product_type may be 1 for a workstation, 2 for a domain
controller, 3 for a server.
"""
pass
def intern(string): # real signature unknown; restored from __doc__
"""
intern(string) -> string
``Intern'' the given string. This enters the string in the (global)
table of interned strings whose purpose is to speed up dictionary lookups.
Return the string itself or the previously interned string object with the
same value.
"""
return ""
def setcheckinterval(n): # real signature unknown; restored from __doc__
"""
setcheckinterval(n)
Tell the Python interpreter to check for asynchronous events every
n instructions. This also affects how often thread switches occur.
"""
pass
def setprofile(function): # real signature unknown; restored from __doc__
"""
setprofile(function)
Set the profiling function. It will be called on each function call
and return. See the profiler chapter in the library manual.
"""
pass
def setrecursionlimit(n): # real signature unknown; restored from __doc__
"""
setrecursionlimit(n)
Set the maximum depth of the Python interpreter stack to n. This
limit prevents infinite recursion from causing an overflow of the C
stack and crashing Python. The highest possible limit is platform-
dependent.
"""
pass
def setswitchinterval(n): # real signature unknown; restored from __doc__
"""
setswitchinterval(n)
Set the ideal thread switching delay inside the Python interpreter
The actual frequency of switching threads can be lower if the
interpreter executes long sequences of uninterruptible code
(this is implementation-specific and workload-dependent).
The parameter must represent the desired switching delay in seconds
A typical value is 0.005 (5 milliseconds).
"""
pass
def settrace(function): # real signature unknown; restored from __doc__
"""
settrace(function)
Set the global debug tracing function. It will be called on each
function call. See the debugger chapter in the library manual.
"""
pass
def _clear_type_cache(): # real signature unknown; restored from __doc__
"""
_clear_type_cache() -> None
Clear the internal type lookup cache.
"""
pass
def _current_frames(): # real signature unknown; restored from __doc__
"""
_current_frames() -> dictionary
Return a dictionary mapping each current thread T's thread id to T's
current stack frame.
This function should be used for specialized purposes only.
"""
return {}
def _debugmallocstats(): # real signature unknown; restored from __doc__
"""
_debugmallocstats()
Print summary info to stderr about the state of
pymalloc's structures.
In Py_DEBUG mode, also perform some expensive internal consistency
checks.
"""
pass
def _getframe(depth=None): # real signature unknown; restored from __doc__
"""
_getframe([depth]) -> frameobject
Return a frame object from the call stack. If optional integer depth is
given, return the frame object that many calls below the top of the stack.
If that is deeper than the call stack, ValueError is raised. The default
for depth is zero, returning the frame at the top of the call stack.
This function should be used for internal and specialized
purposes only.
"""
pass
def __displayhook__(*args, **kwargs): # real signature unknown
"""
displayhook(object) -> None
Print an object to sys.stdout and also save it in builtins._
"""
pass
def __excepthook__(*args, **kwargs): # real signature unknown
"""
excepthook(exctype, value, traceback) -> None
Handle an exception by displaying it with a traceback on sys.stderr.
"""
pass
def __interactivehook__(): # reliably restored by inspect
# no doc
pass
# classes
from .object import object
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
""" Load a built-in module. """
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
argv = [] # real value of type <class 'list'> skipped
builtin_module_names = () # real value of type <class 'tuple'> skipped
flags = (
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
)
float_info = (
1.7976931348623157e+308,
1024,
308,
2.2250738585072014e-308,
-1021,
-307,
15,
53,
2.220446049250313e-16,
2,
1,
)
hash_info = (
32,
2147483647,
314159,
0,
1000003,
'siphash24',
64,
128,
0,
)
implementation = None # (!) real value is ''
int_info = (
15,
2,
)
meta_path = [
__loader__,
None, # (!) real value is ''
None, # (!) real value is ''
None, # (!) real value is ''
]
modules = {} # real value of type <class 'dict'> skipped
path = [
'C:\\work\\ultimate\\out\\classes\\production\\python-helpers',
'C:\\Python34\\lib\\site-packages\\setuptools-4.0.1-py3.4.egg',
'C:\\Windows\\system32\\python34.zip',
'C:\\Python34\\DLLs',
'C:\\Python34\\lib',
'C:\\Python34',
'C:\\Python34\\lib\\site-packages',
]
path_hooks = [
None, # (!) real value is ''
None, # (!) real value is ''
]
path_importer_cache = {} # real value of type <class 'dict'> skipped
stderr = None # (!) real value is ''
stdin = None # (!) real value is ''
stdout = None # (!) real value is ''
thread_info = (
'nt',
None,
None,
)
version_info = (
3,
4,
1,
'final',
0,
)
warnoptions = []
_mercurial = (
'CPython',
'v3.4.1',
'c0e311e010fc',
)
_xoptions = {}
__spec__ = None # (!) real value is ''
__stderr__ = stderr
__stdin__ = stdin
__stdout__ = stdout
# intermittent names
exc_value = Exception()
exc_traceback = None
|
pymedusa/SickRage | refs/heads/master | ext/github/Team.py | 2 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Jan Orel <jan.orel@gooddata.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Aron Culotta <aronwc@gmail.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2016 mattjmorrison <mattjmorrison@mattjmorrison.com> #
# Copyright 2018 Isuru Fernando <isuruf@gmail.com> #
# Copyright 2018 Jacopo Notarstefano <jacopo.notarstefano@gmail.com> #
# Copyright 2018 James D'Amato <james.j.damato@gmail.com> #
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com> #
# Copyright 2018 Manu Hortet <manuhortet@gmail.com> #
# Copyright 2018 Michał Górny <mgorny@gentoo.org> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Tim Boring <tboring@hearst.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import absolute_import
import six
import github.GithubObject
import github.NamedUser
import github.Organization
import github.PaginatedList
import github.Repository
import github.TeamDiscussion
from . import Consts
class Team(github.GithubObject.CompletableGithubObject):
"""
This class represents Teams. The reference can be found here http://developer.github.com/v3/orgs/teams/
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value, "name": self._name.value})
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def members_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._members_count)
return self._members_count.value
@property
def members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def description(self):
"""
:type: string
"""
self._completeIfNotSet(self._description)
return self._description.value
@property
def permission(self):
"""
:type: string
"""
self._completeIfNotSet(self._permission)
return self._permission.value
@property
def repos_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._repos_count)
return self._repos_count.value
@property
def repositories_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repositories_url)
return self._repositories_url.value
@property
def slug(self):
"""
:type: string
"""
self._completeIfNotSet(self._slug)
return self._slug.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def organization(self):
"""
:type: :class:`github.Organization.Organization`
"""
self._completeIfNotSet(self._organization)
return self._organization.value
@property
def privacy(self):
"""
:type: string
"""
self._completeIfNotSet(self._privacy)
return self._privacy.value
def add_to_members(self, member):
"""
This API call is deprecated. Use `add_membership` instead.
https://developer.github.com/v3/teams/members/#deprecation-notice-1
:calls: `PUT /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/members/" + member._identity
)
def add_membership(self, member, role=github.GithubObject.NotSet):
"""
:calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_
        :param member: :class:`github.NamedUser.NamedUser`
:param role: string
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
assert role is github.GithubObject.NotSet or isinstance(
role, (str, six.text_type)
), role
if role is not github.GithubObject.NotSet:
assert role in ["member", "maintainer"]
put_parameters = {
"role": role,
}
else:
put_parameters = {
"role": "member",
}
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/memberships/" + member._identity, input=put_parameters
)
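    # Usage sketch (illustrative; the token, organization name, team id
    # and login below are hypothetical placeholders):
    #   gh = github.Github("<token>")
    #   team = gh.get_organization("my-org").get_team(42)
    #   team.add_membership(gh.get_user("octocat"), role="maintainer")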
def add_to_repos(self, repo):
"""
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/repos/" + repo._identity
)
def set_repo_permission(self, repo, permission):
"""
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:param permission: string
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
put_parameters = {
"permission": permission,
}
headers, data = self._requester.requestJsonAndCheck(
"PUT", self.url + "/repos/" + repo._identity, input=put_parameters
)
def delete(self):
"""
:calls: `DELETE /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(
self,
name,
description=github.GithubObject.NotSet,
permission=github.GithubObject.NotSet,
privacy=github.GithubObject.NotSet,
):
"""
:calls: `PATCH /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param name: string
:param description: string
:param permission: string
:param privacy: string
:rtype: None
"""
assert isinstance(name, (str, six.text_type)), name
assert description is github.GithubObject.NotSet or isinstance(
description, (str, six.text_type)
), description
assert permission is github.GithubObject.NotSet or isinstance(
permission, (str, six.text_type)
), permission
assert privacy is github.GithubObject.NotSet or isinstance(
privacy, (str, six.text_type)
), privacy
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if permission is not github.GithubObject.NotSet:
post_parameters["permission"] = permission
if privacy is not github.GithubObject.NotSet:
post_parameters["privacy"] = privacy
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url, input=post_parameters
)
self._useAttributes(data)
def get_discussions(self):
"""
:calls: `GET /teams/:id/discussions <https://developer.github.com/v3/teams/discussions/#list-discussions>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.TeamDiscussion.TeamDiscussion`
"""
return github.PaginatedList.PaginatedList(
github.TeamDiscussion.TeamDiscussion,
self._requester,
self.url + "/discussions",
None,
headers={"Accept": Consts.mediaTypeTeamDiscussionsPreview},
)
def get_members(self, role=github.GithubObject.NotSet):
"""
:calls: `GET /teams/:id/members <https://developer.github.com/v3/teams/members/#list-team-members>`_
:param role: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
assert role is github.GithubObject.NotSet or isinstance(
role, (str, six.text_type)
), role
url_parameters = dict()
if role is not github.GithubObject.NotSet:
assert role in ["member", "maintainer", "all"]
url_parameters["role"] = role
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/members",
url_parameters,
)
def get_repos(self):
"""
:calls: `GET /teams/:id/repos <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository, self._requester, self.url + "/repos", None
)
def invitations(self):
"""
:calls: `GET /teams/:id/invitations <https://developer.github.com/v3/teams/members>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/invitations",
None,
headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
)
def has_in_members(self, member):
"""
:calls: `GET /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson(
"GET", self.url + "/members/" + member._identity
)
return status == 204
def has_in_repos(self, repo):
"""
:calls: `GET /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(repo, github.Repository.Repository), repo
status, headers, data = self._requester.requestJson(
"GET", self.url + "/repos/" + repo._identity
)
return status == 204
def remove_membership(self, member):
"""
        :calls: `DELETE /teams/:team_id/memberships/:username <https://developer.github.com/v3/teams/members/#remove-team-membership>`_
        :param member: :class:`github.NamedUser.NamedUser`
        :rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/memberships/" + member._identity
)
def remove_from_members(self, member):
"""
This API call is deprecated. Use `remove_membership` instead:
https://developer.github.com/v3/teams/members/#deprecation-notice-2
:calls: `DELETE /teams/:id/members/:user <http://developer.github.com/v3/orgs/teams>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/members/" + member._identity
)
def remove_from_repos(self, repo):
"""
:calls: `DELETE /teams/:id/repos/:owner/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"DELETE", self.url + "/repos/" + repo._identity
)
@property
def _identity(self):
return self.id
def _initAttributes(self):
self._id = github.GithubObject.NotSet
self._members_count = github.GithubObject.NotSet
self._members_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._description = github.GithubObject.NotSet
self._permission = github.GithubObject.NotSet
self._repos_count = github.GithubObject.NotSet
self._repositories_url = github.GithubObject.NotSet
self._slug = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._organization = github.GithubObject.NotSet
self._privacy = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "members_count" in attributes: # pragma no branch
self._members_count = self._makeIntAttribute(attributes["members_count"])
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "permission" in attributes: # pragma no branch
self._permission = self._makeStringAttribute(attributes["permission"])
if "repos_count" in attributes: # pragma no branch
self._repos_count = self._makeIntAttribute(attributes["repos_count"])
if "repositories_url" in attributes: # pragma no branch
self._repositories_url = self._makeStringAttribute(
attributes["repositories_url"]
)
if "slug" in attributes: # pragma no branch
self._slug = self._makeStringAttribute(attributes["slug"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "organization" in attributes: # pragma no branch
self._organization = self._makeClassAttribute(
github.Organization.Organization, attributes["organization"]
)
if "privacy" in attributes: # pragma no branch
self._privacy = self._makeStringAttribute(attributes["privacy"])
|
awkspace/ansible | refs/heads/devel | lib/ansible/modules/network/system/net_system.py | 26 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_system
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage the system attributes on network devices
description:
- This module provides declarative management of node system attributes
on network devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: network_agnostic
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- Provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
        lookups. This argument accepts a list of DNS servers. See
        examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain name
net_system:
hostname: ios01
domain_name: test.example.com
    domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
net_system:
state: absent
- name: configure DNS lookup sources
net_system:
lookup_source: MgmtEth0/0/CPU0/0
- name: configure name servers
net_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- hostname ios01
- ip domain name test.example.com
"""
|
QingChenmsft/azure-cli | refs/heads/master | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_client_factory.py | 3 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def _compute_client_factory(**_):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(ResourceType.MGMT_COMPUTE)
def cf_ni(_):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
# TODO: Remove hard coded api-version once
# https://github.com/Azure/azure-rest-api-specs/issues/570
# is fixed.
ni = get_mgmt_service_client(ResourceType.MGMT_NETWORK).network_interfaces
ni.api_version = '2016-03-30'
return ni
def cf_public_ip_addresses():
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
public_ip_ops = get_mgmt_service_client(ResourceType.MGMT_NETWORK).public_ip_addresses
return public_ip_ops
def cf_avail_set(_):
return _compute_client_factory().availability_sets
def cf_vm(_):
return _compute_client_factory().virtual_machines
def cf_vm_ext(_):
return _compute_client_factory().virtual_machine_extensions
def cf_vm_ext_image(_):
return _compute_client_factory().virtual_machine_extension_images
def cf_vm_image(_):
return _compute_client_factory().virtual_machine_images
def cf_usage(_):
return _compute_client_factory().usage
def cf_vmss(_):
return _compute_client_factory().virtual_machine_scale_sets
def cf_vmss_vm(_):
return _compute_client_factory().virtual_machine_scale_set_vms
def cf_vm_sizes(_):
return _compute_client_factory().virtual_machine_sizes
def cf_disks(_):
return _compute_client_factory().disks
def cf_snapshots(_):
return _compute_client_factory().snapshots
def cf_images(_):
return _compute_client_factory().images
def cf_run_commands(_):
return _compute_client_factory().virtual_machine_run_commands
|
sysalexis/kbengine | refs/heads/master | kbe/src/lib/python/Lib/test/double_const.py | 203 | from test.support import TestFailed
# A test for SF bug 422177: manifest float constants varied way too much in
# precision depending on whether Python was loading a module for the first
# time, or reloading it from a precompiled .pyc. The "expected" failure
# mode is that when test_import imports this after all .pyc files have been
# erased, it passes, but when test_import imports this from
# double_const.pyc, it fails. This indicates a woeful loss of precision in
# the marshal format for doubles. It's also possible that repr() doesn't
# produce enough digits to get reasonable precision for this box.
PI = 3.14159265358979324
TWOPI = 6.28318530717958648
PI_str = "3.14159265358979324"
TWOPI_str = "6.28318530717958648"
# Verify that the double x is within a few bits of eval(x_str).
def check_ok(x, x_str):
assert x > 0.0
x2 = eval(x_str)
assert x2 > 0.0
diff = abs(x - x2)
# If diff is no larger than 3 ULP (wrt x2), then diff/8 is no larger
# than 0.375 ULP, so adding diff/8 to x2 should have no effect.
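    # Worked example (illustrative, assuming IEEE-754 doubles): for
    # x2 = TWOPI ~= 6.28, which lies in [4, 8), 1 ULP is 4 * 2**-52 =
    # 2**-50 ~= 8.9e-16, so the check tolerates a drift of roughly
    # 3 ULP ~= 2.7e-15 before failing.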
if x2 + (diff / 8.) != x2:
raise TestFailed("Manifest const %s lost too much precision " % x_str)
check_ok(PI, PI_str)
check_ok(TWOPI, TWOPI_str)
|
publicRoman/spark | refs/heads/branch-2.2-kubernetes | python/pyspark/sql/group.py | 60 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import Column, _to_seq, _to_java_column, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
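# Usage note (illustrative): the two decorators above replace the stub
# method bodies below with thin wrappers over the JVM GroupedData object;
# e.g. df.groupBy().sum('age') forwards to the Java-side sum(Seq('age'))
# and wraps the resulting Java DataFrame in a Python DataFrame.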
class GroupedData(object):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. note:: Experimental
.. versionadded:: 1.3
"""
def __init__(self, jgd, sql_ctx):
self._jgd = jgd
self.sql_ctx = sql_ctx
@ignore_unicode_prefix
@since(1.3)
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions are `avg`, `max`, `min`, `sum`, `count`.
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
@since(1.3)
def count(self):
"""Counts the number of records for each group.
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
@since(1.3)
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
@since(1.3)
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
@since(1.3)
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
@since(1.6)
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and performs the specified aggregation.
There are two versions of the pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
:param pivot_col: Name of the column to pivot.
:param values: List of values that will be translated to columns in the output DataFrame.
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self.sql_ctx)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
vasyarv/edx-ora2 | refs/heads/master | openassessment/xblock/xml.py | 5 | """
Serialize and deserialize OpenAssessment XBlock content to/from XML.
"""
from uuid import uuid4 as uuid
import lxml.etree as etree
import pytz
import dateutil.parser
import defusedxml.ElementTree as safe_etree
from data_conversion import update_assessments_format
from defaults import DEFAULT_RUBRIC_FEEDBACK_TEXT
class UpdateFromXmlError(Exception):
"""
Error occurred while deserializing the OpenAssessment XBlock content from XML.
"""
pass
class ValidationError(UpdateFromXmlError):
"""
The XML definition is not semantically valid.
"""
pass
def _sort_by_order_num(items):
"""
Sort dictionaries by the key "order_num".
If no order number is specified, the item sorts with a default order of 0.
Ignores non-dict items in the list.
Args:
items (list): List of dictionaries to sort.
Returns:
dict
"""
return sorted([
el for el in items
if isinstance(el, dict)
], key=lambda el: el.get('order_num', 0)
)
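# Illustrative behavior (hypothetical data, not from the original source):
# _sort_by_order_num([{'order_num': 2, 'v': 'b'}, {'order_num': 1, 'v': 'a'}, 'junk'])
# returns the two dicts ordered 'a' then 'b'; the non-dict 'junk' entry is
# filtered out, and a dict with no 'order_num' key sorts as if its order were 0.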
def _safe_get_text(element):
"""
Retrieve the text from the element, safely handling empty elements.
Args:
element (lxml.etree.Element): The XML element.
Returns:
unicode
"""
return unicode(element.text) if element.text is not None else u""
def _serialize_prompts(prompts_root, prompts_list):
"""
Serialize prompts as XML, adding children to the XML with root
node `prompts_root`.
Args:
prompts_root (lxml.etree.Element): The root node of the tree.
prompts_list (list): List of prompt dictionaries.
Returns:
None
"""
if not isinstance(prompts_list, list):
return
for prompt in prompts_list:
prompt_el = etree.SubElement(prompts_root, 'prompt')
# Prompt description
prompt_description = etree.SubElement(prompt_el, 'description')
prompt_description.text = unicode(prompt.get('description', u''))
def _serialize_options(options_root, options_list):
"""
Serialize rubric criterion options as XML, adding children to the XML
with root node `options_root`.
We don't make any assumptions about the contents of `options_list`,
and we handle unexpected inputs gracefully.
Args:
options_root (lxml.etree.Element): The root node of the tree.
options_list (list): List of options dictionaries.
Returns:
None
"""
# Sort the options by order number, then serialize as XML
for option in _sort_by_order_num(options_list):
option_el = etree.SubElement(options_root, 'option')
# Points (default to 0)
option_el.set('points', unicode(option.get('points', 0)))
# Name (default to a UUID)
option_name = etree.SubElement(option_el, 'name')
if 'name' in option:
option_name.text = unicode(option['name'])
else:
option_name.text = unicode(uuid().hex)
# Label (default to the option name, then an empty string)
option_label = etree.SubElement(option_el, 'label')
option_label.text = unicode(option.get('label', option.get('name', u'')))
# Explanation (default to empty str)
option_explanation = etree.SubElement(option_el, 'explanation')
option_explanation.text = unicode(option.get('explanation', u''))
def _serialize_criteria(criteria_root, criteria_list):
"""
Serialize rubric criteria as XML, adding children to the XML
with root node `criteria_root`.
We don't make any assumptions about the contents of `criteria_list`,
and we handle unexpected inputs gracefully.
Args:
criteria_root (lxml.etree.Element): The root node of the tree.
criteria_list (list): List of criteria dictionaries.
Returns:
None
"""
# Sort the criteria by order number, then serialize as XML
for criterion in _sort_by_order_num(criteria_list):
criterion_el = etree.SubElement(criteria_root, 'criterion')
# Criterion name (default to a UUID)
criterion_name = etree.SubElement(criterion_el, u'name')
if 'name' in criterion:
criterion_name.text = unicode(criterion['name'])
else:
criterion_name.text = unicode(uuid().hex)
# Criterion label (default to the name, then an empty string)
criterion_label = etree.SubElement(criterion_el, 'label')
criterion_label.text = unicode(criterion.get('label', criterion.get('name', u'')))
# Criterion prompt (default to empty string)
criterion_prompt = etree.SubElement(criterion_el, 'prompt')
criterion_prompt.text = unicode(criterion.get('prompt', u''))
# Criterion feedback disabled, optional, or required
# If disabled, do not set the attribute.
if criterion.get('feedback') in ["optional", "required"]:
criterion_el.set('feedback', criterion['feedback'])
# Criterion options
options_list = criterion.get('options', None)
if isinstance(options_list, list):
_serialize_options(criterion_el, options_list)
def serialize_rubric(rubric_root, oa_block):
"""
Serialize a rubric dictionary as XML, adding children to the XML
with root node `rubric_root`.
This is very liberal in what it accepts. If the rubric dict persisted
by the XBlock is invalid for some reason, we still want to generate XML
so that Studio authors can fix the error.
Args:
rubric_root (lxml.etree.Element): The root node of the tree to add rubric children to.
oa_block (OpenAssessmentBlock): The OpenAssessmentBlock whose rubric is serialized.
Returns:
None
"""
# Criteria
criteria_list = oa_block.rubric_criteria
if isinstance(criteria_list, list):
_serialize_criteria(rubric_root, criteria_list)
if oa_block.rubric_feedback_prompt is not None:
feedback_prompt = etree.SubElement(rubric_root, 'feedbackprompt')
feedback_prompt.text = unicode(oa_block.rubric_feedback_prompt)
if oa_block.rubric_feedback_default_text is not None:
feedback_text = etree.SubElement(rubric_root, 'feedback_default_text')
feedback_text.text = unicode(oa_block.rubric_feedback_default_text)
def parse_date(date_str, name=""):
"""
Attempt to parse a date string into ISO format (without milliseconds)
Returns `None` if this cannot be done.
Args:
date_str (str): The date string to parse.
Kwargs:
name (str): the name to return in an error to the origin of the call if an error occurs.
Returns:
unicode in ISO format (without milliseconds) if the date string is
parse-able. None if parsing fails.
Raises:
UpdateFromXmlError
"""
if date_str == "":
return None
try:
# Get the date into ISO format
parsed_date = dateutil.parser.parse(unicode(date_str)).replace(tzinfo=pytz.utc)
formatted_date = parsed_date.strftime("%Y-%m-%dT%H:%M:%S")
return unicode(formatted_date)
except (ValueError, TypeError):
msg = (
'The format of the given date ({date}) for the {name} is invalid. '
'Make sure the date is formatted as YYYY-MM-DDTHH:MM:SS.'
).format(date=date_str, name=name)
raise UpdateFromXmlError(msg)
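# Illustrative calls (hypothetical, not from the original source):
# parse_date(u'2014-02-27T09:30:00') -> u'2014-02-27T09:30:00'
# parse_date(u'') -> None
# parse_date(u'not-a-date', name='submission due date') raises
# UpdateFromXmlError with a message that names the offending field.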
def _parse_boolean(boolean_str):
"""
Attempt to parse a boolean string into a boolean value. Leniently accepts
both 'True' and 'true'; any other value is treated as False.
Args:
boolean_str (unicode): The boolean string to parse.
Returns:
The boolean value of the string: True if the string equals 'True' or
'true', otherwise False.
"""
return boolean_str in ['True', 'true']
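# Illustrative behavior: _parse_boolean(u'true') and _parse_boolean(u'True')
# both return True; any other value, including u'TRUE', u'1' and u'', returns False.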
def _parse_prompts_xml(root):
"""
Parse <prompts> element in the OpenAssessment XBlock's content XML.
Args:
root (lxml.etree.Element): The root node of the tree.
Returns:
list of prompts dictionaries.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
"""
prompts_list = []
prompts_el = root.find('prompts')
if prompts_el is not None:
for prompt in prompts_el.findall('prompt'):
prompt_dict = dict()
# Prompt description
prompt_description = prompt.find('description')
if prompt_description is not None:
prompt_dict['description'] = _safe_get_text(prompt_description)
else:
raise UpdateFromXmlError('Every "prompt" element must contain a "description" element.')
prompts_list.append(prompt_dict)
else:
# For backwards compatibility. Initially a single prompt element was added in
# the rubric element.
rubric_el = root.find('rubric')
prompt_el = rubric_el.find('prompt')
prompt_description = ''
if prompt_el is not None:
prompt_description = _safe_get_text(prompt_el)
prompts_list.append(
{
'description': prompt_description,
}
)
return prompts_list
def _parse_options_xml(options_root):
"""
Parse <options> element in the OpenAssessment XBlock's content XML.
Args:
options_root (lxml.etree.Element): The root of the tree.
Returns:
list of option dictionaries, as defined in the Rubric model of the peer grading app.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
"""
options_list = []
order_num = 0
for option in options_root.findall('option'):
option_dict = dict()
# Option order number (sequential)
option_dict['order_num'] = order_num
order_num += 1
# Option points -- must be an integer!
if 'points' in option.attrib:
try:
option_dict['points'] = int(option.get('points'))
except ValueError:
raise UpdateFromXmlError('The value for "points" must be an integer.')
else:
raise UpdateFromXmlError('Every "option" element must contain a "points" attribute.')
# Option name
option_name = option.find('name')
if option_name is not None:
option_dict['name'] = _safe_get_text(option_name)
else:
raise UpdateFromXmlError('Every "option" element must contain a "name" element.')
# Option label
# Backwards compatibility: Older problem definitions won't have this.
# If no label is defined, default to the option name.
option_label = option.find('label')
option_dict['label'] = (
_safe_get_text(option_label)
if option_label is not None
else option_dict['name']
)
# Option explanation
option_explanation = option.find('explanation')
if option_explanation is not None:
option_dict['explanation'] = _safe_get_text(option_explanation)
else:
raise UpdateFromXmlError('Every "option" element must contain an "explanation" element.')
# Add the options dictionary to the list
options_list.append(option_dict)
return options_list
def _parse_criteria_xml(criteria_root):
"""
Parse <criteria> element in the OpenAssessment XBlock's content XML.
Args:
criteria_root (lxml.etree.Element): The root node of the tree.
Returns:
list of criteria dictionaries, as defined in the Rubric model of the peer grading app.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
"""
criteria_list = []
order_num = 0
for criterion in criteria_root.findall('criterion'):
criterion_dict = dict()
# Criterion order number (sequential)
criterion_dict['order_num'] = order_num
order_num += 1
# Criterion name
criterion_name = criterion.find('name')
if criterion_name is not None:
criterion_dict['name'] = _safe_get_text(criterion_name)
else:
raise UpdateFromXmlError('Every "criterion" element must contain a "name" element.')
# Criterion label
# Backwards compatibility: Older problem definitions won't have this,
# so if it isn't set, default to the criterion name.
criterion_label = criterion.find('label')
criterion_dict['label'] = (
_safe_get_text(criterion_label)
if criterion_label is not None
else criterion_dict['name']
)
# Criterion prompt
criterion_prompt = criterion.find('prompt')
if criterion_prompt is not None:
criterion_dict['prompt'] = _safe_get_text(criterion_prompt)
else:
raise UpdateFromXmlError('Every "criterion" element must contain a "prompt" element.')
# Criterion feedback (disabled, optional, or required)
criterion_feedback = criterion.get('feedback', 'disabled')
if criterion_feedback in ['optional', 'disabled', 'required']:
criterion_dict['feedback'] = criterion_feedback
else:
raise UpdateFromXmlError('Invalid value for "feedback" attribute: if specified, it must be set to "optional" or "required".')
# Criterion options
criterion_dict['options'] = _parse_options_xml(criterion)
# Add the newly constructed criterion dict to the list
criteria_list.append(criterion_dict)
return criteria_list
def parse_rubric_xml(rubric_root):
"""
Parse <rubric> element in the OpenAssessment XBlock's content XML.
Args:
rubric_root (lxml.etree.Element): The root of the <rubric> node in the tree.
Returns:
dict, a serialized representation of a rubric, as defined by the peer grading serializers.
Raises:
UpdateFromXmlError: The XML definition is invalid or the XBlock could not be updated.
InvalidRubricError: The rubric was not semantically valid.
"""
rubric_dict = dict()
feedback_prompt_el = rubric_root.find('feedbackprompt')
if feedback_prompt_el is not None:
rubric_dict['feedbackprompt'] = _safe_get_text(feedback_prompt_el)
else:
rubric_dict['feedbackprompt'] = None
feedback_text_el = rubric_root.find('feedback_default_text')
if feedback_text_el is not None:
rubric_dict['feedback_default_text'] = _safe_get_text(feedback_text_el)
else:
rubric_dict['feedback_default_text'] = None
# Criteria
rubric_dict['criteria'] = _parse_criteria_xml(rubric_root)
return rubric_dict
def parse_examples_xml(examples):
"""
Parse <example> (training examples) from the XML.
Args:
examples (list of lxml.etree.Element): The <example> elements to parse.
Returns:
list of example dicts
Raises:
UpdateFromXmlError
"""
examples_list = []
for example_el in examples:
example_dict = dict()
# Retrieve the answers from the training example
answers_list = list()
answer_elements = example_el.findall('answer')
if len(answer_elements) != 1:
raise UpdateFromXmlError(u'Each "example" element must contain exactly one "answer" element')
answer_part_elements = answer_elements[0].findall('part')
if len(answer_part_elements) > 0:
for answer_part_element in answer_part_elements:
answers_list.append(_safe_get_text(answer_part_element))
else:
# Initially example answers had only one part.
answers_list.append(_safe_get_text(answer_elements[0]))
example_dict['answer'] = {"parts": [{"text": text} for text in answers_list]}
# Retrieve the options selected from the training example
example_dict['options_selected'] = []
for select_el in example_el.findall('select'):
if 'criterion' not in select_el.attrib:
raise UpdateFromXmlError(u'Each "select" element must have a "criterion" attribute')
if 'option' not in select_el.attrib:
raise UpdateFromXmlError(u'Each "select" element must have an "option" attribute')
example_dict['options_selected'].append({
'criterion': unicode(select_el.get('criterion')),
'option': unicode(select_el.get('option'))
})
examples_list.append(example_dict)
return examples_list
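# For reference, an <example> element roughly matching what this parser
# accepts (illustrative XML, not taken from the original source):
# <example>
#     <answer><part>Some answer text</part></answer>
#     <select criterion="Ideas" option="Good" />
# </example>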
def parse_assessments_xml(assessments_root):
"""
Parse the <assessments> element in the OpenAssessment XBlock's content XML.
Args:
assessments_root (lxml.etree.Element): The root of the <assessments> node in the tree.
Returns:
list of assessment dicts
Raises:
UpdateFromXmlError
"""
assessments_list = []
for assessment in assessments_root.findall('assessment'):
assessment_dict = dict()
# Assessment name
if 'name' in assessment.attrib:
assessment_dict['name'] = unicode(assessment.get('name'))
else:
raise UpdateFromXmlError('All "assessment" elements must contain a "name" element.')
# Assessment start
if 'start' in assessment.attrib:
# Example-based assessment is NOT allowed to have a start date
if assessment_dict['name'] == 'example-based-assessment':
raise UpdateFromXmlError('Example-based assessment cannot have a start date')
# Other assessment types CAN have a start date
parsed_start = parse_date(assessment.get('start'), name="{} start date".format(assessment_dict['name']))
if parsed_start is not None:
assessment_dict['start'] = parsed_start
else:
assessment_dict['start'] = None
# Assessment due
if 'due' in assessment.attrib:
# Example-based assessment is NOT allowed to have a due date
if assessment_dict['name'] == 'example-based-assessment':
raise UpdateFromXmlError('Example-based assessment cannot have a due date')
# Other assessment types CAN have a due date
parsed_due = parse_date(assessment.get('due'), name="{} due date".format(assessment_dict['name']))
if parsed_due is not None:
assessment_dict['due'] = parsed_due
else:
assessment_dict['due'] = None
# Assessment must_grade
if 'must_grade' in assessment.attrib:
try:
assessment_dict['must_grade'] = int(assessment.get('must_grade'))
except ValueError:
raise UpdateFromXmlError('The "must_grade" value must be a positive integer.')
# Assessment must_be_graded_by
if 'must_be_graded_by' in assessment.attrib:
try:
assessment_dict['must_be_graded_by'] = int(assessment.get('must_be_graded_by'))
except ValueError:
raise UpdateFromXmlError('The "must_be_graded_by" value must be a positive integer.')
# Training examples
examples = assessment.findall('example')
# Student training and AI Grading should always have examples set, even if it's an empty list.
# (Validation rules, applied later, are responsible for
# ensuring that users specify at least one example).
# All assessments except for Student Training and AI (example-based-assessment) types ignore examples.
if assessment_dict['name'] == 'student-training':
assessment_dict['examples'] = parse_examples_xml(examples)
if assessment_dict['name'] == 'example-based-assessment':
assessment_dict['examples'] = parse_examples_xml(examples)
assessment_dict['algorithm_id'] = unicode(assessment.get('algorithm_id', 'ease'))
# Update the list of assessments
assessments_list.append(assessment_dict)
return assessments_list
def serialize_training_examples(examples, assessment_el):
"""
Serialize a training example to XML.
Args:
examples (list of dict): List of example dictionaries.
assessment_el (lxml.etree.Element): The <assessment> XML element.
Returns:
None
"""
for example_dict in examples:
example_el = etree.SubElement(assessment_el, 'example')
# Answer provided in the example (default to empty string)
answer_el = etree.SubElement(example_el, 'answer')
for part in example_dict.get('answer', {}).get('parts', []):
part_el = etree.SubElement(answer_el, 'part')
part_el.text = unicode(part.get('text', u''))
# Options selected from the rubric
options_selected = example_dict.get('options_selected', [])
for selected_dict in options_selected:
select_el = etree.SubElement(example_el, 'select')
select_el.set('criterion', unicode(selected_dict.get('criterion', '')))
select_el.set('option', unicode(selected_dict.get('option', '')))
def serialize_assessments(assessments_root, oa_block):
"""
Serialize the assessment modules for an OpenAssessment XBlock.
Args:
assessments_root (lxml.etree.Element): The <assessments> XML element.
oa_block (OpenAssessmentXBlock): The XBlock with configuration to
serialize.
Returns:
None
"""
for assessment_dict in update_assessments_format(oa_block.rubric_assessments):
assessment = etree.SubElement(assessments_root, 'assessment')
# Set assessment attributes, defaulting to empty values
assessment.set('name', unicode(assessment_dict.get('name', '')))
if 'must_grade' in assessment_dict:
assessment.set('must_grade', unicode(assessment_dict['must_grade']))
if 'must_be_graded_by' in assessment_dict:
assessment.set('must_be_graded_by', unicode(assessment_dict['must_be_graded_by']))
if assessment_dict.get('start') is not None:
assessment.set('start', unicode(assessment_dict['start']))
if assessment_dict.get('due') is not None:
assessment.set('due', unicode(assessment_dict['due']))
if assessment_dict.get('algorithm_id') is not None:
assessment.set('algorithm_id', unicode(assessment_dict['algorithm_id']))
# Training examples
examples = assessment_dict.get('examples', [])
if not isinstance(examples, list):
examples = []
serialize_training_examples(examples, assessment)
def serialize_content_to_xml(oa_block, root):
"""
Serialize the OpenAssessment XBlock's content to XML.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize.
root (etree.Element): The XML root node to update.
Returns:
etree.Element
"""
root.tag = 'openassessment'
# Set the submission start date
if oa_block.submission_start is not None:
root.set('submission_start', unicode(oa_block.submission_start))
# Set submission due date
if oa_block.submission_due is not None:
root.set('submission_due', unicode(oa_block.submission_due))
# Set leaderboard show
if oa_block.leaderboard_show:
root.set('leaderboard_show', unicode(oa_block.leaderboard_show))
# Allow file upload
if oa_block.allow_file_upload is not None:
root.set('allow_file_upload', unicode(oa_block.allow_file_upload))
if oa_block.allow_latex is not None:
root.set('allow_latex', unicode(oa_block.allow_latex))
# Open assessment displayed title
title = etree.SubElement(root, 'title')
title.text = unicode(oa_block.title)
# Assessment list
assessments_root = etree.SubElement(root, 'assessments')
serialize_assessments(assessments_root, oa_block)
# Prompts
prompts_root = etree.SubElement(root, 'prompts')
_serialize_prompts(prompts_root, oa_block.prompts)
# Rubric
rubric_root = etree.SubElement(root, 'rubric')
serialize_rubric(rubric_root, oa_block)
def serialize_content(oa_block):
"""
Serialize the OpenAssessment XBlock's content to an XML string.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize.
Returns:
xml (unicode)
"""
root = etree.Element('openassessment')
serialize_content_to_xml(oa_block, root)
# Return a UTF-8 representation of the XML
return etree.tostring(root, pretty_print=True, encoding='unicode')
def serialize_rubric_to_xml_str(oa_block):
"""
Serialize the OpenAssessment XBlock's rubric into an XML string. This is
designed to serialize the XBlock's rubric specifically for authoring. Since
the authoring view splits the prompt from the rubric, the serialized format
for the rubric does not contain the prompt.
Args:
oa_block (OpenAssessmentBlock): The open assessment block to serialize
a rubric from.
Returns:
xml (unicode) representation of the Rubric.
"""
rubric_root = etree.Element('rubric')
serialize_rubric(rubric_root, oa_block)
return etree.tostring(rubric_root, pretty_print=True, encoding='unicode')
def serialize_examples_to_xml_str(assessment):
"""
Serializes the OpenAssessment XBlock's training examples into an XML unicode
string.
Args:
assessment (dict): Dictionary representation of an Assessment Module's
configuration. If this contains a list of examples, the examples
will be returned serialized.
Returns:
A unicode string of the XML serialized examples.
"""
examples = update_assessments_format([assessment])[0].get('examples', [])
if not isinstance(examples, list):
examples = []
examples_root = etree.Element('examples')
serialize_training_examples(examples, examples_root)
return etree.tostring(examples_root, pretty_print=True, encoding='unicode')
def serialize_assessments_to_xml_str(oa_block):
"""
Serializes the OpenAssessment XBlock's assessment modules into an XML
unicode string.
Args:
oa_block (OpenAssessmentBlock): The open assessment block whose assessment modules are serialized.
Returns:
A unicode string of the XML serialized assessments.
"""
assessments_root = etree.Element('assessments')
serialize_assessments(assessments_root, oa_block)
return etree.tostring(assessments_root, pretty_print=True, encoding='unicode')
def parse_from_xml(root):
"""
Update the OpenAssessment XBlock's content from an XML definition.
We need to be strict about the XML we accept, to avoid setting
the XBlock to an invalid state (which will then be persisted).
Args:
root (lxml.etree.Element): The XML definition of the XBlock's content.
Returns:
A dictionary of all of the XBlock's content.
Raises:
UpdateFromXmlError: The XML definition is invalid
"""
# Check that the root has the correct tag
if root.tag != 'openassessment':
raise UpdateFromXmlError('Every open assessment problem must contain an "openassessment" element.')
# Retrieve the start date for the submission
# Set it to None by default; we will update it to the latest start date later on
submission_start = None
if 'submission_start' in root.attrib:
submission_start = parse_date(unicode(root.attrib['submission_start']), name="submission start date")
# Retrieve the due date for the submission
# Set it to None by default; we will update it to the earliest deadline later on
submission_due = None
if 'submission_due' in root.attrib:
submission_due = parse_date(unicode(root.attrib['submission_due']), name="submission due date")
allow_file_upload = False
if 'allow_file_upload' in root.attrib:
allow_file_upload = _parse_boolean(unicode(root.attrib['allow_file_upload']))
allow_latex = False
if 'allow_latex' in root.attrib:
allow_latex = _parse_boolean(unicode(root.attrib['allow_latex']))
# Retrieve the title
title_el = root.find('title')
if title_el is None:
raise UpdateFromXmlError('Every assessment must contain a "title" element.')
else:
title = _safe_get_text(title_el)
# Retrieve the rubric
rubric_el = root.find('rubric')
if rubric_el is None:
raise UpdateFromXmlError('Every assessment must contain a "rubric" element.')
else:
rubric = parse_rubric_xml(rubric_el)
# Retrieve the prompts
prompts = _parse_prompts_xml(root)
# Retrieve the leaderboard if it exists, otherwise set it to 0
leaderboard_show = 0
if 'leaderboard_show' in root.attrib:
try:
leaderboard_show = int(root.attrib['leaderboard_show'])
except (TypeError, ValueError):
raise UpdateFromXmlError('The leaderboard must have an integer value.')
# Retrieve the assessments
assessments_el = root.find('assessments')
if assessments_el is None:
raise UpdateFromXmlError('Every assessment must contain an "assessments" element.')
else:
assessments = parse_assessments_xml(assessments_el)
return {
'title': title,
'prompts': prompts,
'rubric_criteria': rubric['criteria'],
'rubric_assessments': assessments,
'rubric_feedback_prompt': rubric['feedbackprompt'],
'rubric_feedback_default_text': rubric['feedback_default_text'],
'submission_start': submission_start,
'submission_due': submission_due,
'allow_file_upload': allow_file_upload,
'allow_latex': allow_latex,
'leaderboard_show': leaderboard_show
}
def parse_from_xml_str(xml):
"""
Create a dictionary for the OpenAssessment XBlock's content from an XML
string definition. Parses the string using a library that avoids some known
security vulnerabilities in etree.
Args:
xml (unicode): The XML definition of the XBlock's content.
Returns:
A dictionary of all configuration values for the XBlock.
Raises:
UpdateFromXmlError: The XML definition is invalid.
InvalidRubricError: The rubric was not semantically valid.
InvalidAssessmentsError: The assessments are not semantically valid.
"""
return parse_from_xml(_unicode_to_xml(xml))
def _unicode_to_xml(xml):
"""
Converts a unicode string to an XML node.
Args:
xml (unicode): The XML definition of some XBlock configuration.
Raises:
UpdateFromXmlError: Raised when the XML definition is invalid.
"""
# Parse the XML content definition
# Use the defusedxml library implementation to avoid known security vulnerabilities in ElementTree:
# http://docs.python.org/2/library/xml.html#xml-vulnerabilities
try:
return safe_etree.fromstring(xml.encode('utf-8'))
except (ValueError, safe_etree.ParseError):
raise UpdateFromXmlError("An error occurred while parsing the XML content.")
def parse_examples_from_xml_str(xml):
"""
Converts an XML string of examples (Student Training or AI) into a list of
dictionaries representing the same information.
Args:
xml (unicode): The XML definition of the examples
Returns:
(list of dict): The example definition
"""
examples_root = _unicode_to_xml(xml)
examples = examples_root.findall('example')
return parse_examples_xml(examples)
|
grundprinzip/Impala | refs/heads/cdh5-trunk | tests/comparison/data_generator.py | 5 | #!/usr/bin/env python
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides random data generation and database population.
When this module is run directly for purposes of database population, the default is
to use a fixed seed for randomization. The result should be that the generated random
data is the same regardless of when or where the execution is done.
'''
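# Reproducibility sketch (illustrative): because seed() is called with the
# --randomization-seed value (default 1) before any random draws, running
#     python data_generator.py --randomization-seed 1 populate
# twice against fresh databases should generate identical tables and rows.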
from datetime import datetime, timedelta
from decimal import Decimal as PyDecimal
from logging import basicConfig, getLogger
from random import choice, randint, random, seed, uniform
from tests.comparison.db_connector import (
DbConnector,
HIVE,
IMPALA,
MYSQL,
ORACLE,
POSTGRESQL)
from tests.comparison.common import Column, Table
from tests.comparison.types import (
Boolean,
Char,
Decimal,
EXACT_TYPES,
Float,
get_char_class,
get_decimal_class,
get_varchar_class,
Int,
String,
Timestamp,
TYPES,
VarChar)
LOG = getLogger(__name__)
class RandomValGenerator(object):
'''This class will generate random data of various data types.'''
def __init__(self,
min_number=-1000,
max_number=1000,
min_date=datetime(1990, 1, 1),
max_date=datetime(2030, 1, 1),
null_val_percentage=0.1,
max_decimal_fractional_digits=2):
if type(min_number) != int or type(max_number) != int:
raise Exception("min_number and max_number must be integers but were %s and %s"
% (type(min_number), type(max_number)))
self.min_number = min_number
self.max_number = max_number
self.min_date = min_date
self.max_date = max_date
self.null_val_percentage = null_val_percentage
self.max_decimal_fractional_digits = max_decimal_fractional_digits
def generate_val(self, val_type):
'''Generate and return a single random val. Use the val_type parameter to
specify the type of val to generate. See types.py for valid val_type
options.
Ex:
generator = RandomValGenerator(min_number=1, max_number=5)
val = generator.generate_val(model.Int)
assert 1 <= val and val <= 5
'''
if issubclass(val_type, Char):
val = self.generate_val(Int)
return None if val is None else str(val)[:val_type.MAX]
if random() < self.null_val_percentage:
return None
if issubclass(val_type, Int):
return randint(
max(self.min_number, val_type.MIN), min(val_type.MAX, self.max_number))
if issubclass(val_type, Decimal):
# Create an int within the maximum length of the Decimal, then shift the decimal
# point as needed.
if val_type.MAX_FRACTIONAL_DIGITS > self.max_decimal_fractional_digits:
max_digits = val_type.MAX_DIGITS \
- (val_type.MAX_FRACTIONAL_DIGITS - self.max_decimal_fractional_digits)
fractional_digits = self.max_decimal_fractional_digits
else:
max_digits = val_type.MAX_DIGITS
fractional_digits = val_type.MAX_FRACTIONAL_DIGITS
max_type_val = 10 ** max_digits
decimal_point_shift = 10 ** fractional_digits
max_val = min(self.max_number * decimal_point_shift, max_type_val)
min_val = max(self.min_number * decimal_point_shift, -1 * max_type_val)
return PyDecimal(randint(min_val + 1, max_val - 1)) / decimal_point_shift
if issubclass(val_type, Float):
return uniform(self.min_number, self.max_number)
if issubclass(val_type, Timestamp):
delta = self.max_date - self.min_date
delta_in_seconds = delta.days * 24 * 60 * 60 + delta.seconds
offset_in_seconds = randint(0, delta_in_seconds)
val = self.min_date + timedelta(0, offset_in_seconds)
return datetime(val.year, val.month, val.day)
if issubclass(val_type, Boolean):
return randint(0, 1) == 1
raise Exception('Unsupported type %s' % val_type.__name__)
class DatabasePopulator(object):
'''This class will populate a database with randomly generated data. The population
includes table creation and data generation. Table names are hard coded as
table_<table number>.
'''
def __init__(self):
self.val_generator = RandomValGenerator()
def populate_db_with_random_data(self,
db_name,
db_connectors,
min_number_of_tables,
max_number_of_tables,
min_number_of_cols,
max_number_of_cols,
min_number_of_rows,
max_number_of_rows,
allowed_storage_formats,
create_files):
'''Create tables with a random number of cols.
The given db_name must have already been created.
'''
connections = list()
hive_connection = None
for connector in db_connectors:
connection = connector.create_connection(db_name=db_name)
connections.append(connection)
if connector.db_type == IMPALA:
# The Impala table creator needs help from Hive for some storage formats.
# Eventually Impala should be able to write in all formats and this can be
# removed.
hive_connection = DbConnector(HIVE).create_connection(db_name=db_name)
connection.hive_connection = hive_connection
for table_idx in xrange(randint(min_number_of_tables, max_number_of_tables)):
table = self.create_random_table(
'table_%s' % (table_idx + 1),
min_number_of_cols,
max_number_of_cols,
allowed_storage_formats)
for connection in connections:
connection.bulk_load_data_file = open(
"/tmp/%s_%s.data" % (table.name, connection.db_type.lower()), "w")
connection.begin_bulk_load_table(table)
row_count = randint(min_number_of_rows, max_number_of_rows)
LOG.info('Inserting %s rows into %s', row_count, table.name)
while row_count:
batch_size = min(1000, row_count)
rows = self.generate_table_data(table, number_of_rows=batch_size)
row_count -= batch_size
for connection in connections:
connection.handle_bulk_load_table_data(rows)
for connection in connections:
connection.end_bulk_load_table()
self.index_tables_in_database(connections)
for connection in connections:
connection.close()
if hive_connection:
hive_connection.close()
def migrate_database(self,
db_name,
source_db_connector,
destination_db_connectors,
include_table_names=None):
'''Read table metadata and data from the source database and create a replica in
the destination databases. For example, the Impala functional test database could
be copied into Postgresql.
source_db_connector and items in destination_db_connectors should be
of type db_connector.DbConnector. destination_db_connectors and
include_table_names should be iterables.
'''
source_connection = source_db_connector.create_connection(db_name)
cursors = [connector.create_connection(db_name=db_name).create_cursor()
for connector in destination_db_connectors]
for table_name in source_connection.list_table_names():
if include_table_names and table_name not in include_table_names:
continue
try:
table = source_connection.describe_table(table_name)
except Exception as e:
LOG.warn('Error fetching metadata for %s: %s', table_name, e)
continue
for destination_cursor in cursors:
sql = destination_cursor.connection.make_create_table_sql(table)
destination_cursor.execute(sql)
with source_connection.open_cursor() as source_cursor:
try:
source_cursor.execute('SELECT * FROM ' + table_name)
while True:
rows = source_cursor.fetchmany(size=100)
if not rows:
break
for destination_cursor in cursors:
sql = destination_cursor.connection.make_insert_sql_from_data(table, rows)
destination_cursor.execute(sql)
except Exception as e:
LOG.error('Error fetching data for %s: %s', table_name, e)
continue
self.index_tables_in_database([cursor.connection for cursor in cursors])
for cursor in cursors:
cursor.close()
cursor.connection.close()
def create_random_table(self,
table_name,
min_number_of_cols,
max_number_of_cols,
allowed_storage_formats):
'''Create and return a Table with a random number of cols.'''
col_count = randint(min_number_of_cols, max_number_of_cols)
storage_format = choice(allowed_storage_formats)
table = Table(table_name)
table.storage_format = storage_format
for col_idx in xrange(col_count):
col_type = choice(TYPES)
col_type = choice(filter(lambda type_: issubclass(type_, col_type), EXACT_TYPES))
if issubclass(col_type, VarChar) and not issubclass(col_type, String):
col_type = get_varchar_class(randint(1, VarChar.MAX))
elif issubclass(col_type, Char) and not issubclass(col_type, String):
col_type = get_char_class(randint(1, Char.MAX))
elif issubclass(col_type, Decimal):
max_digits = randint(1, Decimal.MAX_DIGITS)
col_type = get_decimal_class(max_digits, randint(1, max_digits))
col = Column(
table,
'%s_col_%s' % (col_type.__name__.lower(), col_idx + 1),
col_type)
table.cols.append(col)
return table
def generate_table_data(self, table, number_of_rows=100):
rows = list()
for row_idx in xrange(number_of_rows):
row = list()
for col in table.cols:
row.append(self.val_generator.generate_val(col.exact_type))
rows.append(row)
return rows
def drop_and_create_database(self, db_name, db_connectors):
for connector in db_connectors:
with connector.open_connection() as connection:
connection.drop_db_if_exists(db_name)
connection.create_database(db_name)
def index_tables_in_database(self, connections):
for connection in connections:
if connection.supports_index_creation:
for table_name in connection.list_table_names():
LOG.info('Indexing %s on %s' % (table_name, connection.db_type))
connection.index_table(table_name)
if __name__ == '__main__':
import logging
from optparse import NO_DEFAULT, OptionGroup, OptionParser
import tests.comparison.cli_options as cli_options
parser = OptionParser(
usage='usage: \n'
' %prog [options] [populate]\n\n'
' Create and populate database(s). The Impala database will always be \n'
' included, the other database types are optional.\n\n'
' %prog [options] migrate\n\n'
' Migrate an Impala database to another database type. The destination \n'
' database will be dropped and recreated.')
cli_options.add_logging_options(parser)
cli_options.add_db_name_option(parser)
cli_options.add_connection_option_groups(parser)
group = OptionGroup(parser, 'Database Population Options')
group.add_option('--randomization-seed', default=1, type='int',
help='The randomization will be initialized with this seed. Using the same seed '
'will produce the same results across runs.')
storage_formats = ['avro', 'parquet', 'rcfile', 'sequencefile', 'textfile']
group.add_option('--storage-file-formats', default=','.join(storage_formats),
help='A comma separated list of storage formats to choose from.')
group.add_option('--create-data-files', default=False, action='store_true',
help='Create files that can be used to repopulate the databases elsewhere.')
group.add_option('--min-table-count', default=10, type='int',
help='The minimum number of tables to generate.')
group.add_option('--max-table-count', default=100, type='int',
help='The maximum number of tables to generate.')
group.add_option('--min-column-count', default=10, type='int',
help='The minimum number of columns to generate per table.')
group.add_option('--max-column-count', default=100, type='int',
help='The maximum number of columns to generate per table.')
group.add_option('--min-row-count', default=(10 ** 3), type='int',
help='The minimum number of rows to generate per table.')
group.add_option('--max-row-count', default=(10 ** 6), type='int',
help='The maximum number of rows to generate per table.')
parser.add_option_group(group)
group = OptionGroup(parser, 'Database Migration Options')
group.add_option('--migrate-table-names',
help='Table names should be separated with commas. The default is to migrate all '
'tables.')
parser.add_option_group(group)
for group in parser.option_groups + [parser]:
for option in group.option_list:
if option.default != NO_DEFAULT:
option.help += ' [default: %default]'
options, args = parser.parse_args()
command = args[0] if args else 'populate'
if len(args) > 1 or command not in ['populate', 'migrate']:
raise Exception('Command must either be "populate" or "migrate" but was "%s"' %
' '.join(args))
if command == 'migrate' and \
not any((options.use_mysql, options.use_postgresql, options.use_oracle)):
raise Exception('At least one destination database must be chosen with '
'--use-<database type>')
basicConfig(level=getattr(logging, options.log_level))
seed(options.randomization_seed)
impala_connector = DbConnector(IMPALA)
db_connectors = []
if options.use_postgresql:
db_connectors.append(DbConnector(POSTGRESQL,
user_name=options.postgresql_user,
password=options.postgresql_password,
host_name=options.postgresql_host,
port=options.postgresql_port))
if options.use_oracle:
db_connectors.append(DbConnector(ORACLE,
user_name=options.oracle_user,
password=options.oracle_password,
host_name=options.oracle_host,
port=options.oracle_port))
if options.use_mysql:
db_connectors.append(DbConnector(MYSQL,
user_name=options.mysql_user,
password=options.mysql_password,
host_name=options.mysql_host,
port=options.mysql_port))
populator = DatabasePopulator()
if command == 'populate':
db_connectors.append(impala_connector)
populator.drop_and_create_database(options.db_name, db_connectors)
populator.populate_db_with_random_data(
options.db_name,
db_connectors,
options.min_table_count,
options.max_table_count,
options.min_column_count,
options.max_column_count,
options.min_row_count,
options.max_row_count,
options.storage_file_formats.split(','),
options.create_data_files)
else:
populator.drop_and_create_database(options.db_name, db_connectors)
if options.migrate_table_names:
table_names = options.migrate_table_names.split(',')
else:
table_names = None
populator.migrate_database(
options.db_name,
impala_connector,
db_connectors,
include_table_names=table_names)
|
vidartf/hyperspy | refs/heads/master | hyperspy/io_plugins/blockfile.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
from datetime import datetime, timedelta
from dateutil import tz
from traits.api import Undefined
import numpy as np
import logging
import warnings
from hyperspy.misc.array_tools import sarray2dict, dict2sarray
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'Blockfile'
description = 'Read/write support for ASTAR blockfiles'
full_support = False
# Recognised file extension
file_extensions = ['blo', 'BLO']
default_extension = 0
# Writing capabilities:
writes = [(2, 2), (2, 1), (2, 0)]
magics = [0x0102]
def _from_serial_date(serial):
# Excel date&time format
origin = datetime(1899, 12, 30, tzinfo=tz.tzutc())
secs = (serial % 1.0) * 86400.0
dt = timedelta(int(serial), secs, secs / 1000)
utc = origin + dt
return utc.astimezone(tz.tzlocal())
def _to_serial_date(dt):
origin = datetime(1899, 12, 30, tzinfo=tz.tzutc())
delta = dt - origin
return float(delta.days) + (float(delta.seconds) / 86400)
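# Round-trip sketch (illustrative, assumes a tz-aware datetime):
# dt = datetime(2015, 6, 1, 12, 0, tzinfo=tz.tzutc())
# serial = _to_serial_date(dt)  # fractional days since 1899-12-30 (Excel epoch)
# _from_serial_date(serial)     # back to a datetime in the local timezone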
mapping = {
'blockfile_header.Beam_energy':
("Acquisition_instrument.TEM.beam_energy", lambda x: x * 1e-3),
'blockfile_header.Acquisition_time':
("General.time", _from_serial_date),
'blockfile_header.Camera_length':
("Acquisition_instrument.TEM.camera_length", lambda x: x * 1e-4),
'blockfile_header.Scan_rotation':
("Acquisition_instrument.TEM.scan_rotation", lambda x: x * 1e-2),
}
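# Note on the lambdas in `mapping` above: the blockfile header stores values
# in integer-friendly units (e.g. beam energy in V, scan rotation in units of
# 0.01 degree), so each mapping rescales the raw header value into the units
# HyperSpy's metadata expects (e.g. beam energy V -> kV via x * 1e-3).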
def get_header_dtype_list(endianess='<'):
end = endianess
dtype_list = \
[
('ID', (bytes, 6)),
('MAGIC', end + 'u2'),
('Data_offset_1', end + 'u4'), # Offset VBF
('Data_offset_2', end + 'u4'), # Offset DPs
('UNKNOWN1', end + 'u4'), # Flags for ASTAR software?
('DP_SZ', end + 'u2'), # Pixel dim DPs
('DP_rotation', end + 'u2'), # [degrees ( * 100 ?)]
('NX', end + 'u2'), # Scan dim 1
('NY', end + 'u2'), # Scan dim 2
('Scan_rotation', end + 'u2'), # [100 * degrees]
('SX', end + 'f8'), # Pixel size [nm]
('SY', end + 'f8'), # Pixel size [nm]
('Beam_energy', end + 'u4'), # [V]
('SDP', end + 'u2'), # Pixel size [100 * ppcm]
('Camera_length', end + 'u4'), # [10 * mm]
('Acquisition_time', end + 'f8'), # [Serial date]
] + [
('Centering_N%d' % i, 'f8') for i in range(8)
] + [
('Distortion_N%02d' % i, 'f8') for i in range(14)
]
return dtype_list
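# Usage sketch (mirrors file_reader below): the returned list feeds a numpy
# structured dtype, e.g.
#     header = np.fromfile(f, dtype=get_header_dtype_list('<'), count=1)
# after which each header field is accessible by name, e.g. header['NX'].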
def get_default_header(endianess='<'):
"""Returns a header pre-populated with default values.
"""
dt = np.dtype(get_header_dtype_list())
header = np.zeros((1,), dtype=dt)
header['ID'][0] = 'IMGBLO'.encode()
header['MAGIC'][0] = magics[0]
header['Data_offset_1'][0] = 0x1000 # Always this value observed
header['UNKNOWN1'][0] = 131141 # Very typical value (always?)
header['Acquisition_time'][0] = _to_serial_date(
datetime.fromtimestamp(86400, tz.tzutc()))
# Default to UNIX epoch + 1 day
# Have to add 1 day, as dateutil's timezones dont work before epoch
return header
def get_header_from_signal(signal, endianess='<'):
header = get_default_header(endianess)
if 'blockfile_header' in signal.original_metadata:
header = dict2sarray(signal.original_metadata['blockfile_header'],
sarray=header)
note = signal.original_metadata['blockfile_header']['Note']
else:
note = ''
if signal.axes_manager.navigation_dimension == 2:
NX, NY = signal.axes_manager.navigation_shape
SX = signal.axes_manager.navigation_axes[0].scale
SY = signal.axes_manager.navigation_axes[1].scale
elif signal.axes_manager.navigation_dimension == 1:
NX = signal.axes_manager.navigation_shape[0]
NY = 1
SX = signal.axes_manager.navigation_axes[0].scale
SY = SX
elif signal.axes_manager.navigation_dimension == 0:
NX = NY = SX = SY = 1
DP_SZ = signal.axes_manager.signal_shape
if DP_SZ[0] != DP_SZ[1]:
raise ValueError('Blockfiles require signal shape to be square!')
DP_SZ = DP_SZ[0]
SDP = 100. / signal.axes_manager.signal_axes[0].scale
offset2 = NX * NY + header['Data_offset_1']
# Based on inspected files, the DPs are stored at 16-bit boundary...
# Normally, you'd expect word alignment (32-bits) ¯\_(°_o)_/¯
offset2 += offset2 % 16
header = dict2sarray({
'NX': NX, 'NY': NY,
'DP_SZ': DP_SZ,
'SX': SX, 'SY': SY,
'SDP': SDP,
'Data_offset_2': offset2,
}, sarray=header)
return header, note
def file_reader(filename, endianess='<', load_to_memory=True, mmap_mode='c',
**kwds):
_logger.debug("Reading blockfile: %s" % filename)
metadata = {}
# Make sure we open the file in the right mode:
if '+' in mmap_mode or ('write' in mmap_mode and
'copyonwrite' != mmap_mode):
f = open(filename, 'r+b')
else:
f = open(filename, 'rb')
_logger.debug("File opened")
# Get header
header = np.fromfile(f, dtype=get_header_dtype_list(endianess), count=1)
if header['MAGIC'][0] not in magics:
warnings.warn("Blockfile has unrecognized header signature. "
"Will attempt to read, but correcteness not guaranteed!")
header = sarray2dict(header)
note = f.read(header['Data_offset_1'] - f.tell())
note = note.strip(b'\x00')
header['Note'] = note.decode()
_logger.debug("File header: " + str(header))
NX, NY = int(header['NX']), int(header['NY'])
DP_SZ = int(header['DP_SZ'])
if header['SDP']:
SDP = 100. / header['SDP']
else:
SDP = Undefined
original_metadata = {'blockfile_header': header}
# Get data:
# A Virtual BF/DF is stored first
# offset1 = header['Data_offset_1']
# f.seek(offset1)
# data_pre = np.array(f.read(NX*NY), dtype=endianess+'u1'
# ).squeeze().reshape((NX, NY), order='C').T
# Then comes the actual blockfile
offset2 = header['Data_offset_2']
if load_to_memory:
f.seek(offset2)
data = np.fromfile(f, dtype=endianess + 'u1')
else:
data = np.memmap(f, mode=mmap_mode, offset=offset2,
dtype=endianess + 'u1')
try:
data = data.reshape((NY, NX, DP_SZ * DP_SZ + 6))
except ValueError:
warnings.warn(
'Blockfile header dimensions larger than file size! '
'Will attempt to load by zero padding incomplete frames.')
# Data is stored DP by DP:
pw = [(0, NX * NY * (DP_SZ * DP_SZ + 6) - data.size)]
data = np.pad(data, pw, mode='constant')
data = data.reshape((NY, NX, DP_SZ * DP_SZ + 6))
# Every frame is preceded by a 6 byte sequence (AA 55, and then a 4 byte
# integer specifying frame number)
data = data[:, :, 6:]
data = data.reshape((NY, NX, DP_SZ, DP_SZ), order='C').squeeze()
units = ['nm', 'nm', 'cm', 'cm']
names = ['y', 'x', 'dy', 'dx']
scales = [header['SY'], header['SX'], SDP, SDP]
metadata = {'General': {'original_filename': os.path.split(filename)[1]},
"Signal": {'signal_type': "diffraction",
'record_by': 'image', },
}
# Create the axis objects for each axis
dim = data.ndim
axes = [
{
'size': data.shape[i],
'index_in_array': i,
'name': names[i],
'scale': scales[i],
'offset': 0.0,
'units': units[i], }
for i in range(dim)]
dictionary = {'data': data,
'axes': axes,
'metadata': metadata,
'original_metadata': original_metadata,
'mapping': mapping, }
f.close()
return [dictionary, ]
def file_writer(filename, signal, **kwds):
endianess = kwds.pop('endianess', '<')
header, note = get_header_from_signal(signal, endianess=endianess)
with open(filename, 'wb') as f:
# Write header
header.tofile(f)
# Write header note field:
if len(note) > int(header['Data_offset_1']) - f.tell():
note = note[:int(header['Data_offset_1']) - f.tell() - len(note)]
f.write(note.encode())
# Zero pad until next data block
zero_pad = int(header['Data_offset_1']) - f.tell()
np.zeros((zero_pad,), np.byte).tofile(f)
# Write virtual bright field
vbf = signal.mean(signal.axes_manager.signal_axes[:2]).data.astype(endianess + 'u1')
vbf.tofile(f)
# Zero pad until next data block
if f.tell() > int(header['Data_offset_2']):
raise ValueError("Signal navigation size does not match "
"data dimensions.")
zero_pad = int(header['Data_offset_2']) - f.tell()
np.zeros((zero_pad,), np.byte).tofile(f)
# Write full data stack:
# We need to pad each image with magic 'AA55', then a u32 serial
dp_head = np.zeros((1,), dtype=[('MAGIC', endianess + 'u2'),
('ID', endianess + 'u4')])
dp_head['MAGIC'] = 0x55AA
# Write by loop:
for img in signal._iterate_signal():
dp_head.tofile(f)
img.astype(endianess + 'u1').tofile(f)
dp_head['ID'] += 1
|
rhelmer/socorro-lib | refs/heads/master | alembic/versions/36abe253f8b3_fixes_902142_partiti.py | 16 | """Fixes 902142 partition matviews
Revision ID: 36abe253f8b3
Revises: 2645cb324bf4
Create Date: 2013-08-07 10:17:05.769404
"""
# revision identifiers, used by Alembic.
revision = '36abe253f8b3'
down_revision = '2645cb324bf4'
import os
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
name = 'citext'
def get_col_spec(self):
return 'CITEXT'
def bind_processor(self, dialect):
return lambda value: value
def result_processor(self, dialect, coltype):
return lambda value: value
def __repr__(self):
return "citext"
class JSON(types.UserDefinedType):
name = 'json'
def get_col_spec(self):
return 'JSON'
def bind_processor(self, dialect):
return lambda value: value
def result_processor(self, dialect, coltype):
return lambda value: value
def __repr__(self):
return "json"
def upgrade():
op.add_column(u'report_partition_info', sa.Column(u'partition_column', sa.TEXT()))
# Drop FK constraints on parent tables
op.drop_constraint('signature_summary_architecture_product_version_id_fkey', 'signature_summary_architecture')
op.drop_constraint('signature_summary_architecture_signature_id_fkey', 'signature_summary_architecture')
op.drop_constraint('signature_summary_flash_version_product_version_id_fkey', 'signature_summary_flash_version')
op.drop_constraint('signature_summary_flash_version_signature_id_fkey', 'signature_summary_flash_version')
op.drop_constraint('signature_summary_installations_signature_id_fkey', 'signature_summary_installations')
op.drop_constraint('signature_summary_os_product_version_id_fkey', 'signature_summary_os')
op.drop_constraint('signature_summary_os_signature_id_fkey', 'signature_summary_os')
op.drop_constraint('signature_summary_process_type_product_version_id_fkey', 'signature_summary_process_type')
op.drop_constraint('signature_summary_process_type_signature_id_fkey', 'signature_summary_process_type')
op.drop_constraint('signature_summary_products_product_version_id_fkey', 'signature_summary_products')
op.drop_constraint('signature_summary_products_signature_id_fkey', 'signature_summary_products')
op.drop_constraint('signature_summary_uptime_product_version_id_fkey', 'signature_summary_uptime')
op.drop_constraint('signature_summary_uptime_signature_id_fkey', 'signature_summary_uptime')
app_path = os.getcwd()
procs = ['weekly_report_partitions.sql',
'backfill_weekly_report_partitions.sql']
for myfile in [app_path + '/socorro/external/postgresql/raw_sql/procs/' + line for line in procs]:
proc = open(myfile, 'r').read()
op.execute(proc)
# Now run this against the raw_crashes table
op.execute("""
UPDATE report_partition_info
SET partition_column = 'date_processed'
""")
report_partition_info = table(u'report_partition_info',
column(u'build_order', sa.INTEGER()),
column(u'fkeys', postgresql.ARRAY(sa.TEXT())),
column(u'indexes', postgresql.ARRAY(sa.TEXT())),
column(u'keys', postgresql.ARRAY(sa.TEXT())),
column(u'table_name', CITEXT()),
column(u'partition_column', sa.TEXT()),
)
op.bulk_insert(report_partition_info, [
{'table_name': u'signature_summary_installations',
'build_order': 5,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)"],
'partition_column': 'report_date',
'keys': ["signature_id,product_name,version_string,report_date"],
'indexes': ["report_date"],
},
{'table_name': u'signature_summary_architecture',
'build_order': 6,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)", "(product_version_id) REFERENCES product_versions(product_version_id)"],
'partition_column': 'report_date',
'keys': ["signature_id, architecture, product_version_id, report_date"],
'indexes': ["report_date"],
},
{'table_name': u'signature_summary_flash_version',
'build_order': 7,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)", "(product_version_id) REFERENCES product_versions(product_version_id)"],
'partition_column': 'report_date',
'keys': ["signature_id, flash_version, product_version_id, report_date"],
'indexes': ["report_date"],
},
{'table_name': u'signature_summary_os',
'build_order': 8,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)", "(product_version_id) REFERENCES product_versions(product_version_id)"],
'partition_column': 'report_date',
'keys': ["signature_id, os_version_string, product_version_id, report_date"],
'indexes': ["report_date"],
},
{'table_name': u'signature_summary_process_type',
'build_order': 9,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)", "(product_version_id) REFERENCES product_versions(product_version_id)"],
'partition_column': 'report_date',
'keys': ["signature_id, process_type, product_version_id, report_date"],
'indexes': ["report_date"],
},
{'table_name': u'signature_summary_products',
'build_order': 10,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)", "(product_version_id) REFERENCES product_versions(product_version_id)"],
'partition_column': 'report_date',
'keys': ["signature_id, product_version_id, report_date"],
'indexes': ["report_date"],
},
{'table_name': u'signature_summary_uptime',
'build_order': 11,
'fkeys': ["(signature_id) REFERENCES signatures(signature_id)", "(product_version_id) REFERENCES product_versions(product_version_id)"],
'partition_column': 'report_date',
'keys': ["signature_id, uptime_string, product_version_id, report_date"],
'indexes': ["report_date"],
},
])
op.alter_column(u'report_partition_info', u'partition_column',
existing_type=sa.TEXT(),
nullable=False)
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_architecture')
""")
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_flash_version')
""")
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_installations')
""")
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_os')
""")
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_process_type')
""")
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_products')
""")
op.execute("""
SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_uptime')
""")
def downgrade():
op.execute("""
DELETE from report_partition_info
WHERE table_name ~ '^signature_summary_'
""")
op.drop_column(u'report_partition_info', u'partition_column')
    # Drop all child partition tables
op.execute("""
DO $$DECLARE mytable record;
BEGIN
FOR mytable IN SELECT relname FROM pg_class
WHERE relname ~ '^signature_summary_*201*' and relkind = 'r'
LOOP
EXECUTE 'DROP TABLE ' || quote_ident(mytable.relname) || ' CASCADE';
END LOOP;
END$$
""")
    # Re-create FK constraints on parent tables
op.create_foreign_key('signature_summary_architecture_product_version_id_fkey', 'signature_summary_architecture', 'product_versions', ["product_version_id"], ["product_version_id"])
op.create_foreign_key('signature_summary_architecture_signature_id_fkey', 'signature_summary_architecture', 'signatures', ["signature_id"], ["signature_id"])
op.create_foreign_key('signature_summary_flash_version_product_version_id_fkey', 'signature_summary_flash_version', 'product_versions', ["product_version_id"], ["product_version_id"])
op.create_foreign_key('signature_summary_flash_version_signature_id_fkey', 'signature_summary_flash_version', 'signatures', ["signature_id"], ["signature_id"])
op.create_foreign_key('signature_summary_installations_signature_id_fkey', 'signature_summary_installations', 'signatures', ["signature_id"], ["signature_id"])
op.create_foreign_key('signature_summary_os_product_version_id_fkey', 'signature_summary_os', 'product_versions', ["product_version_id"], ["product_version_id"])
op.create_foreign_key('signature_summary_os_signature_id_fkey', 'signature_summary_os', 'signatures', ["signature_id"], ["signature_id"])
op.create_foreign_key('signature_summary_process_type_product_version_id_fkey', 'signature_summary_process_type', 'product_versions', ["product_version_id"], ["product_version_id"])
op.create_foreign_key('signature_summary_process_type_signature_id_fkey', 'signature_summary_process_type', 'signatures', ["signature_id"], ["signature_id"])
op.create_foreign_key('signature_summary_products_product_version_id_fkey', 'signature_summary_products', 'product_versions', ["product_version_id"], ["product_version_id"])
op.create_foreign_key('signature_summary_products_signature_id_fkey', 'signature_summary_products', 'signatures', ["signature_id"], ["signature_id"])
op.create_foreign_key('signature_summary_uptime_product_version_id_fkey', 'signature_summary_uptime', 'product_versions', ["product_version_id"], ["product_version_id"])
op.create_foreign_key('signature_summary_uptime_signature_id_fkey', 'signature_summary_uptime', 'signatures', ["signature_id"], ["signature_id"])
|
Manishearth/servo | refs/heads/master | tests/wpt/css-tests/tools/serve/serve.py | 215 | # -*- coding: utf-8 -*-
import argparse
import json
import os
import signal
import socket
import sys
import threading
import time
import traceback
import urllib2
import uuid
from collections import defaultdict, OrderedDict
from multiprocessing import Process, Event
from .. import localpaths
import sslutils
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve.logger import set_logger
from mod_pywebsocket import standalone as pywebsocket
repo_root = localpaths.repo_root
class WorkersHandler(object):
def __init__(self):
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
return self.handler(request, response)
def handle_request(self, request, response):
worker_path = request.url_parts.path.replace(".worker", ".worker.js")
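        # Serve a small harness page that loads the corresponding .worker.js
        # script and collects its results via fetch_tests_from_worker.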
return """<!doctype html>
<meta charset=utf-8>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%s"));
</script>
""" % (worker_path,)
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
subdomains = [u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/serve.py", handlers.ErrorHandler(404))]
self.static = [("GET", "*.worker", WorkersHandler())]
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.static
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_static(self, path, format_args, content_type, route):
handler = handlers.StaticHandler(path, format_args, content_type)
self.static.append((b"GET", str(route), handler))
def add_mount_point(self, url_base, path):
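        # Normalise the mount point so it always has the form "/prefix/"
        # (the root mount point stays "/").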
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [("GET", "*.asis", handlers.AsIsHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
b"%s%s" % (str(url_base) if url_base != "/" else "", str(suffix)),
handler_cls(base_path=path, url_base=url_base)))
def default_routes():
return RoutesBuilder().get_routes()
def setup_logger(level):
import logging
global logger
logger = logging.getLogger("web-platform-tests")
logging.basicConfig(level=getattr(logging, level.upper()))
set_logger(logger)
def open_socket(port):
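    # Create a listening socket; SO_REUSEADDR lets a fixed port be rebound
    # straight after a restart, and is deliberately skipped for the port-0
    # probe used by get_port().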
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port != 0:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', port))
sock.listen(5)
return sock
def get_port():
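    # Bind to port 0 so the OS assigns a free ephemeral port, then release
    # it so the caller can bind it shortly afterwards.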
free_socket = open_socket(0)
port = free_socket.getsockname()[1]
logger.debug("Going to use port %s" % port)
free_socket.close()
return port
class ServerProc(object):
def __init__(self):
self.proc = None
self.daemon = None
self.stop = Event()
def start(self, init_func, host, port, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config))
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs)
except socket.error:
print >> sys.stderr, "Socket error on port %s" % port
raise
except:
print >> sys.stderr, traceback.format_exc()
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except:
print >> sys.stderr, traceback.format_exc()
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(host, paths, bind_hostname, ssl_config):
port = get_port()
subdomains = get_subdomains(host)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, default_routes(), bind_hostname,
None, ssl_config)
connected = False
for i in range(10):
try:
urllib2.urlopen("http://%s:%d/" % (host, port))
connected = True
break
except urllib2.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server on http://%s:%s You may need to edit /etc/hosts or similar" % (host, port))
sys.exit(1)
for subdomain, (punycode, host) in subdomains.iteritems():
domain = "%s.%s" % (punycode, host)
try:
urllib2.urlopen("http://%s:%d/" % (domain, port))
        except Exception:
logger.critical("Failed probing domain %s. You may need to edit /etc/hosts or similar." % domain)
sys.exit(1)
wrapper.wait()
def get_subdomains(host):
#This assumes that the tld is ascii-only or already in punycode
return {subdomain: (subdomain.encode("idna"), host)
for subdomain in subdomains}
def start_servers(host, ports, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
servers = defaultdict(list)
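    # The assertion below encodes the expectation that http supplies two
    # ports (primary and alternate) and every other scheme exactly one.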
    for scheme, scheme_ports in ports.iteritems():
        assert len(scheme_ports) == {"http": 2}.get(scheme, 1)
        for port in scheme_ports:
if port is None:
continue
init_func = {"http":start_http_server,
"https":start_https_server,
"ws":start_ws_server,
"wss":start_wss_server}[scheme]
server_proc = ServerProc()
server_proc.start(init_func, host, port, paths, routes, bind_hostname,
external_config, ssl_config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def start_http_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_hostname=bind_hostname,
config=external_config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
def start_https_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_hostname=bind_hostname,
config=external_config,
use_ssl=True,
key_file=ssl_config["key_path"],
certificate=ssl_config["cert_path"],
encrypt_after_connect=ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, log_level, bind_hostname,
ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root,
"--log-level", log_level]
if ssl_config is not None:
# This is usually done through pywebsocket.main, however we're
# working around that to get the server instance and manually
# setup the wss server.
if pywebsocket._import_ssl():
tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
elif pywebsocket._import_pyopenssl():
tls_module = pywebsocket._TLS_BY_PYOPENSSL
else:
print "No SSL module available"
sys.exit(1)
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"],
"--tls-module", tls_module]
        if bind_hostname:
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def start_ws_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_hostname,
ssl_config = None)
def start_wss_server(host, port, paths, routes, bind_hostname, external_config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_hostname,
ssl_config)
def get_ports(config, ssl_environment):
rv = defaultdict(list)
for scheme, ports in config["ports"].iteritems():
for i, port in enumerate(ports):
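            # ssl-based schemes get port None when the ssl environment
            # is not enabled.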
if scheme in ["wss", "https"] and not ssl_environment.ssl_enabled:
port = None
if port == "auto":
port = get_port()
else:
port = port
rv[scheme].append(port)
return rv
def normalise_config(config, ports):
host = config["external_host"] if config["external_host"] else config["host"]
domains = get_subdomains(host)
for key, value in domains.iteritems():
domains[key] = ".".join(value)
domains[""] = host
ports_ = {}
for scheme, ports_used in ports.iteritems():
ports_[scheme] = ports_used
return {"host": host,
"domains": domains,
"ports": ports_}
def get_ssl_config(config, external_domains, ssl_environment):
key_path, cert_path = ssl_environment.host_cert_path(external_domains)
return {"key_path": key_path,
"cert_path": cert_path,
"encrypt_after_connect": config["ssl"]["encrypt_after_connect"]}
def start(config, ssl_environment, routes, **kwargs):
host = config["host"]
domains = get_subdomains(host)
ports = get_ports(config, ssl_environment)
bind_hostname = config["bind_hostname"]
paths = {"doc_root": config["doc_root"],
"ws_doc_root": config["ws_doc_root"]}
external_config = normalise_config(config, ports)
ssl_config = get_ssl_config(config, external_config["domains"].values(), ssl_environment)
if config["check_subdomains"]:
check_subdomains(host, paths, bind_hostname, ssl_config)
servers = start_servers(host, ports, paths, routes, bind_hostname, external_config,
ssl_config, **kwargs)
return external_config, servers
def iter_procs(servers):
    for scheme_servers in servers.values():
        for port, server in scheme_servers:
            yield server.proc
def value_set(config, key):
return key in config and config[key] is not None
def get_value_or_default(config, key, default=None):
return config[key] if value_set(config, key) else default
def set_computed_defaults(config):
if not value_set(config, "doc_root"):
config["doc_root"] = repo_root
if not value_set(config, "ws_doc_root"):
root = get_value_or_default(config, "doc_root", default=repo_root)
config["ws_doc_root"] = os.path.join(root, "websockets", "handlers")
def merge_json(base_obj, override_obj):
rv = {}
for key, value in base_obj.iteritems():
if key not in override_obj:
rv[key] = value
else:
if isinstance(value, dict):
rv[key] = merge_json(value, override_obj[key])
else:
rv[key] = override_obj[key]
return rv
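# Illustrative behaviour (hypothetical values):
#   merge_json({"a": 1, "b": {"c": 2, "d": 4}}, {"b": {"c": 3}, "e": 5})
#   -> {"a": 1, "b": {"c": 3, "d": 4}}
# Nested dicts merge recursively, override values win for leaves, and keys
# present only in override_obj are dropped (iteration is over base_obj).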
def get_ssl_environment(config):
implementation_type = config["ssl"]["type"]
cls = sslutils.environments[implementation_type]
try:
kwargs = config["ssl"][implementation_type].copy()
except KeyError:
        raise ValueError("%s is not a valid ssl type." % implementation_type)
return cls(logger, **kwargs)
def load_config(default_path, override_path=None, **kwargs):
if os.path.exists(default_path):
with open(default_path) as f:
base_obj = json.load(f)
else:
raise ValueError("Config path %s does not exist" % default_path)
    if override_path is not None and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
else:
override_obj = {}
rv = merge_json(base_obj, override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
base_obj = rv
with open(other_path) as f:
override_obj = json.load(f)
rv = merge_json(base_obj, override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
rv[key] = value
set_computed_defaults(rv)
return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
return parser
def main():
kwargs = vars(get_parser().parse_args())
config = load_config("config.default.json",
"config.json",
**kwargs)
setup_logger(config["log_level"])
with stash.StashServer((config["host"], get_port()), authkey=str(uuid.uuid4())):
with get_ssl_environment(config) as ssl_env:
config_, servers = start(config, ssl_env, default_routes(), **kwargs)
try:
while any(item.is_alive() for item in iter_procs(servers)):
for item in iter_procs(servers):
item.join(1)
except KeyboardInterrupt:
logger.info("Shutting down")
|
sabi0/intellij-community | refs/heads/master | python/testData/copyPaste/Indent2.src.py | 332 | class C:
def foo(self):
<selection>x = 1
y = 2</selection>
|
catmium/PyLexTo | refs/heads/master | setup.py | 1 | from setuptools import setup
setup(
name='PyLexTo',
version='0.1.2',
description='LongLexTo with Python wrapper',
url='https://github.com/catmium/PyLexTo',
author='catmium',
packages=['PyLexTo'],
install_requires=['JPype1==0.6.2'],
license='GPL-3.0',
author_email='hello@jets.im',
package_data={'': ['LICENSE'],
'PyLexTo': ['LongLexTo/*.class', 'data/dictionary/*.txt', 'data/stopwords/*.txt']}
) |
perimosocordiae/scipy | refs/heads/master | scipy/linalg/tests/test_solve_toeplitz.py | 21 | """Test functions for linalg._solve_toeplitz module
"""
import numpy as np
from scipy.linalg._solve_toeplitz import levinson
from scipy.linalg import solve, toeplitz, solve_toeplitz
from numpy.testing import assert_equal, assert_allclose
import pytest
from pytest import raises as assert_raises
def test_solve_equivalence():
# For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
random = np.random.RandomState(1234)
for n in (1, 2, 3, 10):
c = random.randn(n)
if random.rand() < 0.5:
c = c + 1j * random.randn(n)
r = random.randn(n)
if random.rand() < 0.5:
r = r + 1j * random.randn(n)
y = random.randn(n)
if random.rand() < 0.5:
y = y + 1j * random.randn(n)
# Check equivalence when both the column and row are provided.
actual = solve_toeplitz((c,r), y)
desired = solve(toeplitz(c, r=r), y)
assert_allclose(actual, desired)
# Check equivalence when the column is provided but not the row.
actual = solve_toeplitz(c, b=y)
desired = solve(toeplitz(c), y)
assert_allclose(actual, desired)
def test_multiple_rhs():
random = np.random.RandomState(1234)
c = random.randn(4)
r = random.randn(4)
for offset in [0, 1j]:
for yshape in ((4,), (4, 3), (4, 3, 2)):
y = random.randn(*yshape) + offset
actual = solve_toeplitz((c,r), b=y)
desired = solve(toeplitz(c, r=r), y)
assert_equal(actual.shape, yshape)
assert_equal(desired.shape, yshape)
assert_allclose(actual, desired)
def test_native_list_arguments():
c = [1,2,4,7]
r = [1,3,9,12]
y = [5,1,4,2]
actual = solve_toeplitz((c,r), y)
desired = solve(toeplitz(c, r=r), y)
assert_allclose(actual, desired)
def test_zero_diag_error():
# The Levinson-Durbin implementation fails when the diagonal is zero.
random = np.random.RandomState(1234)
n = 4
c = random.randn(n)
r = random.randn(n)
y = random.randn(n)
c[0] = 0
assert_raises(np.linalg.LinAlgError,
solve_toeplitz, (c, r), b=y)
def test_wikipedia_counterexample():
# The Levinson-Durbin implementation also fails in other cases.
# This example is from the talk page of the wikipedia article.
random = np.random.RandomState(1234)
c = [2, 2, 1]
y = random.randn(3)
assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y)
def test_reflection_coeffs():
    # check that the partial solutions are given by the reflection
# coefficients
random = np.random.RandomState(1234)
y_d = random.randn(10)
y_z = random.randn(10) + 1j
reflection_coeffs_d = [1]
reflection_coeffs_z = [1]
for i in range(2, 10):
reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])
reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])
y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
_, ref_d = levinson(y_d_concat, b=y_d[1:])
_, ref_z = levinson(y_z_concat, b=y_z[1:])
assert_allclose(reflection_coeffs_d, ref_d[:-1])
assert_allclose(reflection_coeffs_z, ref_z[:-1])
@pytest.mark.xfail(reason='Instability of Levinson iteration')
def test_unstable():
# this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
# I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
# Partial Pivoting for Matrices with Displacement Structure"
# Mathematics of Computation, 64, 212 (1995), pp 1557-1576
# which can be unstable for levinson recursion.
# other fast toeplitz solvers such as GKO or Burg should be better.
random = np.random.RandomState(1234)
n = 100
c = 0.9 ** (np.arange(n)**2)
y = random.randn(n)
solution1 = solve_toeplitz(c, b=y)
solution2 = solve(toeplitz(c), y)
assert_allclose(solution1, solution2)
|
njwilson23/scipy | refs/heads/master | scipy/stats/tests/test_continuous_basic.py | 16 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import numpy.testing as npt
from scipy import integrate
from scipy import stats
from scipy.special import betainc
from common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect, check_kurt_expect,
check_entropy, check_private_entropy, NUMPY_BELOW_1_7,
check_edge_support, check_named_args, check_random_state_property)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
## Note that you need to add new distributions you want tested
## to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
## Last four of these fail all around. Need to be checked
distcont_extra = [
['betaprime', (100, 86)],
['fatiguelife', (5,)],
['mielke', (4.6420495492121487, 0.59707419545516938)],
['invweibull', (0.58847112119264788,)],
# burr: sample mean test fails still for c<1
['burr', (0.94839838075366045, 4.3820284068855795)],
# genextreme: sample mean test, sf-logsf test fail
['genextreme', (3.3184017469423535,)],
]
# for testing only specific functions
# distcont = [
## ['fatiguelife', (29,)], #correction numargs = 1
## ['loggamma', (0.41411931826052117,)]]
# for testing ticket:767
# distcont = [
## ['genextreme', (3.3184017469423535,)],
## ['genextreme', (0.01,)],
## ['genextreme', (0.00001,)],
## ['genextreme', (0.0,)],
## ['genextreme', (-0.01,)]
## ]
# distcont = [['gumbel_l', ()],
## ['gumbel_r', ()],
## ['norm', ()]
## ]
# distcont = [['norm', ()]]
distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous',
'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone',
'cosine', 'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss', 'levy',
'johnsonsu', 'levy_l', 'powernorm', 'wrapcauchy',
'johnsonsb', 'truncexpon', 'invgauss', 'invgamma',
'powerlognorm']
distmiss = [[dist,args] for dist,args in distcont if dist in distmissing]
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
'vonmises', 'vonmises_line', 'mielke', 'semicircular',
'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
# distslow are sorted by speed (very slow to slow)
# NB: not needed anymore?
def _silence_fp_errors(func):
# warning: don't apply to test_ functions as is, then those will be skipped
def wrap(*a, **kw):
olderr = np.seterr(all='ignore')
try:
return func(*a, **kw)
finally:
np.seterr(**olderr)
wrap.__name__ = func.__name__
return wrap
def test_cont_basic():
# this test skips slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname in distslow:
continue
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 500
rvs = distfn.rvs(size=sn, *arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, \
distname + 'sample mean test'
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
'pareto': 1.5, 'tukeylambda': 0.3}
x = spec_x.get(distname, 0.5)
yield check_named_args, distfn, x, arg, locscale_defaults, meths
yield check_random_state_property, distfn, arg
# Entropy
skp = npt.dec.skipif
yield check_entropy, distfn, arg, distname
if distfn.numargs == 0:
yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
if distfn.__class__._entropy != stats.rv_continuous._entropy:
yield check_private_entropy, distfn, arg, stats.rv_continuous
yield check_edge_support, distfn, arg
knf = npt.dec.knownfailureif
yield knf(distname == 'truncnorm')(check_ppf_private), distfn, \
arg, distname
@npt.dec.slow
def test_cont_basic_slow():
# same as above for slow distributions
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
for distname, arg in distcont[:]:
if distname not in distslow:
continue
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
np.random.seed(765456)
sn = 500
rvs = distfn.rvs(size=sn,*arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, \
distname + 'sample mean test'
yield check_cdf_ppf, distfn, arg, distname
yield check_sf_isf, distfn, arg, distname
yield check_pdf, distfn, arg, distname
yield check_pdf_logpdf, distfn, arg, distname
yield check_cdf_logcdf, distfn, arg, distname
yield check_sf_logsf, distfn, arg, distname
# yield check_oth, distfn, arg # is still missing
if distname in distmissing:
alpha = 0.01
yield check_distribution_rvs, distname, arg, alpha, rvs
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
x = 0.5
if distname == 'invweibull':
arg = (1,)
elif distname == 'ksone':
arg = (3,)
yield check_named_args, distfn, x, arg, locscale_defaults, meths
yield check_random_state_property, distfn, arg
# Entropy
skp = npt.dec.skipif
ks_cond = distname in ['ksone', 'kstwobign']
yield skp(ks_cond)(check_entropy), distfn, arg, distname
if distfn.numargs == 0:
yield skp(NUMPY_BELOW_1_7)(check_vecentropy), distfn, arg
if distfn.__class__._entropy != stats.rv_continuous._entropy:
yield check_private_entropy, distfn, arg, stats.rv_continuous
yield check_edge_support, distfn, arg
@npt.dec.slow
def test_moments():
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=integrate.IntegrationWarning)
knf = npt.dec.knownfailureif
fail_normalization = set(['vonmises', 'ksone'])
fail_higher = set(['vonmises', 'ksone', 'ncf'])
for distname, arg in distcont[:]:
            if distname == 'levy_stable':
continue
distfn = getattr(stats, distname)
m, v, s, k = distfn.stats(*arg, moments='mvsk')
cond1, cond2 = distname in fail_normalization, distname in fail_higher
msg = distname + ' fails moments'
yield knf(cond1, msg)(check_normalization), distfn, arg, distname
yield knf(cond2, msg)(check_mean_expect), distfn, arg, m, distname
yield knf(cond2, msg)(check_var_expect), distfn, arg, m, v, distname
yield knf(cond2, msg)(check_skew_expect), distfn, arg, m, v, s, \
distname
yield knf(cond2, msg)(check_kurt_expect), distfn, arg, m, v, k, \
distname
yield check_loc_scale, distfn, arg, m, v, distname
yield check_moment, distfn, arg, m, v, distname
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
# this did not work, skipped silently by nose
if np.isfinite(m):
check_sample_mean(sm, sv, sn, m)
if np.isfinite(v):
check_sample_var(sv, sn, v)
def check_sample_mean(sm, v, n, popmean):
# from stats.stats.ttest_1samp(a, popmean):
# Calculates the t-obtained for the independent samples T-test on ONE group
# of scores a, given a population mean.
#
# Returns: t-value, two-tailed prob
df = n-1
svar = ((n-1)*v) / float(df) # looks redundant
t = (sm-popmean) / np.sqrt(svar*(1.0/n))
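    # betainc(df/2, 0.5, df/(df + t**2)) is the two-sided p-value of the
    # t statistic with df degrees of freedom.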
prob = betainc(0.5*df, 0.5, df/(df + t*t))
# return t,prob
npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
(t, prob, popmean, sm))
def check_sample_var(sv, n, popvar):
# two-sided chisquare test for sample variance equal to hypothesized variance
df = n-1
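    # chi-square statistic for H0: variance == popvar is
    # (n - 1) * sample variance / popvar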
    chi2 = (n-1)*sv/float(popvar)
    pval = stats.distributions.chi2.sf(chi2, df) * 2
    npt.assert_(pval > 0.01, 'var fail, chi2, pval = %f, %f, popvar, sv = %f, %f' %
                (chi2, pval, popvar, sv))
def check_cdf_ppf(distfn,arg,msg):
values = [0.001, 0.5, 0.999]
npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
values, decimal=DECIMAL, err_msg=msg +
' - cdf-ppf roundtrip')
def check_sf_isf(distfn,arg,msg):
npt.assert_almost_equal(distfn.sf(distfn.isf([0.1,0.5,0.9], *arg), *arg),
[0.1,0.5,0.9], decimal=DECIMAL, err_msg=msg +
' - sf-isf roundtrip')
npt.assert_almost_equal(distfn.cdf([0.1,0.9], *arg),
1.0-distfn.sf([0.1,0.9], *arg),
decimal=DECIMAL, err_msg=msg +
' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
# compares pdf at median with numerical derivative of cdf
median = distfn.ppf(0.5, *arg)
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or huge (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg))/eps/2.0
# replace with better diff and better test (more points),
# actually, this works pretty well
npt.assert_almost_equal(pdfv, cdfdiff,
decimal=DECIMAL, err_msg=msg + ' - cdf-pdf relationship')
def check_pdf_logpdf(distfn, args, msg):
# compares pdf at several points with the log of the pdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[pdf != 0]
logpdf = logpdf[np.isfinite(logpdf)]
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg + " - logpdf-log(pdf) relationship")
def check_sf_logsf(distfn, args, msg):
# compares sf at several points with the log of the sf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
sf = distfn.sf(vals, *args)
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg + " - logsf-log(sf) relationship")
def check_cdf_logcdf(distfn, args, msg):
# compares cdf at several points with the log of the cdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
cdf = distfn.cdf(vals, *args)
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg + " - logcdf-log(cdf) relationship")
def check_distribution_rvs(dist, args, alpha, rvs):
# test from scipy.stats.tests
# this version reuses existing random variables
D,pval = stats.kstest(rvs, dist, args=args, N=1000)
if (pval < alpha):
D,pval = stats.kstest(dist,'',args=args, N=1000)
npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
@npt.dec.skipif(NUMPY_BELOW_1_7)
def check_loc_scale(distfn, arg, m, v, msg):
loc, scale = 10.0, 10.0
mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
npt.assert_allclose(m*scale + loc, mt)
npt.assert_allclose(v*scale*scale, vt)
def check_ppf_private(distfn, arg, msg):
#fails by design for truncnorm self.nb not defined
ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg)
npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
if __name__ == "__main__":
npt.run_module_suite()
|
stackforge/yaql | refs/heads/master | yaql/standard_library/common.py | 2 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The common module provides comparison operators for different types.
Comparisons involving the null value are handled separately.
"""
from yaql.language import specs
@specs.name('*equal')
def eq(left, right):
""":yaql:operator =
Returns true if left and right are equal, false otherwise.
    It is a system function and can be used to override the behavior
    of comparisons between objects.
"""
return left == right
@specs.name('*not_equal')
def neq(left, right):
""":yaql:operator !=
Returns true if left and right are not equal, false otherwise.
    It is a system function and can be used to override the behavior
    of comparisons between objects.
"""
return left != right
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_<')
def left_lt_null(left, right):
""":yaql:operator <
Returns false. This function is called when left is not null and
right is null.
:signature: left < right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 < null
false
"""
return False
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_<=')
def left_lte_null(left, right):
""":yaql:operator <=
Returns false. This function is called when left is not null
and right is null.
:signature: left <= right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 <= null
false
"""
return False
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_>')
def left_gt_null(left, right):
""":yaql:operator >
Returns true. This function is called when left is not null
and right is null.
:signature: left > right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 > null
true
"""
return True
@specs.parameter('right', type(None), nullable=True)
@specs.parameter('left', nullable=False)
@specs.name('#operator_>=')
def left_gte_null(left, right):
""":yaql:operator >=
Returns true. This function is called when left is not null
and right is null.
:signature: left >= right
:arg left: left operand
:argType left: not null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> 1 >= null
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_<')
def null_lt_right(left, right):
""":yaql:operator <
Returns true. This function is called when left is null and
right is not.
:signature: left < right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null < 2
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_<=')
def null_lte_right(left, right):
""":yaql:operator <=
Returns true. This function is called when left is null and
right is not.
:signature: left <= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null <= 2
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_>')
def null_gt_right(left, right):
""":yaql:operator >
Returns false. This function is called when left is null and right
is not.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null > 2
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', nullable=False)
@specs.name('#operator_>=')
def null_gte_right(left, right):
""":yaql:operator >=
Returns false. This function is called when left is null and
right is not.
:signature: left >= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: not null
:returnType: boolean
.. code:
yaql> null >= 2
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_<')
def null_lt_null(left, right):
""":yaql:operator <
Returns false. This function is called when left and right are null.
:signature: left < right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null < null
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_<=')
def null_lte_null(left, right):
""":yaql:operator <=
Returns true. This function is called when left and right are null.
:signature: left <= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null <= null
true
"""
return True
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_>')
def null_gt_null(left, right):
""":yaql:operator >
Returns false. This function is called when left and right are null.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null > null
false
"""
return False
@specs.parameter('left', type(None), nullable=True)
@specs.parameter('right', type(None), nullable=True)
@specs.name('#operator_>=')
def null_gte_null(left, right):
""":yaql:operator >=
Returns true. This function is called when left and right are null.
:signature: left >= right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null >= null
true
"""
return True
def register(context):
context.register_function(eq)
context.register_function(neq)
context.register_function(left_lt_null)
context.register_function(left_lte_null)
context.register_function(left_gt_null)
context.register_function(left_gte_null)
context.register_function(null_lt_right)
context.register_function(null_lte_right)
context.register_function(null_gt_right)
context.register_function(null_gte_right)
context.register_function(null_lt_null)
context.register_function(null_lte_null)
context.register_function(null_gt_null)
context.register_function(null_gte_null)
|
CHT5/program-y | refs/heads/master | src/programy/parser/template/nodes/lowercase.py | 3 | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.base import TemplateNode
class TemplateLowercaseNode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
def resolve(self, bot, clientid):
try:
resolved = self.resolve_children_to_string(bot, clientid)
resolved = resolved.lower()
logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
return resolved
except Exception as excep:
logging.exception(excep)
return ""
def to_string(self):
return "LOWERCASE"
def to_xml(self, bot, clientid):
xml = "<lowercase>"
for child in self.children:
xml += child.to_xml(bot, clientid)
xml += "</lowercase>"
return xml
|
kingctan/Misago | refs/heads/master | misago/threads/tests/test_floodprotection_middleware.py | 8 | from datetime import timedelta
from django.utils import timezone
from misago.users.testutils import AuthenticatedUserTestCase
from misago.threads.posting import PostingInterrupt
from misago.threads.posting.floodprotection import (MIN_POSTING_PAUSE,
FloodProtectionMiddleware)
class FloodProtectionMiddlewareTests(AuthenticatedUserTestCase):
def test_flood_protection_middleware_on_no_posts(self):
"""middleware sets last_posted_on on user"""
self.user.update_fields = []
self.assertIsNone(self.user.last_posted_on)
middleware = FloodProtectionMiddleware(user=self.user)
middleware.interrupt_posting(None)
self.assertIsNotNone(self.user.last_posted_on)
def test_flood_protection_middleware_old_posts(self):
"""middleware is not complaining about old post"""
self.user.update_fields = []
original_last_posted_on = timezone.now() - timedelta(days=1)
self.user.last_posted_on = original_last_posted_on
middleware = FloodProtectionMiddleware(user=self.user)
middleware.interrupt_posting(None)
self.assertTrue(self.user.last_posted_on > original_last_posted_on)
def test_flood_protection_middleware_on_flood(self):
"""middleware is complaining about flood"""
self.user.last_posted_on = timezone.now()
with self.assertRaises(PostingInterrupt):
middleware = FloodProtectionMiddleware(user=self.user)
middleware.interrupt_posting(None)
|
yatinkumbhare/openstack-nova | refs/heads/master | nova/virt/hyperv/networkutilsv2.py | 85 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for network related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from nova.i18n import _
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
class NetworkUtilsV2(networkutils.NetworkUtils):
def __init__(self):
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
def get_external_vswitch(self, vswitch_name):
if vswitch_name:
vswitches = self._conn.Msvm_VirtualEthernetSwitch(
ElementName=vswitch_name)
if not len(vswitches):
raise vmutils.HyperVException(_('vswitch "%s" not found')
% vswitch_name)
else:
# Find the vswitch that is connected to the first physical nic.
ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]
lep = ext_port.associators(wmi_result_class='Msvm_LANEndpoint')[0]
lep1 = lep.associators(wmi_result_class='Msvm_LANEndpoint')[0]
esw = lep1.associators(
wmi_result_class='Msvm_EthernetSwitchPort')[0]
vswitches = esw.associators(
wmi_result_class='Msvm_VirtualEthernetSwitch')
if not len(vswitches):
raise vmutils.HyperVException(_('No external vswitch found'))
return vswitches[0].path_()
def create_vswitch_port(self, vswitch_path, port_name):
raise NotImplementedError()
def vswitch_port_needed(self):
return False
|
Jarn/jarn.mkrelease | refs/heads/master | jarn/mkrelease/testing.py | 1 | import sys
import os
import unittest
import tempfile
import shutil
import zipfile
import functools
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
from os.path import realpath, join, dirname, isdir
from lazy import lazy
from jarn.mkrelease.process import Process
from jarn.mkrelease.chdir import ChdirStack, chdir
from jarn.mkrelease.scm import SCMFactory
class JailSetup(unittest.TestCase):
"""Manage a temporary working directory."""
dirstack = None
tempdir = None
def setUp(self):
self.dirstack = ChdirStack()
try:
self.tempdir = realpath(self.mkdtemp())
self.dirstack.push(self.tempdir)
except:
self.cleanUp()
raise
def tearDown(self):
self.cleanUp()
def cleanUp(self):
if self.dirstack is not None:
while self.dirstack:
self.dirstack.pop()
if self.tempdir is not None:
if isdir(self.tempdir):
shutil.rmtree(self.tempdir)
def mkdtemp(self):
return tempfile.mkdtemp()
def mkfile(self, name, body=''):
with open(name, 'wt') as file:
file.write(body)
class SandboxSetup(JailSetup):
"""Put an SCM sandbox into the jail."""
source = None
packagedir = None
def setUp(self):
JailSetup.setUp(self)
try:
package = join(dirname(__file__), 'tests', self.source)
archive = zipfile.ZipFile(package, 'r')
archive.extractall()
os.rename(self.source[:-4], 'testpackage')
self.packagedir = join(self.tempdir, 'testpackage')
except:
self.cleanUp()
raise
class SCMSetup(SandboxSetup):
"""Manage an SCM sandbox."""
name = None
clonedir = None
@lazy
def scm(self):
return SCMFactory().get_scm_from_type(self.name)
def clone(self):
raise NotImplementedError
def destroy(self, dir=None):
if dir is None:
dir = self.packagedir
shutil.rmtree(join(dir, '.'+self.name))
def modify(self, dir):
appendlines(join(dir, 'setup.py'), ['#foo'])
def verify(self, dir):
line = readlines(join(dir, 'setup.py'))[-1]
self.assertEqual(line, '#foo')
def delete(self, dir):
os.remove(join(dir, 'setup.py'))
def remove(self, dir):
raise NotImplementedError
def update(self, dir):
raise NotImplementedError
def tag(self, dir, tagid):
raise NotImplementedError
def branch(self, dir, branchid):
raise NotImplementedError
class SubversionSetup(SCMSetup):
"""Manage a Subversion sandbox."""
name = 'svn'
def setUp(self):
SCMSetup.setUp(self)
try:
self._fake_clone()
except:
self.cleanUp()
raise
@lazy
def source(self):
if self.scm.version_info[:2] >= (1, 8):
return 'testrepo.svn18.zip'
elif self.scm.version_info[:2] >= (1, 7):
return 'testrepo.svn17.zip'
else:
return 'testrepo.svn16.zip'
@lazy
def _fake_source(self):
if self.scm.version_info[:2] >= (1, 8):
return 'testpackage.svn18.zip'
elif self.scm.version_info[:2] >= (1, 7):
return 'testpackage.svn17.zip'
else:
return 'testpackage.svn16.zip'
def clone(self):
process = Process(quiet=True)
process.system('svn checkout file://%s/trunk testclone' % self.packagedir)
self.clonedir = join(self.tempdir, 'testclone')
def _fake_clone(self):
# Fake a checkout, the real thing is too expensive
process = Process(quiet=True)
source = self._fake_source
package = join(dirname(__file__), 'tests', source)
archive = zipfile.ZipFile(package, 'r')
archive.extractall()
os.rename(source[:-4], 'testclone')
self.dirstack.push('testclone')
if self.scm.version_info[:2] >= (1, 7):
url = process.popen('svn info')[1][2][5:]
else:
url = process.popen('svn info')[1][1][5:]
process.system('svn switch --relocate %s file://%s/trunk' % (url, self.packagedir))
self.dirstack.pop()
self.clonedir = join(self.tempdir, 'testclone')
def destroy(self, dir=None):
if dir is None:
dir = self.packagedir
if isdir(join(dir, 'db', 'revs')): # The svn repo
shutil.rmtree(join(dir, 'db'))
else:
for path, dirs, files in os.walk(dir):
if '.svn' in dirs:
shutil.rmtree(join(path, '.svn'))
dirs.remove('.svn')
@chdir
def modifyprop(self, dir):
process = Process(quiet=True)
if self.scm.version_info[:2] >= (1, 8):
process.system('svn propset format "text/x-python" setup.py')
else:
process.system('svn propset svn:format "text/x-python" setup.py')
@chdir
def remove(self, dir):
process = Process(quiet=True)
process.system('svn remove setup.py')
@chdir
def update(self, dir):
process = Process(quiet=True)
process.system('svn update')
def tag(self, dir, tagid):
process = Process(quiet=True)
process.system('svn cp -m"Tag" file://%s/trunk %s' % (self.packagedir, tagid))
process.system('svn co %s testtag' % tagid)
self.tagdir = join(self.tempdir, 'testtag')
def branch(self, dir, branchid):
process = Process(quiet=True)
process.system('svn cp -m"Branch" file://%s/trunk %s' % (self.packagedir, branchid))
process.system('svn co %s testbranch' % branchid)
self.branchdir = join(self.tempdir, 'testbranch')
class MercurialSetup(SCMSetup):
"""Manage a Mercurial sandbox."""
name = 'hg'
source = 'testpackage.hg.zip'
def clone(self):
process = Process(quiet=True)
process.system('hg clone testpackage testclone')
self.clonedir = join(self.tempdir, 'testclone')
@chdir
def remove(self, dir):
process = Process(quiet=True)
process.system('hg remove setup.py')
@chdir
def update(self, dir):
process = Process(quiet=True)
process.system('hg update')
@chdir
def tag(self, dir, tagid):
process = Process(quiet=True)
process.system('hg tag %s' % tagid)
process.system('hg update %s' % tagid)
@chdir
def branch(self, dir, branchid):
process = Process(quiet=True)
process.system('hg branch %s' % branchid)
class GitSetup(SCMSetup):
"""Manage a Git sandbox."""
name = 'git'
source = 'testpackage.git.zip'
def clone(self):
process = Process(quiet=True)
process.system('git clone testpackage testclone')
self.clonedir = join(self.tempdir, 'testclone')
# Park the server on a branch
self.dirstack.push('testpackage')
process.system('git checkout parking')
self.dirstack.pop()
# Make sure the clone is on master
self.dirstack.push('testclone')
process.system('git checkout master')
self.dirstack.pop()
@chdir
def remove(self, dir):
process = Process(quiet=True)
process.system('git rm setup.py')
@chdir
def update(self, dir):
process = Process(quiet=True)
process.system('git checkout master')
@chdir
def tag(self, dir, tagid):
process = Process(quiet=True)
process.system('git tag %s' % tagid)
process.system('git checkout %s' % tagid)
@chdir
def branch(self, dir, branchid):
process = Process(quiet=True)
process.system('git branch %s' % branchid)
process.system('git checkout %s' % branchid)
class MockProcessError(Exception):
"""Raised by MockProcess."""
class MockProcess(Process):
"""A Process we can tell what to return by
- passing rc and lines, or
- passing a function called as ``func(cmd)`` which returns
rc and lines.
"""
def __init__(self, rc=None, lines=None, func=None):
Process.__init__(self, quiet=True)
self.rc = rc or 0
self.lines = lines or []
self.func = func
def popen(self, cmd, echo=True, echo2=True):
if self.func is not None:
rc_lines = self.func(cmd)
if rc_lines is not None:
return rc_lines
else:
raise MockProcessError('Unhandled command: %s' % cmd)
return self.rc, self.lines
def quiet(func):
"""Decorator swallowing stdout and stderr output.
"""
def wrapper(*args, **kw):
saved = sys.stdout, sys.stderr
sys.stdout = sys.stderr = StringIO()
try:
return func(*args, **kw)
finally:
sys.stdout, sys.stderr = saved
return functools.wraps(func)(wrapper)
def readlines(filename):
"""Read lines from file 'filename'.
Lines are not newline terminated.
"""
with open(filename, 'rt') as file:
return file.read().strip().split('\n')
def appendlines(filename, lines):
"""Append 'lines' to file 'filename'.
"""
with open(filename, 'at') as file:
for line in lines:
file.write(line+'\n')
|
ehashman/oh-mainline | refs/heads/master | vendor/packages/Django/django/contrib/gis/tests/geoapp/sitemaps.py | 308 | from __future__ import absolute_import
from django.contrib.gis.sitemaps import GeoRSSSitemap, KMLSitemap, KMZSitemap
from .feeds import feed_dict
from .models import City, Country
sitemaps = {'kml' : KMLSitemap([City, Country]),
'kmz' : KMZSitemap([City, Country]),
'georss' : GeoRSSSitemap(feed_dict),
}
|