repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
yukoba/sympy | refs/heads/master | sympy/physics/vector/frame.py | 47 | from sympy import (diff, trigsimp, expand, sin, cos, solve, Symbol, sympify,
eye, ImmutableMatrix as Matrix)
from sympy.core.compatibility import string_types, u, range
from sympy.physics.vector.vector import Vector, _check_vector
__all__ = ['CoordinateSym', 'ReferenceFrame']
class CoordinateSym(Symbol):
    """
    A coordinate symbol/base scalar associated wrt a Reference Frame.

    Ideally, users should not instantiate this class. Instances of
    this class must only be accessed through the corresponding frame
    as 'frame[index]'.

    CoordinateSyms having the same frame and index parameters are equal
    (even though they may be instantiated separately).

    Parameters
    ==========

    name : string
        The display name of the CoordinateSym

    frame : ReferenceFrame
        The reference frame this base scalar belongs to

    index : 0, 1 or 2
        The index of the dimension denoted by this coordinate variable

    Examples
    ========

    >>> from sympy.physics.vector import ReferenceFrame, CoordinateSym
    >>> A = ReferenceFrame('A')
    >>> A[1]
    A_y
    >>> type(A[0])
    <class 'sympy.physics.vector.frame.CoordinateSym'>
    >>> a_y = CoordinateSym('a_y', A, 1)
    >>> a_y == A[1]
    True

    """

    def __new__(cls, name, frame, index):
        # Build the underlying Symbol first, then attach the (frame, index)
        # identity that equality and hashing are based on.
        obj = super(CoordinateSym, cls).__new__(cls, name)
        _check_frame(frame)
        if index not in range(0, 3):
            raise ValueError("Invalid index specified")
        obj._id = (frame, index)
        return obj

    @property
    def frame(self):
        """The ReferenceFrame this base scalar is associated with."""
        return self._id[0]

    def __eq__(self, other):
        # Equal only to another CoordinateSym with the same frame and index.
        # BUGFIX: previously this returned None (not False) when 'other' was
        # not a CoordinateSym, violating the __eq__ contract; None is falsy
        # so truth-value behavior is unchanged by returning False.
        if isinstance(other, CoordinateSym):
            return self._id == other._id
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash must agree with __eq__: depend only on (frame, index).
        return hash((hash(self._id[0]), self._id[1]))
class ReferenceFrame(object):
"""A reference frame in classical mechanics.
ReferenceFrame is a class used to represent a reference frame in classical
mechanics. It has a standard basis of three unit vectors in the frame's
x, y, and z directions.
It also can have a rotation relative to a parent frame; this rotation is
defined by a direction cosine matrix relating this frame's basis vectors to
the parent frame's basis vectors. It can also have an angular velocity
vector, defined in another frame.
"""
def __init__(self, name, indices=None, latexs=None, variables=None):
"""ReferenceFrame initialization method.
A ReferenceFrame has a set of orthonormal basis vectors, along with
orientations relative to other ReferenceFrames and angular velocities
relative to other ReferenceFrames.
Parameters
==========
indices : list (of strings)
If custom indices are desired for console, pretty, and LaTeX
printing, supply three as a list. The basis vectors can then be
accessed with the get_item method.
latexs : list (of strings)
If custom names are desired for LaTeX printing of each basis
vector, supply the names here in a list.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> N = ReferenceFrame('N')
>>> N.x
N.x
>>> O = ReferenceFrame('O', indices=('1', '2', '3'))
>>> O.x
O['1']
>>> O['1']
O['1']
>>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3'))
>>> vlatex(P.x)
'A1'
"""
if not isinstance(name, string_types):
raise TypeError('Need to supply a valid name')
# The if statements below are for custom printing of basis-vectors for
# each frame.
# First case, when custom indices are supplied
if indices is not None:
if not isinstance(indices, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(indices) != 3:
raise ValueError('Supply 3 indices')
for i in indices:
if not isinstance(i, string_types):
raise TypeError('Indices must be strings')
self.str_vecs = [(name + '[\'' + indices[0] + '\']'),
(name + '[\'' + indices[1] + '\']'),
(name + '[\'' + indices[2] + '\']')]
self.pretty_vecs = [(name.lower() + u("_") + indices[0]),
(name.lower() + u("_") + indices[1]),
(name.lower() + u("_") + indices[2])]
self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[0])), (r"\mathbf{\hat{%s}_{%s}}" %
(name.lower(), indices[1])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[2]))]
self.indices = indices
# Second case, when no custom indices are supplied
else:
self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')]
self.pretty_vecs = [name.lower() + u("_x"),
name.lower() + u("_y"),
name.lower() + u("_z")]
self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()),
(r"\mathbf{\hat{%s}_y}" % name.lower()),
(r"\mathbf{\hat{%s}_z}" % name.lower())]
self.indices = ['x', 'y', 'z']
# Different step, for custom latex basis vectors
if latexs is not None:
if not isinstance(latexs, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(latexs) != 3:
raise ValueError('Supply 3 indices')
for i in latexs:
if not isinstance(i, string_types):
raise TypeError('Latex entries must be strings')
self.latex_vecs = latexs
self.name = name
self._var_dict = {}
#The _dcm_dict dictionary will only store the dcms of parent-child
#relationships. The _dcm_cache dictionary will work as the dcm
#cache.
self._dcm_dict = {}
self._dcm_cache = {}
self._ang_vel_dict = {}
self._ang_acc_dict = {}
self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
self._cur = 0
self._x = Vector([(Matrix([1, 0, 0]), self)])
self._y = Vector([(Matrix([0, 1, 0]), self)])
self._z = Vector([(Matrix([0, 0, 1]), self)])
#Associate coordinate symbols wrt this frame
if variables is not None:
if not isinstance(variables, (tuple, list)):
raise TypeError('Supply the variable names as a list/tuple')
if len(variables) != 3:
raise ValueError('Supply 3 variable names')
for i in variables:
if not isinstance(i, string_types):
raise TypeError('Variable names must be strings')
else:
variables = [name + '_x', name + '_y', name + '_z']
self.varlist = (CoordinateSym(variables[0], self, 0), \
CoordinateSym(variables[1], self, 1), \
CoordinateSym(variables[2], self, 2))
def __getitem__(self, ind):
"""
Returns basis vector for the provided index, if the index is a string.
If the index is a number, returns the coordinate variable correspon-
-ding to that index.
"""
if not isinstance(ind, str):
if ind < 3:
return self.varlist[ind]
else:
raise ValueError("Invalid index provided")
if self.indices[0] == ind:
return self.x
if self.indices[1] == ind:
return self.y
if self.indices[2] == ind:
return self.z
else:
raise ValueError('Not a defined index')
def __iter__(self):
return iter([self.x, self.y, self.z])
def __str__(self):
"""Returns the name of the frame. """
return self.name
__repr__ = __str__
    def _dict_list(self, other, num):
        """Creates a list from self to other using _dcm_dict. """
        # Path search over the graph of connected frames. 'num' selects the
        # relationship dictionary to traverse: 0 -> _dcm_dict,
        # 1 -> _ang_vel_dict, 2 -> _ang_acc_dict (see self._dlist).
        outlist = [[self]]  # every partial path found so far (lists of frames)
        oldlist = [[]]
        # Keep extending paths until a full pass adds no new path.
        while outlist != oldlist:
            oldlist = outlist[:]
            for i, v in enumerate(outlist):
                # Frames directly connected to the last frame on this path.
                templist = v[-1]._dlist[num].keys()
                for i2, v2 in enumerate(templist):
                    if not v.__contains__(v2):
                        # Extend the path with a frame it does not yet visit.
                        littletemplist = v + [v2]
                        if not outlist.__contains__(littletemplist):
                            outlist.append(littletemplist)
        # Discard every path that does not terminate at the target frame.
        for i, v in enumerate(oldlist):
            if v[-1] != other:
                outlist.remove(v)
        # The shortest surviving path connects self to other.
        outlist.sort(key=len)
        if len(outlist) != 0:
            return outlist[0]
        raise ValueError('No Connecting Path found between ' + self.name +
                         ' and ' + other.name)
    def _w_diff_dcm(self, otherframe):
        """Angular velocity from time differentiating the DCM. """
        from sympy.physics.vector.functions import dynamicsymbols
        dcm2diff = self.dcm(otherframe)
        diffed = dcm2diff.diff(dynamicsymbols._t)
        # d(DCM)/dt * DCM^T; the angular velocity components are read from
        # flat entries 7, 2 and 3 of this matrix (row-major indexing).
        angvelmat = diffed * dcm2diff.T
        w1 = trigsimp(expand(angvelmat[7]), recursive=True)
        w2 = trigsimp(expand(angvelmat[2]), recursive=True)
        w3 = trigsimp(expand(angvelmat[3]), recursive=True)
        # Negated and expressed in this frame's basis.
        return -Vector([(Matrix([w1, w2, w3]), self)])
    def variable_map(self, otherframe):
        """
        Returns a dictionary which expresses the coordinate variables
        of this frame in terms of the variables of otherframe.

        If Vector.simp is True, returns a simplified version of the mapped
        values. Else, returns them without simplification.

        Simplification of the expressions may take time.

        Parameters
        ==========

        otherframe : ReferenceFrame
            The other frame to map the variables to

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
        >>> A = ReferenceFrame('A')
        >>> q = dynamicsymbols('q')
        >>> B = A.orientnew('B', 'Axis', [q, A.z])
        >>> A.variable_map(B)
        {A_x: B_x*cos(q(t)) - B_y*sin(q(t)), A_y: B_x*sin(q(t)) + B_y*cos(q(t)), A_z: B_z}

        """
        _check_frame(otherframe)
        # Results are cached per (frame, Vector.simp) pair, since the mapped
        # expressions differ depending on the simplification flag.
        if (otherframe, Vector.simp) in self._var_dict:
            return self._var_dict[(otherframe, Vector.simp)]
        else:
            # Rotate otherframe's base scalars into this frame via the DCM.
            vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist)
            mapping = {}
            for i, x in enumerate(self):
                if Vector.simp:
                    mapping[self.varlist[i]] = trigsimp(vars_matrix[i], method='fu')
                else:
                    mapping[self.varlist[i]] = vars_matrix[i]
            self._var_dict[(otherframe, Vector.simp)] = mapping
            return mapping
def ang_acc_in(self, otherframe):
"""Returns the angular acceleration Vector of the ReferenceFrame.
Effectively returns the Vector:
^N alpha ^B
which represent the angular acceleration of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular acceleration is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
_check_frame(otherframe)
if otherframe in self._ang_acc_dict:
return self._ang_acc_dict[otherframe]
else:
return self.ang_vel_in(otherframe).dt(otherframe)
def ang_vel_in(self, otherframe):
"""Returns the angular velocity Vector of the ReferenceFrame.
Effectively returns the Vector:
^N omega ^B
which represent the angular velocity of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular velocity is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
_check_frame(otherframe)
flist = self._dict_list(otherframe, 1)
outvec = Vector(0)
for i in range(len(flist) - 1):
outvec += flist[i]._ang_vel_dict[flist[i + 1]]
return outvec
    def dcm(self, otherframe):
        """The direction cosine matrix between frames.

        This gives the DCM between this frame and the otherframe.
        The format is N.xyz = N.dcm(B) * B.xyz
        A SymPy Matrix is returned.

        Parameters
        ==========

        otherframe : ReferenceFrame
            The otherframe which the DCM is generated to.

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, Vector
        >>> from sympy import symbols
        >>> q1 = symbols('q1')
        >>> N = ReferenceFrame('N')
        >>> A = N.orientnew('A', 'Axis', [q1, N.x])
        >>> N.dcm(A)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), -sin(q1)],
        [0, sin(q1), cos(q1)]])

        """
        _check_frame(otherframe)
        # Check if the dcm wrt that frame has already been calculated
        if otherframe in self._dcm_cache:
            return self._dcm_cache[otherframe]
        # Multiply the direct parent-child DCMs along the shortest connecting
        # path between the two frames.
        flist = self._dict_list(otherframe, 0)
        outdcm = eye(3)
        for i in range(len(flist) - 1):
            outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]]
        # After calculation, store the dcm in dcm cache for faster
        # future retrieval; the reverse mapping is just the transpose.
        self._dcm_cache[otherframe] = outdcm
        otherframe._dcm_cache[self] = outdcm.T
        return outdcm
def orient(self, parent, rot_type, amounts, rot_order=''):
"""Defines the orientation of this frame relative to a parent frame.
Parameters
==========
parent : ReferenceFrame
The frame that this ReferenceFrame will have its orientation matrix
defined in relation to.
rot_type : str
The type of orientation matrix that is being created. Supported
types are 'Body', 'Space', 'Quaternion', and 'Axis'. See examples
for correct usage.
amounts : list OR value
The quantities that the orientation matrix will be defined by.
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q0, q1, q2, q3, q4 = symbols('q0 q1 q2 q3 q4')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
Now we have a choice of how to implement the orientation. First is
Body. Body orientation takes this reference frame through three
successive simple rotations. Acceptable rotation orders are of length
3, expressed in XYZ or 123, and cannot have a rotation about about an
axis twice in a row.
>>> B.orient(N, 'Body', [q1, q2, q3], '123')
>>> B.orient(N, 'Body', [q1, q2, 0], 'ZXZ')
>>> B.orient(N, 'Body', [0, 0, 0], 'XYX')
Next is Space. Space is like Body, but the rotations are applied in the
opposite order.
>>> B.orient(N, 'Space', [q1, q2, q3], '312')
Next is Quaternion. This orients the new ReferenceFrame with
Quaternions, defined as a finite rotation about lambda, a unit vector,
by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
>>> B.orient(N, 'Quaternion', [q0, q1, q2, q3])
Last is Axis. This is a rotation about an arbitrary, non-time-varying
axis by some angle. The axis is supplied as a Vector. This is how
simple rotations are defined.
>>> B.orient(N, 'Axis', [q1, N.x + 2 * N.y])
"""
from sympy.physics.vector.functions import dynamicsymbols
_check_frame(parent)
amounts = list(amounts)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
def _rot(axis, angle):
"""DCM for simple axis 1,2,or 3 rotations. """
if axis == 1:
return Matrix([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == 2:
return Matrix([[cos(angle), 0, sin(angle)],
[0, 1, 0],
[-sin(angle), 0, cos(angle)]])
elif axis == 3:
return Matrix([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
rot_order = str(
rot_order).upper() # Now we need to make sure XYZ = 123
rot_type = rot_type.upper()
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if not rot_order in approved_orders:
raise TypeError('The supplied order is not an approved type')
parent_orient = []
if rot_type == 'AXIS':
if not rot_order == '':
raise TypeError('Axis orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):
raise TypeError('Amounts are a list or tuple of length 2')
theta = amounts[0]
axis = amounts[1]
axis = _check_vector(axis)
if not axis.dt(parent) == 0:
raise ValueError('Axis cannot be time-varying')
axis = axis.express(parent).normalize()
axis = axis.args[0][0]
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T)
elif rot_type == 'QUATERNION':
if not rot_order == '':
raise TypeError(
'Quaternion orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):
raise TypeError('Amounts are a list or tuple of length 4')
q0, q1, q2, q3 = amounts
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 **
2, 2 * (q1 * q2 - q0 * q3), 2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 *
q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]]))
elif rot_type == 'BODY':
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1])
* _rot(a3, amounts[2]))
elif rot_type == 'SPACE':
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Space orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1])
* _rot(a1, amounts[0]))
else:
raise NotImplementedError('That is not an implemented rotation')
#Reset the _dcm_cache of this frame, and remove it from the _dcm_caches
#of the frames it is linked to. Also remove it from the _dcm_dict of
#its parent
frames = self._dcm_cache.keys()
for frame in frames:
if frame in self._dcm_dict:
del frame._dcm_dict[self]
del frame._dcm_cache[self]
#Add the dcm relationship to _dcm_dict
self._dcm_dict = self._dlist[0] = {}
self._dcm_dict.update({parent: parent_orient.T})
parent._dcm_dict.update({self: parent_orient})
#Also update the dcm cache after resetting it
self._dcm_cache = {}
self._dcm_cache.update({parent: parent_orient.T})
parent._dcm_cache.update({self: parent_orient})
if rot_type == 'QUATERNION':
t = dynamicsymbols._t
q0, q1, q2, q3 = amounts
q0d = diff(q0, t)
q1d = diff(q1, t)
q2d = diff(q2, t)
q3d = diff(q3, t)
w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)
w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)
w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)
wvec = Vector([(Matrix([w1, w2, w3]), self)])
elif rot_type == 'AXIS':
thetad = (amounts[0]).diff(dynamicsymbols._t)
wvec = thetad * amounts[1].express(parent).normalize()
else:
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = dynamicsymbols('u1, u2, u3')
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
rot_type, rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
def orientnew(self, newname, rot_type, amounts, rot_order='', variables=None,
indices=None, latexs=None):
"""Creates a new ReferenceFrame oriented with respect to this Frame.
See ReferenceFrame.orient() for acceptable rotation types, amounts,
and orders. Parent is going to be self.
Parameters
==========
newname : str
The name for the new ReferenceFrame
rot_type : str
The type of orientation matrix that is being created.
amounts : list OR value
The quantities that the orientation matrix will be defined by.
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
.orient() documentation:\n
========================
"""
newframe = self.__class__(newname, variables, indices, latexs)
newframe.orient(self, rot_type, amounts, rot_order)
return newframe
orientnew.__doc__ += orient.__doc__
def set_ang_acc(self, otherframe, value):
"""Define the angular acceleration Vector in a ReferenceFrame.
Defines the angular acceleration of this ReferenceFrame, in another.
Angular acceleration can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular acceleration in
value : Vector
The Vector representing angular acceleration
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_acc_dict.update({otherframe: value})
otherframe._ang_acc_dict.update({self: -value})
def set_ang_vel(self, otherframe, value):
"""Define the angular velocity vector in a ReferenceFrame.
Defines the angular velocity of this ReferenceFrame, in another.
Angular velocity can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular velocity in
value : Vector
The Vector representing angular velocity
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_vel_dict.update({otherframe: value})
otherframe._ang_vel_dict.update({self: -value})
    # Read-only accessors for the frame's fixed orthonormal basis vectors,
    # which are constructed once in __init__ and never reassigned.
    @property
    def x(self):
        """The basis Vector for the ReferenceFrame, in the x direction. """
        return self._x

    @property
    def y(self):
        """The basis Vector for the ReferenceFrame, in the y direction. """
        return self._y

    @property
    def z(self):
        """The basis Vector for the ReferenceFrame, in the z direction. """
        return self._z
def _check_frame(other):
    """Raise VectorTypeError unless 'other' is a ReferenceFrame."""
    # Imported lazily to avoid a circular import with the vector module.
    from .vector import VectorTypeError
    if not isinstance(other, ReferenceFrame):
        raise VectorTypeError(other, ReferenceFrame('A'))
|
Endika/django-debug-toolbar | refs/heads/master | tests/panels/test_template.py | 3 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.template import Context, RequestContext, Template
from ..base import BaseTestCase
from ..models import NonAsciiRepr
class TemplatesPanelTestCase(BaseTestCase):
    """Tests for the debug toolbar's templates panel."""

    def setUp(self):
        # Instrument both the templates panel and the SQL panel: the
        # queryset-hook test checks that recording template contexts does
        # not itself log SQL queries.
        super(TemplatesPanelTestCase, self).setUp()
        self.panel = self.toolbar.get_panel_by_id('TemplatesPanel')
        self.panel.enable_instrumentation()
        self.sql_panel = self.toolbar.get_panel_by_id('SQLPanel')
        self.sql_panel.enable_instrumentation()

    def tearDown(self):
        # Disable instrumentation in reverse order of setUp.
        self.sql_panel.disable_instrumentation()
        self.panel.disable_instrumentation()
        super(TemplatesPanelTestCase, self).tearDown()

    def test_queryset_hook(self):
        # Render a template whose context holds querysets (one nested).
        t = Template("No context variables here!")
        c = Context({
            'queryset': User.objects.all(),
            'deep_queryset': {
                'queryset': User.objects.all(),
            }
        })
        t.render(c)

        # ensure the query was NOT logged
        self.assertEqual(len(self.sql_panel._queries), 0)

        # The recorded context should contain placeholder strings instead of
        # evaluated querysets.
        ctx = self.panel.templates[0]['context'][1]
        self.assertIn('<<queryset of auth.User>>', ctx)
        self.assertIn('<<triggers database query>>', ctx)

    def test_object_with_non_ascii_repr_in_context(self):
        # A context object with a non-ASCII repr must survive rendering and
        # appear in the panel content.
        self.panel.process_request(self.request)
        t = Template("{{ object }}")
        c = Context({'object': NonAsciiRepr()})
        t.render(c)
        self.panel.process_response(self.request, self.response)
        self.panel.generate_stats(self.request, self.response)
        self.assertIn('nôt åscíì', self.panel.content)

    def test_insert_content(self):
        """
        Test that the panel only inserts content after generate_stats and
        not the process_response.
        """
        t = Template("{{ object }}")
        c = Context({'object': NonAsciiRepr()})
        t.render(c)
        self.panel.process_response(self.request, self.response)
        # ensure the panel does not have content yet.
        self.assertNotIn('nôt åscíì', self.panel.content)
        self.panel.generate_stats(self.request, self.response)
        # ensure the panel renders correctly.
        self.assertIn('nôt åscíì', self.panel.content)

    def test_custom_context_processor(self):
        # The panel should report the dotted path of any custom context
        # processor used during rendering.
        self.panel.process_request(self.request)
        t = Template("{{ content }}")
        c = RequestContext(self.request, processors=[context_processor])
        t.render(c)
        self.panel.process_response(self.request, self.response)
        self.panel.generate_stats(self.request, self.response)
        self.assertIn('tests.panels.test_template.context_processor', self.panel.content)

    def test_disabled(self):
        # Listing the panel in DISABLE_PANELS should turn it off.
        config = {
            'DISABLE_PANELS': set([
                'debug_toolbar.panels.templates.TemplatesPanel'])
        }
        self.assertTrue(self.panel.enabled)
        with self.settings(DEBUG_TOOLBAR_CONFIG=config):
            self.assertFalse(self.panel.enabled)
def context_processor(request):
    """Minimal context processor used by the custom-processor panel test."""
    return dict(content='set by processor')
|
abhilashnta/edx-platform | refs/heads/master | common/test/acceptance/tests/studio/test_studio_acid_xblock.py | 130 | """
Acceptance tests for Studio related to the acid xblock.
"""
from bok_choy.web_app_test import WebAppTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(WebAppTest):
    """
    Base class for tests that verify that XBlock integration is working correctly
    """
    __test__ = False

    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(XBlockAcidBase, self).setUp()

        # Define a unique course identifier
        self.course_info = {
            'org': 'test_org',
            'number': 'course_' + self.unique_id[:5],
            'run': 'test_' + self.unique_id,
            'display_name': 'Test Course ' + self.unique_id
        }

        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        self.course_id = '{org}.{number}.{run}'.format(**self.course_info)

        # Subclasses provide setup_fixtures() to install course content and
        # set self.user.
        self.setup_fixtures()

        # Authenticate as the (non-staff) user created by the fixtures.
        self.auth_page = AutoAuthPage(
            self.browser,
            staff=False,
            username=self.user.get('username'),
            email=self.user.get('email'),
            password=self.user.get('password')
        )
        self.auth_page.visit()

    def validate_acid_block_preview(self, acid_block):
        """
        Validate the Acid Block's preview
        """
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
        self.assertTrue(acid_block.scope_passed('user_state'))
        self.assertTrue(acid_block.scope_passed('user_state_summary'))
        self.assertTrue(acid_block.scope_passed('preferences'))
        self.assertTrue(acid_block.scope_passed('user_info'))

    def test_acid_block_preview(self):
        """
        Verify that all expected acid block tests pass in studio preview
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.expand_subsection().unit('Test Unit').go_to()
        acid_block = AcidView(self.browser, unit.xblocks[0].preview_selector)
        self.validate_acid_block_preview(acid_block)

    def test_acid_block_editor(self):
        """
        Verify that all expected acid block tests pass in studio editor
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.expand_subsection().unit('Test Unit').go_to()
        # Open the editing view of the block rather than its preview.
        acid_block = AcidView(self.browser, unit.xblocks[0].edit().editor_selector)
        self.assertTrue(acid_block.init_fn_passed)
        self.assertTrue(acid_block.resource_url_passed)
class XBlockAcidNoChildTest(XBlockAcidBase):
    """
    Tests of an AcidBlock with no children
    """
    __test__ = True

    def setup_fixtures(self):
        """Install a course whose single unit contains one childless acid block."""
        course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        # Build the course tree bottom-up: unit -> subsection -> section.
        unit = XBlockFixtureDesc('vertical', 'Test Unit').add_children(
            XBlockFixtureDesc('acid', 'Acid Block')
        )
        subsection = XBlockFixtureDesc('sequential', 'Test Subsection').add_children(unit)
        section = XBlockFixtureDesc('chapter', 'Test Section').add_children(subsection)
        course_fixture.add_children(section).install()
        self.user = course_fixture.user
class XBlockAcidParentBase(XBlockAcidBase):
    """
    Base class for tests that verify that parent XBlock integration is working correctly
    """
    __test__ = False

    def validate_acid_block_preview(self, acid_block):
        # In addition to the base checks, a parent block must report that its
        # child-related tests passed.
        super(XBlockAcidParentBase, self).validate_acid_block_preview(acid_block)
        self.assertTrue(acid_block.child_tests_passed)

    def test_acid_block_preview(self):
        """
        Verify that all expected acid block tests pass in studio preview
        """
        self.outline.visit()
        subsection = self.outline.section('Test Section').subsection('Test Subsection')
        unit = subsection.expand_subsection().unit('Test Unit').go_to()
        # Parent blocks are validated inside their container page, not the
        # unit page itself.
        container = unit.xblocks[0].go_to_container()
        acid_block = AcidView(self.browser, container.xblocks[0].preview_selector)
        self.validate_acid_block_preview(acid_block)
class XBlockAcidEmptyParentTest(XBlockAcidParentBase):
    """
    Tests of an AcidBlock with children
    """
    __test__ = True

    def setup_fixtures(self):
        """Install a course whose unit contains an acid parent block with no children."""
        course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        # Build the course tree bottom-up; the parent block deliberately
        # receives no children (the empty add_children() call is preserved
        # from the original).
        parent_block = XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children()
        unit = XBlockFixtureDesc('vertical', 'Test Unit').add_children(parent_block)
        subsection = XBlockFixtureDesc('sequential', 'Test Subsection').add_children(unit)
        section = XBlockFixtureDesc('chapter', 'Test Section').add_children(subsection)
        course_fixture.add_children(section).install()
        self.user = course_fixture.user
class XBlockAcidChildTest(XBlockAcidParentBase):
    """
    Tests of an AcidBlock with children
    """
    __test__ = True

    def setup_fixtures(self):
        """Install a course whose unit contains an acid parent block with three children."""
        course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        # Build the course tree bottom-up: parent block with three children,
        # then unit -> subsection -> section.
        parent_block = XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
            XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
            XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
            XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
        )
        unit = XBlockFixtureDesc('vertical', 'Test Unit').add_children(parent_block)
        subsection = XBlockFixtureDesc('sequential', 'Test Subsection').add_children(unit)
        section = XBlockFixtureDesc('chapter', 'Test Section').add_children(subsection)
        course_fixture.add_children(section).install()
        self.user = course_fixture.user

    # These overrides only delegate to the parent implementations; they are
    # kept to preserve the class's original method set.
    def test_acid_block_preview(self):
        super(XBlockAcidChildTest, self).test_acid_block_preview()

    def test_acid_block_editor(self):
        super(XBlockAcidChildTest, self).test_acid_block_editor()
|
biodrone/plex-desk | refs/heads/master | desk/flask/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py | 1775 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
# Confidence floor used when reporting detection results.
MINIMUM_THRESHOLD = 0.20
# Input-state values for UniversalDetector._mInputState:
ePureAscii = 0  # only plain ASCII bytes seen so far
eEscAscii = 1   # ASCII plus escape sequences (matched by _escDetector)
eHighbyte = 2   # at least one byte with the high bit set has been seen
class UniversalDetector:
    def __init__(self):
        # Matches any byte with the high bit set (non-ASCII).
        self._highBitDetector = re.compile(b'[\x80-\xFF]')
        # Matches the ESC byte or '~{' — escape-sequence openers
        # (ISO-2022-style / HZ encodings, per the escprober import above).
        self._escDetector = re.compile(b'(\033|~{)')
        self._mEscCharSetProber = None
        self._mCharSetProbers = []
        # All remaining per-run state is initialized in reset().
        self.reset()
    def reset(self):
        """Reset the detector so it can be reused for a new byte stream."""
        self.result = {'encoding': None, 'confidence': 0.0}
        self.done = False
        self._mStart = True
        self._mGotData = False   # set by feed() once any data arrives (BOM check)
        self._mInputState = ePureAscii
        self._mLastChar = b''    # trailing byte of the previous chunk, for
                                 # escape detection across chunk boundaries
        # Probers are created lazily by feed(); reset any that already exist.
        if self._mEscCharSetProber:
            self._mEscCharSetProber.reset()
        for prober in self._mCharSetProbers:
            prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
sys.stderr.write('no probers hit minimum threshhold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
|
hmoraes/dynamic-stream-server | refs/heads/master | dss/web_handlers/info.py | 2 | import tornado.web
from bson import json_util
from .. import providers
# Valid values of the handler's 'opt' argument, joined with '|' —
# presumably interpolated into a URL route regex; confirm against the router.
options = '|'.join([
    'provider', 'stream',
])
class InfoHandler(tornado.web.RequestHandler):
    """Serve JSON information about providers and their streams.

    GET/POST /<opt>[/<id>] where opt is 'provider' or 'stream'.
    """
    def get(self, opt, id=None, **kw):
        payload = None
        if opt == 'provider':
            enabled = providers.Providers.enabled()
            if id:
                try:
                    payload = list(enabled[id].stream_data().values())
                except KeyError:
                    # Unknown provider id.
                    self.set_status(404)
                    return
            else:
                # No id: list every enabled provider.
                payload = [{'name': prov.name, 'id': key}
                           for key, prov in enabled.items()]
        elif opt == 'stream':
            if not id:
                # A stream id is mandatory for this endpoint.
                self.set_status(404)
                return
            payload = providers.Providers.select(id).get_stream_data(id)
        # Any other opt falls through and serializes None (JSON null),
        # matching the original behavior.
        self.set_header('Content-Type', 'application/json')
        self.finish(json_util.dumps(payload))
    post = get
|
wesyoung/pyzyre | refs/heads/master | buildutils/zmq/configure.py | 1 | from __future__ import with_statement, print_function
import os
import shutil
import sys
from distutils.ccompiler import get_default_compiler
from distutils.ccompiler import new_compiler
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
from glob import glob
from os.path import basename, join as pjoin
from os.path import basename, join as pjoin
from subprocess import Popen, PIPE
from .bundle import bundled_version, fetch_libzmq, localpath
from .msg import fatal, warn, info, line
libzmq_name = 'libzmq'
pypy = 'PyPy' in sys.version  # running under PyPy?
# reference points for zmq compatibility
min_legacy_zmq = (2, 1, 4)
min_good_zmq = (3, 2)
target_zmq = bundled_version
# Next minor version after the bundled one (used as a dev-version ceiling).
dev_zmq = (target_zmq[0], target_zmq[1] + 1, 0)
HERE = os.path.dirname(__file__)
ROOT = os.path.dirname(HERE)
# set dylib ext:
if sys.platform.startswith('win'):
    lib_ext = '.dll'
elif sys.platform == 'darwin':
    lib_ext = '.dylib'
else:
    lib_ext = '.so'
def stage_platform_header(zmqroot):
    """Ensure src/platform.hpp exists under *zmqroot*.

    Preferred path on POSIX is to run autogen.sh + ./configure, which
    generates the header itself; the pre-baked per-platform headers are
    only copied in as a fallback when ./configure fails (or always, on
    Windows, from the msvc build dir).
    """
    platform_h = pjoin(zmqroot, 'src', 'platform.hpp')
    if os.path.exists(platform_h):
        info("already have platform.hpp")
        return
    if os.name == 'nt':
        # stage msvc platform header
        platform_dir = pjoin(zmqroot, 'builds', 'msvc')
    else:
        if not os.path.exists(pjoin(zmqroot, 'configure')):
            info('attempting bash autogen.sh')
            p = Popen('./autogen.sh', cwd=zmqroot, shell=True, stdout=PIPE, stderr=PIPE)
            o, e = p.communicate()
            if p.returncode:
                raise RuntimeError('Failed to run autoconf...')
        info("attempting ./configure to generate platform.h")
        p = Popen('./configure', cwd=zmqroot, shell=True, stdout=PIPE, stderr=PIPE)
        o, e = p.communicate()
        if p.returncode:
            # NOTE(review): message says 'libczmq' but this is libzmq.
            warn("failed to configure libczmq:\n%s" % e)
            if sys.platform == 'darwin':
                platform_dir = pjoin(HERE, 'include_darwin')
            elif sys.platform.startswith('freebsd'):
                platform_dir = pjoin(HERE, 'include_freebsd')
            elif sys.platform.startswith('linux-armv'):
                # NOTE(review): sys.platform is normally 'linux'/'linux2';
                # this branch looks unreachable — confirm intent.
                platform_dir = pjoin(HERE, 'include_linux-armv')
            else:
                platform_dir = pjoin(HERE, 'include_linux')
        else:
            # ./configure succeeded and generated platform.hpp itself.
            return
    info("staging platform.h from: %s" % platform_dir)
    shutil.copy(pjoin(platform_dir, 'platform.h'), platform_h)
class Configure(build_ext):
    """Configure command adapted from h5py.

    Discovers the ZMQ version/features and registers a bundled libzmq
    Extension when building from the vendored sources.
    """
    description = "Discover ZMQ version and features"
    user_options = build_ext.user_options + [
        ('zmq=', None, "libzmq install prefix"),
        ('build-base=', 'b', "base directory for build library"), # build_base from build
        ('embed', None, '')
    ]
    def initialize_options(self):
        build_ext.initialize_options(self)
        self.zmq = None
        self.build_base = 'build'
        self.embed = False
    # DON'T REMOVE: distutils demands these be here even if they do nothing.
    def finalize_options(self):
        build_ext.finalize_options(self)
        # Scratch directory for feature-detection compiles.
        self.tempdir = pjoin(self.build_temp, 'scratch')
        self.has_run = False
    def create_tempdir(self):
        """(Re)create the scratch dir; on Windows also stage libzmq.dll."""
        self.erase_tempdir()
        os.makedirs(self.tempdir)
        if sys.platform.startswith('win'):
            # fetch libzmq.dll into local dir
            local_dll = pjoin(self.tempdir, libzmq_name + '.dll')
            if not self.config['zmq_prefix'] and not os.path.exists(local_dll):
                fatal("ZMQ directory must be specified on Windows via setup.cfg"
                      " or 'python setup.py configure --zmq=/path/to/zeromq2'")
            try:
                shutil.copy(pjoin(self.config['zmq_prefix'], 'lib', libzmq_name + '.dll'), local_dll)
            except Exception:
                if not os.path.exists(local_dll):
                    warn("Could not copy " + libzmq_name + " into zmq/, which is usually necessary on Windows."
                         "Please specify zmq prefix via configure --zmq=/path/to/zmq or copy "
                         + libzmq_name + " into zmq/ manually.")
    def erase_tempdir(self):
        """Remove the scratch dir, ignoring errors (it may not exist)."""
        try:
            shutil.rmtree(self.tempdir)
        except Exception:
            pass
    @property
    def compiler_type(self):
        # self.compiler may be unset, a name string, or a CCompiler instance.
        compiler = self.compiler
        if compiler is None:
            return get_default_compiler()
        elif isinstance(compiler, str):
            return compiler
        else:
            return compiler.compiler_type
    @property
    def cross_compiling(self):
        # True when bdist_egg targets a platform other than the build host.
        return self.config['bdist_egg'].get('plat-name', sys.platform) != sys.platform
    def bundle_libzmq_extension(self):
        """Register the vendored libzmq sources as the 'czmq.libzmq' Extension.

        Idempotent: returns immediately if the extension is already present.
        """
        bundledir = "bundled"
        ext_modules = self.distribution.ext_modules
        if ext_modules and any(m.name == 'czmq.libzmq' for m in ext_modules):
            # I've already been run
            return
        line()
        info("Using bundled libzmq")
        # fetch sources for libzmq extension:
        if not os.path.exists(bundledir):
            os.makedirs(bundledir)
        fetch_libzmq(bundledir)
        stage_platform_header(pjoin(bundledir, 'zeromq'))
        sources = [pjoin('buildutils', 'zmq', 'initlibzmq.c')]
        sources += glob(pjoin(bundledir, 'zeromq', 'src', '*.cpp'))
        includes = [
            pjoin(bundledir, 'zeromq', 'include')
        ]
        if bundled_version < (4, 2, 0):
            # Older libzmq ships tweetnacl separately, with a per-platform
            # randombytes implementation.
            tweetnacl = pjoin(bundledir, 'zeromq', 'tweetnacl')
            tweetnacl_sources = glob(pjoin(tweetnacl, 'src', '*.c'))
            randombytes = pjoin(tweetnacl, 'contrib', 'randombytes')
            if sys.platform.startswith('win'):
                tweetnacl_sources.append(pjoin(randombytes, 'winrandom.c'))
            else:
                tweetnacl_sources.append(pjoin(randombytes, 'devurandom.c'))
            sources += tweetnacl_sources
            includes.append(pjoin(tweetnacl, 'src'))
            includes.append(randombytes)
        else:
            # >= 4.2
            sources += glob(pjoin(bundledir, 'zeromq', 'src', 'tweetnacl.c'))
        libzmq = Extension(
            'czmq.libzmq',
            sources=sources,
            include_dirs=includes,
        )
        # http://stackoverflow.com/a/32765319/7205341
        if sys.platform == 'darwin':
            from distutils import sysconfig
            vars = sysconfig.get_config_vars()
            vars['LDSHARED'] = vars['LDSHARED'].replace('-bundle', '-dynamiclib')
        # register the extension:
        self.distribution.ext_modules.insert(0, libzmq)
        # use tweetnacl to provide CURVE support
        libzmq.define_macros.append(('ZMQ_HAVE_CURVE', 1))
        libzmq.define_macros.append(('ZMQ_USE_TWEETNACL', 1))
        # select polling subsystem based on platform
        if sys.platform == 'darwin' or 'bsd' in sys.platform:
            libzmq.define_macros.append(('ZMQ_USE_KQUEUE', 1))
        elif 'linux' in sys.platform:
            libzmq.define_macros.append(('ZMQ_USE_EPOLL', 1))
        elif sys.platform.startswith('win'):
            libzmq.define_macros.append(('ZMQ_USE_SELECT', 1))
        else:
            # this may not be sufficiently precise
            libzmq.define_macros.append(('ZMQ_USE_POLL', 1))
        if sys.platform.startswith('win'):
            # include defines from zeromq msvc project:
            libzmq.define_macros.append(('FD_SETSIZE', 16384))
            libzmq.define_macros.append(('DLL_EXPORT', 1))
            libzmq.define_macros.append(('_CRT_SECURE_NO_WARNINGS', 1))
            # When compiling the C++ code inside of libzmq itself, we want to
            # avoid "warning C4530: C++ exception handler used, but unwind
            # semantics are not enabled. Specify /EHsc".
            if self.compiler_type == 'msvc':
                libzmq.extra_compile_args.append('/EHsc')
            elif self.compiler_type == 'mingw32':
                libzmq.define_macros.append(('ZMQ_HAVE_MINGW32', 1))
            # And things like sockets come from libraries that must be named.
            libzmq.libraries.extend(['rpcrt4', 'ws2_32', 'advapi32'])
            # bundle MSCVP redist
            if self.config['bundle_msvcp']:
                cc = new_compiler(compiler=self.compiler_type)
                cc.initialize()
                # get vc_redist location via private API
                try:
                    cc._vcruntime_redist
                except AttributeError:
                    # fatal error if env set, warn otherwise
                    msg = fatal if os.environ.get("PYZMQ_BUNDLE_CRT") else warn
                    msg("Failed to get cc._vcruntime via private API, not bundling CRT")
                # NOTE(review): if the warn path was taken above, the access
                # below re-raises the same AttributeError — looks like a bug;
                # confirm whether this should be guarded.
                if cc._vcruntime_redist:
                    redist_dir, dll = os.path.split(cc._vcruntime_redist)
                    to_bundle = [
                        pjoin(redist_dir, dll.replace('vcruntime', name))
                        for name in ('msvcp', 'concrt')
                    ]
                    for src in to_bundle:
                        dest = localpath('zmq', basename(src))
                        info("Copying %s -> %s" % (src, dest))
                        # copyfile to avoid permission issues
                        shutil.copyfile(src, dest)
        else:
            libzmq.include_dirs.append(bundledir)
            # check if we need to link against Realtime Extensions library
            cc = new_compiler(compiler=self.compiler_type)
            cc.output_dir = self.build_temp
            libzmq.libraries.append("stdc++")
            if not sys.platform.startswith(('darwin', 'freebsd')):
                line()
                info("checking for timer_create")
                if not cc.has_function('timer_create'):
                    info("no timer_create, linking librt")
                    libzmq.libraries.append('rt')
                else:
                    info("ok")
            if pypy:
                # seem to need explicit libstdc++ on linux + pypy
                # not sure why
                libzmq.libraries.append("stdc++")
    def run(self):
        self.bundle_libzmq_extension()
|
GiedriusM/openthread | refs/heads/master | tools/harness-automation/cases/commissioner_9_2_1.py | 16 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_9_2_1(HarnessCase):
    """Thread Harness certification case 9.2.1, DUT in the Commissioner role."""
    role = HarnessCase.ROLE_COMMISSIONER
    case = '9 2 1'  # harness case identifier (space-separated)
    golden_devices_required = 1  # golden devices the harness must provide
    def on_dialog(self, dialog, title):
        # No harness dialogs need special handling for this case.
        pass
if __name__ == '__main__':
    unittest.main()
|
vellamike/maltese_spellchecker_norvig | refs/heads/master | spellcorrect/operator_evaluators.py | 1 | """
The idea here is to look at the result of every operation
and price it. Give a finer idea of how expensive the operation
was. This is tricky in some way - is a i -> ie mapping cheaper than
a h to k mapping? Where is the correct place for this? is it in the
operations? My instinct is that the cost of an operation should reflect how
difficult it is to make the mistake in the context of the language: an i to ie
mistake is very common and should therefore be cheap. HOWEVER, you may also argue
that that is the job of a more complex Bayesian analysis to figure out.
"""
|
defance/edx-platform | refs/heads/master | lms/djangoapps/courseware/student_field_overrides.py | 164 | """
API related to providing field overrides for individual students. This is used
by the individual due dates feature.
"""
import json
from .field_overrides import FieldOverrideProvider
from .models import StudentFieldOverride
class IndividualStudentOverrideProvider(FieldOverrideProvider):
    """
    A concrete implementation of
    :class:`~courseware.field_overrides.FieldOverrideProvider` which allows for
    overrides to be made on a per user basis.
    """
    def get(self, block, name, default):
        # Delegate to the module-level helper, bound to this provider's user.
        return get_override_for_user(self.user, block, name, default)
    @classmethod
    def enabled_for(cls, course):
        """This simple override provider is always enabled"""
        return True
def get_override_for_user(user, block, name, default=None):
    """Return the overridden value of field `name` on `block` for `user`.

    Falls back to `default` when the field is not overridden.  Lookups are
    cached per-user on the block instance to avoid repeated database hits.
    """
    try:
        cache = block._student_overrides  # pylint: disable=protected-access
    except AttributeError:
        cache = block._student_overrides = {}  # pylint: disable=protected-access
    if user.id not in cache:
        cache[user.id] = _get_overrides_for_user(user, block)
    return cache[user.id].get(name, default)
def _get_overrides_for_user(user, block):
    """Load every individual override for (user, block) from the database.

    Returns a dict mapping field name to the deserialized override value.
    """
    rows = StudentFieldOverride.objects.filter(
        course_id=block.runtime.course_id,
        location=block.location,
        student_id=user.id,
    )
    # Deserialize each stored JSON value through the field's own from_json.
    return {
        row.field: block.fields[row.field].from_json(json.loads(row.value))
        for row in rows
    }
def override_field_for_user(user, block, name, value):
    """Create or update the override of field `name` on `block` for `user`.

    `value` is serialized through the field's own `to_json` so that
    `_get_overrides_for_user` can round-trip it with `from_json`.
    """
    record, _created = StudentFieldOverride.objects.get_or_create(
        course_id=block.runtime.course_id,
        location=block.location,
        student_id=user.id,
        field=name)
    record.value = json.dumps(block.fields[name].to_json(value))
    record.save()
def clear_override_for_user(user, block, name):
    """Remove a previously set override of field `name` for `user`.

    Idempotent: when no override exists, this is a no-op.
    """
    try:
        existing = StudentFieldOverride.objects.get(
            course_id=block.runtime.course_id,
            student_id=user.id,
            location=block.location,
            field=name)
    except StudentFieldOverride.DoesNotExist:
        return
    existing.delete()
|
rahul-ramadas/BagOfTricks | refs/heads/master | RunMultipleCommands.py | 1 | import sublime
import sublime_plugin
class RunMultipleCommandsCommand(sublime_plugin.TextCommand):
    """Run a list of Sublime commands in order, optionally repeating the list."""

    def run(self, edit, commands=None, command=None, times=1):
        if commands is None:
            # Accept the singular 'command' form as a one-element list.
            commands = [] if command is None else [command]
        for _repeat in range(times):
            for cmd in commands:
                self.exec_command(cmd)

    def exec_command(self, command):
        if not "command" in command:
            # A bare string is shorthand for {"command": <name>}.
            if not isinstance(command, str):
                raise ValueError("No command name provided.")
            command = {"command": command}
        # Dispatch to the requested context; defaults to the text (view) context.
        contexts = {
            "window": self.view.window(), "app": sublime, "text": self.view}
        target = contexts[command.get("context", "text")]
        target.run_command(command["command"], command.get("args"))
|
connoranderson/Speechables | refs/heads/master | mechanize-0.2.5/test-tools/unittest/runner.py | 22 | """Running tests"""
import sys
import time
from unittest import result
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class _TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.  With verbosity > 1 it prints one labelled line
    per test; with verbosity == 1 it prints one progress character per test.
    """
    separator1 = '=' * 70  # heavy rule above each error/failure section
    separator2 = '-' * 70  # light rule below each section header
    def __init__(self, stream, descriptions, verbosity):
        super(_TextTestResult, self).__init__()
        self.stream = stream
        self.showAll = verbosity > 1   # verbose mode: full line per test
        self.dots = verbosity == 1     # default mode: one char per test
        self.descriptions = descriptions
    def getDescription(self, test):
        # Prefer the test's docstring summary when descriptions are enabled.
        if self.descriptions:
            return test.shortDescription() or str(test)
        else:
            return str(test)
    def startTest(self, test):
        super(_TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()
    def addSuccess(self, test):
        super(_TextTestResult, self).addSuccess(test)
        if self.showAll:
            self.stream.writeln("ok")
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()
    def addError(self, test, err):
        super(_TextTestResult, self).addError(test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()
    def addFailure(self, test, err):
        super(_TextTestResult, self).addFailure(test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()
    def addSkip(self, test, reason):
        super(_TextTestResult, self).addSkip(test, reason)
        if self.showAll:
            self.stream.writeln("skipped %r" % (reason,))
        elif self.dots:
            self.stream.write("s")
            self.stream.flush()
    def addExpectedFailure(self, test, err):
        super(_TextTestResult, self).addExpectedFailure(test, err)
        if self.showAll:
            self.stream.writeln("expected failure")
        elif self.dots:
            self.stream.write("x")
            self.stream.flush()
    def addUnexpectedSuccess(self, test):
        super(_TextTestResult, self).addUnexpectedSuccess(test)
        if self.showAll:
            self.stream.writeln("unexpected success")
        elif self.dots:
            self.stream.write("u")
            self.stream.flush()
    def printErrors(self):
        """Print the detailed ERROR and FAIL sections after the run."""
        if self.dots or self.showAll:
            self.stream.writeln()  # end the progress line first
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
    def printErrorList(self, flavour, errors):
        # errors is a list of (test, formatted-traceback-string) pairs.
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
class TextTestRunner(object):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
    def _makeResult(self):
        # Hook point: subclasses may return a different TestResult.
        return _TextTestResult(self.stream, self.descriptions, self.verbosity)
    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time.time()
        # start/stopTestRun are optional hooks on older result objects.
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        # Old-style conditional: pluralize 'test' unless exactly one ran.
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        # map object unpacks fine here: exactly three iterables are counted.
        results = map(len, (result.expectedFailures,
                            result.unexpectedSuccesses,
                            result.skipped))
        expectedFails, unexpectedSuccesses, skipped = results
        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
|
vigneras/sequencer | refs/heads/master | tests/test_smartdisplay.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) Bull S.A.S (2010, 2011)
# Contributor: Pierre Vignéras <pierre.vigneras@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from sequencer import commons
from sequencer.commons import smart_display, REMOVE_UNSPECIFIED_COLUMNS, \
FILL_EMPTY_ENTRY
import logging
import re
import sys
import unittest
"""Test the smart_display() routine."""
class SmartDisplayTest(unittest.TestCase):
def testBadArg(self):
self.assertRaises(AssertionError, smart_display, None, ['foo'])
self.assertRaises(AssertionError, smart_display, [], ['foo'])
self.assertRaises(AssertionError, smart_display, ['dummy'], None)
self.assertRaises(AssertionError,
smart_display, ['dummy'], ['foo'], hsep=None)
self.assertRaises(AssertionError,
smart_display, ['dummy'], ['foo'], vsep=None)
# justify should have the same length than header when specified.
self.assertRaises(AssertionError,
smart_display, ['dummy'], ['foo'], justify=[])
# def testLengthHeaderDataDiffers(self):
# # Each data row should have the same length than the header
# self.assertRaises(AssertionError, smart_display, ['bar', 'foo'], [['data1']])
# self.assertRaises(AssertionError, smart_display, ['bar'], [['data1', 'data2']])
def testSimpleOutput(self):
output = smart_display([u"Title"], [[u"data1"], [u"data2"]])
self.assertIsNotNone(re.search(r'^.*Title.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*data1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*data2.*$', output,
flags=re.MULTILINE),
output)
def testRemoveAllColumns(self):
# Removing single column -> no output at all!!
output = smart_display([u"Title"], [[u"data1"], [u"data2"]],
columns_max={'Title':0})
self.assertIsNone(re.search(r'^.*Title.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNone(re.search(r'^.*data.*$', output,
flags=re.MULTILINE),
output)
def testRemoveSomeColumns(self):
# Two columns only one remain
output = smart_display([u"T1", u"T2"],
[[u"d1.1", u"d2.1"],
[u"d1.2", u"d2.2"]],
columns_max={'T1':0})
self.assertIsNone(re.search(r'^.*T1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNone(re.search(r'^.*d1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*T2.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*d2.*$', output,
flags=re.MULTILINE),
output)
def testSpecifySingleColumn(self):
output = smart_display([u"T1", u"T2"],
[[u"d1.1", u"d2.1"],
[u"d1.2", u"d2.2"]],
columns_max={'T2': REMOVE_UNSPECIFIED_COLUMNS})
self.assertIsNone(re.search(r'^.*T1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNone(re.search(r'^.*d1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*T2.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*d2.*$', output,
flags=re.MULTILINE),
output)
def testSpecifyMultipleColumns(self):
output = smart_display([u"T1", u"T2", u"T3"],
[[u"d1.1", u"d2.1", u"d3.1"],
[u"d1.2", u"d2.2", u"d3.2"]],
columns_max={u'T2': REMOVE_UNSPECIFIED_COLUMNS,
u'T3': REMOVE_UNSPECIFIED_COLUMNS})
self.assertIsNone(re.search(r'^.*T1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNone(re.search(r'^.*d1.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*T2.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*d2.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*T3.*$', output,
flags=re.MULTILINE),
output)
self.assertIsNotNone(re.search(r'^.*d3.*$', output,
flags=re.MULTILINE),
output)
    def testFILLERSpecified(self):
        # A FILL_EMPTY_ENTRY placeholder in a data cell should be rendered
        # as a dashed filler while the rest of the table stays intact.
        output = smart_display([u"T1", u"T2"],
                               [[u"d1.1", u"d2.1"],
                                [FILL_EMPTY_ENTRY, u"d2.2"]])
        self.assertIsNotNone(re.search(r'^.*T1.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*d1.*$', output,
                                       flags=re.MULTILINE),
                             output)
        # Here a faked sample of what we are looking for:
        # NOTE(review): inside this pattern '|' is regex ALTERNATION, not a
        # literal pipe, and the '.' in 'd2.2' matches any character.  The
        # pattern therefore means "line of dashes+spaces" OR "' +d2.2 $'",
        # and it matches the faked sample via the first alternative only.
        # Confirm whether a literal r'\|' (and r'd2\.2') was intended.
        match = re.search(r'^-+ +| +d2.2 $', '------ | d2.2',
                          flags=re.MULTILINE)
        print("Faked matching at: %s" % match.string[match.start():match.end()])
        assert match is not None
        # Now, doing the same on the actual output
        print(output)
        match = re.search(r'^-+ +| +d2.2 $', output, flags=re.MULTILINE)
        self.assertIsNotNone(match, output)
        print("Matching at: %s" % match.string[match.start():match.end()])
        self.assertIsNotNone(re.search(r'^.*T2.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*d2.*$', output,
                                       flags=re.MULTILINE),
                             output)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Send log records to stderr and enable DEBUG logging for the module
    # under test before handing control to the unittest runner.
    logging.basicConfig(stream=sys.stderr)
    logging.getLogger(commons.__name__).setLevel(logging.DEBUG)
    unittest.main()
add_field = {
# Expected SQL for ADD COLUMN evolutions on SQLite.  SQLite's limited
# ALTER TABLE support forces a full table rebuild for most additions:
# copy rows into TEMP_TABLE, drop the table, recreate it with the new
# column, copy the rows back, then drop TEMP_TABLE.  An extra UPDATE
# appears when the new NOT NULL column needs an initial value.
'AddNonNullNonCallableColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = 1;',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddNonNullCallableColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = "int_field";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddNullColumnWithInitialColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = 1;',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddStringColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" varchar(10) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = \'abc\\\'s xyz\';',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" varchar(10) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
# NOTE(review): the datetime initial value below is unquoted in the
# expected SQL; that mirrors whatever the generator emits — confirm
# against the SQL generator before "fixing" the fixture.
'AddDateColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" datetime NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = 2007-12-13 16:42:00;',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" datetime NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddDefaultColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = 42;',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddEmptyStringDefaultColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" varchar(20) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "added_field" = \'\';',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" varchar(20) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
# Nullable columns need no initial-value UPDATE step.
'AddNullColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'NonDefaultColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "non-default_column" integer NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "non-default_column" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "non-default_column" integer NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "non-default_column") SELECT "int_field", "id", "char_field", "non-default_column" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddColumnCustomTableModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("id" integer NOT NULL UNIQUE PRIMARY KEY, "value" integer NOT NULL, "alt_value" varchar(20) NOT NULL, "added_field" integer NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "id", "value", "alt_value", "added_field" FROM "custom_table_name";',
'DROP TABLE "custom_table_name";',
'CREATE TABLE "custom_table_name"("id" integer NOT NULL UNIQUE PRIMARY KEY, "value" integer NOT NULL, "alt_value" varchar(20) NOT NULL, "added_field" integer NULL);',
'INSERT INTO "custom_table_name" ("id", "value", "alt_value", "added_field") SELECT "id", "value", "alt_value", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
# Indexed columns add a CREATE INDEX after the table rebuild.
'AddIndexedColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "add_field" integer NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "add_field" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "add_field" integer NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "add_field") SELECT "int_field", "id", "char_field", "add_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE INDEX "tests_testmodel_add_field" ON "tests_testmodel" ("add_field");',
]),
'AddUniqueColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL UNIQUE);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL UNIQUE);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddUniqueIndexedModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL UNIQUE);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NULL UNIQUE);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'AddForeignKeyModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field_id" integer NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field_id" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field_id" integer NULL);',
'INSERT INTO "tests_testmodel" ("int_field", "id", "char_field", "added_field_id") SELECT "int_field", "id", "char_field", "added_field_id" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE INDEX "tests_testmodel_added_field_id" ON "tests_testmodel" ("added_field_id");',
]),
# ManyToMany additions never rebuild the model table; they only create
# the new join table.
'AddManyToManyDatabaseTableModel':
'\n'.join([
'CREATE TABLE "tests_testmodel_added_field" (',
'    "id" integer NOT NULL PRIMARY KEY,',
'    "testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id"),',
'    "addanchor1_id" integer NOT NULL REFERENCES "tests_addanchor1" ("id"),',
'    UNIQUE ("testmodel_id", "addanchor1_id")',
')',
';',
]),
'AddManyToManyNonDefaultDatabaseTableModel':
'\n'.join([
'CREATE TABLE "tests_testmodel_added_field" (',
'    "id" integer NOT NULL PRIMARY KEY,',
'    "testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id"),',
'    "addanchor2_id" integer NOT NULL REFERENCES "custom_add_anchor_table" ("id"),',
'    UNIQUE ("testmodel_id", "addanchor2_id")',
')',
';',
]),
'AddManyToManySelf':
'\n'.join([
'CREATE TABLE "tests_testmodel_added_field" (',
'    "id" integer NOT NULL PRIMARY KEY,',
'    "from_testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id"),',
'    "to_testmodel_id" integer NOT NULL REFERENCES "tests_testmodel" ("id"),',
'    UNIQUE ("from_testmodel_id", "to_testmodel_id")',
')',
';',
]),
}
# Expected SQL for DROP COLUMN evolutions on SQLite.  SQLite (pre-3.35)
# cannot drop a column in place, so each entry rebuilds the table without
# the removed column via a TEMP_TABLE copy; deleting a ManyToMany field
# is just a DROP of its join table.
delete_field = {
'DefaultNamedColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("non-default_db_column" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "fk_field1_id" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'INSERT INTO "TEMP_TABLE" SELECT "non-default_db_column", "int_field3", "fk_field1_id", "char_field", "my_id" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("non-default_db_column" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "fk_field1_id" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'CREATE INDEX "tests_testmodel_fk_field1_id" ON "tests_testmodel" ("fk_field1_id");',
'INSERT INTO "tests_testmodel" ("non-default_db_column", "int_field3", "fk_field1_id", "char_field", "my_id") SELECT "non-default_db_column", "int_field3", "fk_field1_id", "char_field", "my_id" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'NonDefaultNamedColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "fk_field1_id" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "int_field3", "fk_field1_id", "char_field", "my_id" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "fk_field1_id" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'CREATE INDEX "tests_testmodel_fk_field1_id" ON "tests_testmodel" ("fk_field1_id");',
'INSERT INTO "tests_testmodel" ("int_field", "int_field3", "fk_field1_id", "char_field", "my_id") SELECT "int_field", "int_field3", "fk_field1_id", "char_field", "my_id" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'ConstrainedColumnModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "non-default_db_column" integer NOT NULL, "fk_field1_id" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "non-default_db_column", "fk_field1_id", "char_field", "my_id" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "non-default_db_column" integer NOT NULL, "fk_field1_id" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'CREATE INDEX "tests_testmodel_fk_field1_id" ON "tests_testmodel" ("fk_field1_id");',
'INSERT INTO "tests_testmodel" ("int_field", "non-default_db_column", "fk_field1_id", "char_field", "my_id") SELECT "int_field", "non-default_db_column", "fk_field1_id", "char_field", "my_id" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
# M2M field deletion: only the join table is dropped.
'DefaultManyToManyModel':
'DROP TABLE "tests_testmodel_m2m_field1";',
'NonDefaultManyToManyModel':
'DROP TABLE "non-default_m2m_table";',
'DeleteForeignKeyModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "non-default_db_column" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field", "non-default_db_column", "int_field3", "char_field", "my_id" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "non-default_db_column" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY);',
'INSERT INTO "tests_testmodel" ("int_field", "non-default_db_column", "int_field3", "char_field", "my_id") SELECT "int_field", "non-default_db_column", "int_field3", "char_field", "my_id" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
'DeleteColumnCustomTableModel':
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("id" integer NOT NULL UNIQUE PRIMARY KEY, "alt_value" varchar(20) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "id", "alt_value" FROM "custom_table_name";',
'DROP TABLE "custom_table_name";',
'CREATE TABLE "custom_table_name"("id" integer NOT NULL UNIQUE PRIMARY KEY, "alt_value" varchar(20) NOT NULL);',
'INSERT INTO "custom_table_name" ("id", "alt_value") SELECT "id", "alt_value" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
}
change_field = {
"SetNotNullChangeModelWithConstant":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NOT NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "char_field1" = \'abc\\\'s xyz\';',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NOT NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"SetNotNullChangeModelWithCallable":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NOT NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'UPDATE "TEMP_TABLE" SET "char_field1" = "char_field";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NOT NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"SetNullChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"NoOpChangeModel": '',
"IncreasingMaxLengthChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(45) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(45) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"DecreasingMaxLengthChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(1) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(1) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"DBColumnChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "customised_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "customised_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'CREATE INDEX "tests_testmodel_int_field1" ON "tests_testmodel" ("int_field1");',
'INSERT INTO "tests_testmodel" ("int_field4", "customised_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "customised_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"M2MDBTableChangeModel": 'ALTER TABLE "change_field_non-default_m2m_table" RENAME TO "custom_m2m_db_table_name";',
"AddDBIndexChangeModel": 'CREATE INDEX "tests_testmodel_int_field2" ON "tests_testmodel" ("int_field2");',
"RemoveDBIndexChangeModel": 'DROP INDEX "tests_testmodel_int_field1";',
"AddUniqueChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL UNIQUE, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL UNIQUE, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"RemoveUniqueChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"MultiAttrChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column2" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column2" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'CREATE INDEX "tests_testmodel_int_field1" ON "tests_testmodel" ("int_field1");',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column2", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column2", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column2" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(35) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column2", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column2" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(35) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column2", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column2", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"MultiAttrSingleFieldChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(35) NOT NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(35) NOT NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(35) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(35) NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
"RedundantAttrsChangeModel":
'\n'.join([
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column3" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column3" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(20) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'CREATE INDEX "tests_testmodel_int_field1" ON "tests_testmodel" ("int_field1");',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column3", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column3", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field4" integer NOT NULL, "custom_db_column3" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(35) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "TEMP_TABLE" SELECT "int_field4", "custom_db_column3", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "tests_testmodel";',
'DROP TABLE "tests_testmodel";',
'CREATE TABLE "tests_testmodel"("int_field4" integer NOT NULL, "custom_db_column3" integer NOT NULL, "int_field1" integer NOT NULL, "int_field2" integer NOT NULL, "int_field3" integer NOT NULL UNIQUE, "alt_pk" integer NOT NULL, "char_field" varchar(35) NOT NULL, "my_id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field1" varchar(25) NULL, "char_field2" varchar(30) NULL);',
'INSERT INTO "tests_testmodel" ("int_field4", "custom_db_column3", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2") SELECT "int_field4", "custom_db_column3", "int_field1", "int_field2", "int_field3", "alt_pk", "char_field", "my_id", "char_field1", "char_field2" FROM "TEMP_TABLE";',
'DROP TABLE "TEMP_TABLE";',
]),
}
# Expected SQL for the model-deletion mutation tests, keyed by test-case
# name. Each value is the exact statement sequence the SQLite evolver must
# emit, so the strings may not be altered.
delete_model = {
    'BasicModel':
        'DROP TABLE "tests_basicmodel";',
    'BasicWithM2MModel':
        '\n'.join([
            'DROP TABLE "tests_basicwithm2mmodel_m2m";',
            'DROP TABLE "tests_basicwithm2mmodel";'
        ]),
    'CustomTableModel':
        'DROP TABLE "custom_table_name";',
    'CustomTableWithM2MModel':
        '\n'.join([
            'DROP TABLE "another_custom_table_name_m2m";',
            'DROP TABLE "another_custom_table_name";'
        ]),
}
# Expected SQL for deleting an entire application: every table owned by the
# app (including M2M join tables and custom-named tables) is dropped.
delete_application = {
    'DeleteApplication':
        '\n'.join([
            'DROP TABLE "tests_appdeleteanchor1";',
            'DROP TABLE "tests_testmodel_anchor_m2m";',
            'DROP TABLE "tests_testmodel";',
            'DROP TABLE "app_delete_custom_add_anchor_table";',
            'DROP TABLE "app_delete_custom_table_name";',
        ]),
}
# Expected SQL for field-rename mutations. SQLite cannot rename a column in
# place, so each case rebuilds the table via a TEMP_TABLE copy; M2M renames
# only need an ALTER TABLE ... RENAME of the join table.
rename_field = {
    'RenameColumnModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("custom_db_col_name" integer NOT NULL, "char_field" varchar(20) NOT NULL, "renamed_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "custom_db_col_name", "char_field", "int_field", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("custom_db_col_name" integer NOT NULL, "char_field" varchar(20) NOT NULL, "renamed_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_fk_field_id" ON "tests_testmodel" ("fk_field_id");',
            'INSERT INTO "tests_testmodel" ("custom_db_col_name", "char_field", "renamed_field", "custom_db_col_name_indexed", "fk_field_id", "id") SELECT "custom_db_col_name", "char_field", "renamed_field", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameColumnWithTableNameModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("custom_db_col_name" integer NOT NULL, "char_field" varchar(20) NOT NULL, "renamed_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "custom_db_col_name", "char_field", "int_field", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("custom_db_col_name" integer NOT NULL, "char_field" varchar(20) NOT NULL, "renamed_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_fk_field_id" ON "tests_testmodel" ("fk_field_id");',
            'INSERT INTO "tests_testmodel" ("custom_db_col_name", "char_field", "renamed_field", "custom_db_col_name_indexed", "fk_field_id", "id") SELECT "custom_db_col_name", "char_field", "renamed_field", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenamePrimaryKeyColumnModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("custom_db_col_name" integer NOT NULL, "char_field" varchar(20) NOT NULL, "int_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "my_pk_id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "custom_db_col_name", "char_field", "int_field", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("custom_db_col_name" integer NOT NULL, "char_field" varchar(20) NOT NULL, "int_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "my_pk_id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_fk_field_id" ON "tests_testmodel" ("fk_field_id");',
            'INSERT INTO "tests_testmodel" ("custom_db_col_name", "char_field", "int_field", "custom_db_col_name_indexed", "fk_field_id", "my_pk_id") SELECT "custom_db_col_name", "char_field", "int_field", "custom_db_col_name_indexed", "fk_field_id", "my_pk_id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameForeignKeyColumnModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "custom_db_col_name" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "renamed_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "int_field", "char_field", "custom_db_col_name", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "custom_db_col_name" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "renamed_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_renamed_field_id" ON "tests_testmodel" ("renamed_field_id");',
            'INSERT INTO "tests_testmodel" ("int_field", "char_field", "custom_db_col_name", "custom_db_col_name_indexed", "renamed_field_id", "id") SELECT "int_field", "char_field", "custom_db_col_name", "custom_db_col_name_indexed", "renamed_field_id", "id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameNonDefaultColumnNameModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "renamed_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "int_field", "char_field", "custom_db_col_name", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "renamed_field" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_fk_field_id" ON "tests_testmodel" ("fk_field_id");',
            'INSERT INTO "tests_testmodel" ("int_field", "char_field", "renamed_field", "custom_db_col_name_indexed", "fk_field_id", "id") SELECT "int_field", "char_field", "renamed_field", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameNonDefaultColumnNameToNonDefaultNameModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "non-default_column_name" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "int_field", "char_field", "custom_db_col_name", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "non-default_column_name" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_fk_field_id" ON "tests_testmodel" ("fk_field_id");',
            'INSERT INTO "tests_testmodel" ("int_field", "char_field", "non-default_column_name", "custom_db_col_name_indexed", "fk_field_id", "id") SELECT "int_field", "char_field", "non-default_column_name", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameNonDefaultColumnNameToNonDefaultNameAndTableModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "non-default_column_name2" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'INSERT INTO "TEMP_TABLE" SELECT "int_field", "char_field", "custom_db_col_name", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "char_field" varchar(20) NOT NULL, "non-default_column_name2" integer NOT NULL, "custom_db_col_name_indexed" integer NOT NULL, "fk_field_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY);',
            'CREATE INDEX "tests_testmodel_custom_db_col_name_indexed" ON "tests_testmodel" ("custom_db_col_name_indexed");',
            'CREATE INDEX "tests_testmodel_fk_field_id" ON "tests_testmodel" ("fk_field_id");',
            'INSERT INTO "tests_testmodel" ("int_field", "char_field", "non-default_column_name2", "custom_db_col_name_indexed", "fk_field_id", "id") SELECT "int_field", "char_field", "non-default_column_name2", "custom_db_col_name_indexed", "fk_field_id", "id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameColumnCustomTableModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("id" integer NOT NULL UNIQUE PRIMARY KEY, "renamed_field" integer NOT NULL, "alt_value" varchar(20) NOT NULL);',
            'INSERT INTO "TEMP_TABLE" SELECT "id", "value", "alt_value" FROM "custom_rename_table_name";',
            'DROP TABLE "custom_rename_table_name";',
            'CREATE TABLE "custom_rename_table_name"("id" integer NOT NULL UNIQUE PRIMARY KEY, "renamed_field" integer NOT NULL, "alt_value" varchar(20) NOT NULL);',
            'INSERT INTO "custom_rename_table_name" ("id", "renamed_field", "alt_value") SELECT "id", "renamed_field", "alt_value" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'RenameManyToManyTableModel':
        'ALTER TABLE "tests_testmodel_m2m_field" RENAME TO "tests_testmodel_renamed_field";',
    'RenameManyToManyTableWithColumnNameModel':
        'ALTER TABLE "tests_testmodel_m2m_field" RENAME TO "tests_testmodel_renamed_field";',
    'RenameNonDefaultManyToManyTableModel':
        'ALTER TABLE "non-default_db_table" RENAME TO "tests_testmodel_renamed_field";',
}
# Expected data for raw SQLMutation tests: the sequence is quoted doctest
# style (leading "... ") and the output is the flattened SQL it produces.
sql_mutation = {
    'SQLMutationSequence': """[
... SQLMutation('first-two-fields', [
... 'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field1" integer NULL;',
... 'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field2" integer NULL;'
... ], update_first_two),
... SQLMutation('third-field', [
... 'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field3" integer NULL;',
... ], update_third)]
""",
    'SQLMutationOutput':
        '\n'.join([
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field1" integer NULL;',
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field2" integer NULL;',
            'ALTER TABLE "tests_testmodel" ADD COLUMN "added_field3" integer NULL;',
        ]),
}
# Expected SQL for mutations on models carrying generic relations
# (content_type_id / object_id columns from django.contrib.contenttypes).
generics = {
    'DeleteColumnModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "content_type_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "object_id" integer unsigned NOT NULL);',
            'INSERT INTO "TEMP_TABLE" SELECT "int_field", "content_type_id", "id", "object_id" FROM "tests_testmodel";',
            'DROP TABLE "tests_testmodel";',
            'CREATE TABLE "tests_testmodel"("int_field" integer NOT NULL, "content_type_id" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "object_id" integer unsigned NOT NULL);',
            'CREATE INDEX "tests_testmodel_content_type_id" ON "tests_testmodel" ("content_type_id");',
            'CREATE INDEX "tests_testmodel_object_id" ON "tests_testmodel" ("object_id");',
            'INSERT INTO "tests_testmodel" ("int_field", "content_type_id", "id", "object_id") SELECT "int_field", "content_type_id", "id", "object_id" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ])
}
# Expected SQL for mutations applied to child models in an inheritance
# hierarchy; note AddToChildModel backfills the new column with 42.
inheritance = {
    'AddToChildModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
            'INSERT INTO "TEMP_TABLE" SELECT "int_field", "id", "char_field", "added_field" FROM "tests_childmodel";',
            'UPDATE "TEMP_TABLE" SET "added_field" = 42;',
            'DROP TABLE "tests_childmodel";',
            'CREATE TABLE "tests_childmodel"("int_field" integer NOT NULL, "id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL, "added_field" integer NOT NULL);',
            'INSERT INTO "tests_childmodel" ("int_field", "id", "char_field", "added_field") SELECT "int_field", "id", "char_field", "added_field" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";',
        ]),
    'DeleteFromChildModel':
        '\n'.join([
            'CREATE TEMPORARY TABLE "TEMP_TABLE"("id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL);',
            'INSERT INTO "TEMP_TABLE" SELECT "id", "char_field" FROM "tests_childmodel";',
            'DROP TABLE "tests_childmodel";',
            'CREATE TABLE "tests_childmodel"("id" integer NOT NULL UNIQUE PRIMARY KEY, "char_field" varchar(20) NOT NULL);',
            'INSERT INTO "tests_childmodel" ("id", "char_field") SELECT "id", "char_field" FROM "TEMP_TABLE";',
            'DROP TABLE "TEMP_TABLE";'
        ])
}
# This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from contextlib import contextmanager
import pprint
import warnings
import sys
from six import StringIO
# True when running under Python 3; selects the unicode helper below.
PY3 = (sys.version_info[0] >= 3)

if PY3:
    def u(s):
        # Python 3: str is already unicode, return unchanged.
        return s
else:
    def u(s):
        # Python 2: decode escape sequences into a unicode object.
        return unicode(s, "unicode_escape")
# Record types for option bookkeeping: deprecation info (message, re-routed
# key, removal version) and registration info (default, docs, validator,
# post-set callback).
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
    'RegisteredOption', 'key defval doc validator cb')

_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {}  # holds the current values for registered options
_reserved_keys = ['all']  # keys which have a special meaning
class OptionError(AttributeError, KeyError):
    """Exception raised for ibis.options errors.

    Derives from both AttributeError and KeyError so it remains backwards
    compatible with code that probes options via either exception type.
    """
#
# User API
def _get_single_key(pat, silent):
    """Resolve *pat* to exactly one registered option key.

    Unless *silent*, a deprecation warning is emitted for the matched key,
    and the key is translated through any deprecation re-routing before
    being returned. Raises OptionError for zero or multiple matches.
    """
    matches = _select_options(pat)
    if not matches:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError('No such keys(s): %r' % pat)
    if len(matches) > 1:
        raise OptionError('Pattern matched multiple keys')
    (key,) = matches
    if not silent:
        _warn_if_deprecated(key)
    return _translate_key(key)
def _get_option(pat, silent=False):
    """Return the current value of the single option matching *pat*."""
    full_key = _get_single_key(pat, silent)
    # Descend into the nested config dict and read the leaf value.
    parent, leaf = _get_root(full_key)
    return parent[leaf]
def _set_option(*args, **kwargs):
    """Set options from alternating (pattern, value) positional arguments.

    Accepts ``silent=True`` as a keyword to suppress deprecation warnings.
    Each value is run through the option's validator (if any) and the
    option's callback (if any) is invoked after the value is stored.
    """
    if not args or len(args) % 2:
        raise ValueError("Must provide an even number of non-keyword "
                         "arguments")

    silent = kwargs.get('silent', False)

    for pat, value in zip(args[::2], args[1::2]):
        key = _get_single_key(pat, silent)

        opt = _get_registered_option(key)
        if opt and opt.validator:
            opt.validator(value)

        # Store the value at the leaf of the nested config dict.
        parent, leaf = _get_root(key)
        parent[leaf] = value

        if opt.cb:
            opt.cb(key)
def _describe_option(pat='', _print_desc=True):
    """Print (or, with ``_print_desc=False``, return) descriptions of all
    options matching *pat*."""
    matches = _select_options(pat)
    if not matches:
        raise OptionError('No such keys(s)')

    description = u('')
    for key in matches:  # filter by pat
        description += _build_option_description(key)

    if not _print_desc:
        return description
    print(description)
def _reset_option(pat, silent=False):
    """Reset every option matching *pat* to its registered default.

    Broad patterns (shorter than 4 characters) are rejected unless the
    special keyword ``'all'`` is used, to guard against accidental mass
    resets.
    """
    matches = _select_options(pat)
    if not matches:
        raise OptionError('No such keys(s)')
    if len(matches) > 1 and len(pat) < 4 and pat != 'all':
        raise ValueError('You must specify at least 4 characters when '
                         'resetting multiple keys, use the special keyword '
                         '"all" to reset all the options to their default '
                         'value')
    for key in matches:
        _set_option(key, _registered_options[key].defval, silent=silent)
def get_default_val(pat):
    """Return the registered default value of the option matching *pat*."""
    return _get_registered_option(_get_single_key(pat, silent=True)).defval
class DictWrapper(object):
    """Provide attribute-style access to a nested dict of option values.

    Reading an attribute returns either a nested DictWrapper (for option
    sub-namespaces) or the option's current value; writing an attribute
    routes through ``_set_option`` so validators and callbacks run.
    """

    def __init__(self, d, prefix=""):
        # Use object.__setattr__ to bypass our own __setattr__ hook below.
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)

    def __repr__(self):
        buf = StringIO()
        pprint.pprint(self.d, stream=buf)
        return buf.getvalue()

    def __setattr__(self, key, val):
        # Build the fully-qualified dotted option name.
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        # you can't set new keys
        # and you can't overwrite subtrees
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")

    def __getattr__(self, key):
        # Build the fully-qualified dotted option name.
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        v = object.__getattribute__(self, "d")[key]
        if isinstance(v, dict):
            # Sub-namespace: wrap it so chained attribute access keeps working.
            return DictWrapper(v, prefix)
        else:
            # Leaf option: resolve via _get_option (handles deprecation).
            return _get_option(prefix)

    def __dir__(self):
        return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a propery function. The doctsrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
    """Wrap a function so its docstring is rendered on demand.

    The stored template is re-formatted with the current option listing
    every time ``__doc__`` is read, keeping the help text in sync with
    whatever options have been registered so far.
    """

    def __init__(self, func, doc_tmpl):
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func

    def __call__(self, *args, **kwds):
        return self.__func__(*args, **kwds)

    @property
    def __doc__(self):
        keys = list(_registered_options.keys())
        return self.__doc_tmpl__.format(
            opts_desc=_describe_option('all', _print_desc=False),
            opts_list=pp_options_list(keys))
# Docstring templates for the public API callables below; {opts_list} and
# {opts_desc} are filled in dynamically by CallableDynamicDoc.__doc__.
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""

_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""

_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with not arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""

_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""

# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in the ibis API
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)

# Attribute-style access point for all registered options,
# e.g. ibis.options.some.option_name.
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
    """
    Context manager to temporarily set options in the `with` statement context.

    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
    On exit, every option touched is restored to the value it had on entry.

    Examples
    --------
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...
    """

    def __init__(self, *args):
        if not (len(args) % 2 == 0 and len(args) >= 2):
            # Fixed: the two adjacent string literals previously
            # concatenated without a separating space, producing
            # "Need to invoke asoption_context(...)".
            raise ValueError(
                'Need to invoke as '
                'option_context(pat, val, [(pat, val), ...)).'
            )

        # Pair up the alternating (pattern, value) arguments.
        self.ops = list(zip(args[::2], args[1::2]))

    def __enter__(self):
        # Snapshot current values first so __exit__ can restore them even
        # if a later _set_option fails partway through.
        undo = []
        for pat, val in self.ops:
            undo.append((pat, _get_option(pat, silent=True)))
        self.undo = undo

        for pat, val in self.ops:
            _set_option(pat, val, silent=True)

    def __exit__(self, *args):
        # Restore the snapshot taken in __enter__.
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
    """Register an option in the package-wide ibis config object

    Parameters
    ----------
    key - a fully-qualified key, e.g. "x.y.option - z".
    defval - the default value of the option
    doc - a string description of the option
    validator - a function of a single argument, should raise `ValueError` if
        called with a value which is not a legal value for the option.
    cb - a function of a single argument "key", which is called
        immediately after an option value is set/reset. key is
        the full name of the option.

    Returns
    -------
    Nothing.

    Raises
    ------
    OptionError if `key` is already registered or is a reserved key.
    ValueError if `validator` is specified and `defval` is not a valid value.
    """
    # Imported locally so module import stays cheap.
    import tokenize
    import keyword
    key = key.lower()

    if key in _registered_options:
        raise OptionError("Option '%s' has already been registered" % key)
    if key in _reserved_keys:
        raise OptionError("Option '%s' is a reserved key" % key)

    # the default value should be legal
    if validator:
        validator(defval)

    # walk the nested dict, creating dicts as needed along the path
    path = key.split('.')

    # Every path component must be a valid, non-keyword Python identifier
    # so the option can be reached via attribute access on `options`.
    for k in path:
        if not bool(re.match('^' + tokenize.Name + '$', k)):
            raise ValueError("%s is not a valid identifier" % k)
        if keyword.iskeyword(k):
            raise ValueError("%s is a python keyword" % k)

    cursor = _global_config
    for i, p in enumerate(path[:-1]):
        if not isinstance(cursor, dict):
            raise OptionError("Path prefix to option '%s' is already an option"
                              % '.'.join(path[:i]))
        if p not in cursor:
            cursor[p] = {}
        cursor = cursor[p]

    if not isinstance(cursor, dict):
        raise OptionError("Path prefix to option '%s' is already an option"
                          % '.'.join(path[:-1]))

    cursor[path[-1]] = defval  # initialize the option to its default

    # save the option metadata
    _registered_options[key] = RegisteredOption(key=key, defval=defval,
                                                doc=doc, validator=validator,
                                                cb=cb)
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
    """Mark option `key` as deprecated.

    If code attempts to access this option, a warning will be produced,
    using `msg` if given, or a default message if not.
    If `rkey` is given, any access to the key will be re-routed to `rkey`.

    Note that neither the existence of `key` nor that of `rkey` is checked
    here. If they do not exist, any subsequent access will fail as usual,
    after the deprecation warning is given.

    Parameters
    ----------
    key - the name of the option to be deprecated. must be a fully-qualified
          option name (e.g "x.y.z.rkey").
    msg - (Optional) a warning message to output when the key is referenced.
          if no message is given a default message will be emitted.
    rkey - (Optional) the name of an option to reroute access to.
           If specified, any referenced `key` will be re-routed to `rkey`
           including set/get/reset. rkey must be a fully-qualified option
           name (e.g "x.y.z.rkey"). used by the default message if no
           `msg` is specified.
    removal_ver - (Optional) specifies the version in which this option will
                  be removed. used by the default message if no `msg`
                  is specified.

    Returns
    -------
    Nothing

    Raises
    ------
    OptionError - if key has already been deprecated.
    """
    key = key.lower()

    if key in _deprecated_options:
        raise OptionError("Option '%s' has already been defined as deprecated."
                          % key)

    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
    """Return the list of registered option keys matching `pat`.

    An exact key match short-circuits; the reserved pattern "all" returns
    every registered option; otherwise `pat` is treated as a
    case-insensitive regular expression.
    """
    if pat in _registered_options:
        return [pat]

    keys = sorted(_registered_options)
    if pat == 'all':  # reserved key
        return keys

    matcher = re.compile(pat, re.I)
    return [key for key in keys if matcher.search(key)]
def _get_root(key):
    """Return ``(parent_dict, leaf_name)`` for `key` within _global_config."""
    parts = key.split('.')
    node = _global_config
    for part in parts[:-1]:
        node = node[part]
    return node, parts[-1]
def _is_deprecated(key):
    """ Returns True if the given option has been deprecated """
    return key.lower() in _deprecated_options
def _get_deprecated_option(key):
    """
    Retrieves the metadata for a deprecated option, if `key` is deprecated.

    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    # dict.get already returns None for a missing key; no need for the
    # manual try/except KeyError dance.
    return _deprecated_options.get(key)
def _get_registered_option(key):
    """
    Retrieves the option metadata if `key` is a registered option.

    Returns
    -------
    RegisteredOption (namedtuple) if key is registered, None otherwise
    """
    # Doc fix: the original docstring said "if key is deprecated", but this
    # function looks up *registered* options.
    return _registered_options.get(key)
def _translate_key(key):
    """Return the replacement key for a deprecated `key`, if one exists.

    If `key` is deprecated and a replacement key is defined, returns the
    replacement key; otherwise returns `key` as-is.
    """
    dep = _get_deprecated_option(key)
    if dep is None:
        return key
    return dep.rkey or key
def _warn_if_deprecated(key):
    """
    Checks if `key` is a deprecated option and if so, prints a warning.

    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    dep = _get_deprecated_option(key)
    if not dep:
        return False

    if dep.msg:
        # a custom message is both printed and issued as a warning
        print(dep.msg)
        warnings.warn(dep.msg, DeprecationWarning)
    else:
        # build the default message from the available metadata
        parts = ["'%s' is deprecated" % key]
        if dep.removal_ver:
            parts.append(' and will be removed in %s' % dep.removal_ver)
        if dep.rkey:
            parts.append(", please use '%s' instead." % dep.rkey)
        else:
            parts.append(', please refrain from using it.')
        warnings.warn(''.join(parts), DeprecationWarning)
    return True
def _build_option_description(k):
    """ Builds a formatted description of a registered option """
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)

    s = u('%s ') % k

    # Bug fix: the original dereferenced ``o.doc`` before checking whether
    # the option is registered at all, so an unknown key raised
    # AttributeError even though the later ``if o:`` guard implies
    # ``o`` may be None.
    if o and o.doc:
        s += '\n'.join(o.doc.strip().split('\n'))
    else:
        s += 'No description available.'

    if o:
        s += u('\n [default: %s] [currently: %s]') % (o.defval,
                                                      _get_option(k, True))
    if d:
        s += u('\n (Deprecated')
        s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
        s += u(')')

    s += '\n\n'
    return s
def pp_options_list(keys, width=80, _print=False):
    """ Builds a concise listing of available options, grouped by prefix """
    from itertools import groupby
    from textwrap import wrap

    def format_group(name, members):
        # render one "- prefix.[a, b, c]" group, wrapped to `width`
        prefix = ('- ' + name + '.[' if name else '')
        wrapped = wrap(', '.join(members), width, initial_indent=prefix,
                       subsequent_indent=' ', break_long_words=False)
        if wrapped and wrapped[-1] and name:
            wrapped[-1] = wrapped[-1] + ']'
        return wrapped

    lines = []
    singles = [k for k in sorted(keys) if '.' not in k]
    if singles:
        lines += format_group('', singles)

    dotted = [k for k in keys if '.' in k]
    for prefix, group in groupby(sorted(dotted), lambda k: k[:k.rfind('.')]):
        members = [k[len(prefix) + 1:] for k in group]
        lines += format_group(prefix, members)

    listing = '\n'.join(lines)
    if _print:
        print(listing)
    else:
        return listing
#
# helpers
@contextmanager
def config_prefix(prefix):
    """contextmanager for multiple invocations of API with a common prefix

    supported API functions: (register / get / set )__option

    Warning: This is not thread - safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.

    Example:

        import ibis.config as cf
        with cf.config_prefix("display.font"):
            cf.register_option("color", "red")
            cf.register_option("size", " 5 pt")
            cf.set_option(size, " 6 pt")
            cf.get_option(size)
            ...
            etc'

    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
    """
    # Note: reset_option relies on set_option, and on key directly
    # it does not fit in to this monkey-patching scheme
    global register_option, get_option, set_option, reset_option

    def wrap(func):
        def inner(key, *args, **kwds):
            pkey = '%s.%s' % (prefix, key)
            return func(pkey, *args, **kwds)
        return inner

    _register_option = register_option
    _get_option = get_option
    _set_option = set_option
    set_option = wrap(set_option)
    get_option = wrap(get_option)
    register_option = wrap(register_option)
    try:
        yield None
    finally:
        # Bug fix: restore the originals even when the with-body raises;
        # previously an exception left the module API permanently wrapped
        # with the prefix.
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
    """
    Parameters
    ----------
    `_type` - a type to be compared against (e.g. type(x) == `_type`)

    Returns
    -------
    validator - a function of a single argument x, which raises
                ValueError unless type(x) is exactly `_type`
    """

    def validate(value):
        # exact type match: subclasses (e.g. bool for int) are rejected
        if type(value) != _type:
            raise ValueError("Value must have type '%s'" % str(_type))

    return validate
def is_instance_factory(_type):
    """
    Parameters
    ----------
    `_type` - the type (or tuple/list of types) to be checked against

    Returns
    -------
    validator - a function of a single argument x, which raises
                ValueError unless x is an instance of `_type`
    """
    if isinstance(_type, (list, tuple)):
        _type = tuple(_type)
        type_repr = "|".join(str(t) for t in _type)
    else:
        type_repr = "'%s'" % _type

    def validate(value):
        if not isinstance(value, _type):
            raise ValueError("Value must be an instance of %s" % type_repr)

    return validate
def is_one_of_factory(legal_values):
    """Return a validator raising ValueError unless x is in `legal_values`."""

    def validate(value):
        if value in legal_values:
            return
        choices = "|".join(str(v) for v in legal_values)
        raise ValueError("Value must be one of %s" % choices)

    return validate
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
# NOTE: is_type_factory checks the exact type, so e.g. is_int rejects bool
# values even though bool is a subclass of int.
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_unicode = is_type_factory(compat.text_type)
# is_text accepts any str or bytes instance (isinstance check, not exact type)
is_text = is_instance_factory((str, bytes))
|
iamaziz/simpleai | refs/heads/master | simpleai/search/__init__.py | 5 | # coding: utf-8
from simpleai.search.models import CspProblem, SearchProblem
from simpleai.search.traditional import breadth_first, depth_first, limited_depth_first, iterative_limited_depth_first, uniform_cost, greedy, astar
from simpleai.search.local import (
beam, hill_climbing, hill_climbing_stochastic, simulated_annealing,
genetic, hill_climbing_random_restarts)
from simpleai.search.csp import (
backtrack, min_conflicts, MOST_CONSTRAINED_VARIABLE,
HIGHEST_DEGREE_VARIABLE, LEAST_CONSTRAINING_VALUE,
convert_to_binary)
|
jeffbinder/adamsmith | refs/heads/master | correlate-index.py | 1 | # Matches up 1784 index to the topic model by using Spearman's rank
# coefficient to find best matches for each topic.
import numpy
import os
import pickle
import scipy.stats
import time
# Load the pickled 1784 index: presumably maps heading -> [(subheading,
# page), ...] given how match() consumes it below.  NOTE(review): Python 2
# style - the file handle returned by open() is never closed.
index = pickle.load(open('index.pkl'))
def match():
    """Correlate MALLET topic weights with the 1784 index headings.

    Reads ``doc_topics.txt`` (MALLET --output-doc-topics format) and, for
    every (heading, topic) pair, computes Spearman's rank correlation
    between the per-page topic weights and page membership in the heading.

    Returns
    -------
    (heading_matches, topic_matches) - dicts mapping each heading (resp.
    topic) to a list of (other, correlation, left, inner, right) tuples
    sorted by descending correlation, where left/inner/right are the page
    sets unique to the heading, shared by both, and unique to the topic.

    NOTE(review): Python 2 code (``xrange``, integer ``len(line) / 2``);
    relies on the module-level ``index`` dict.
    """
    f = open('doc_topics.txt', 'r')
    f.readline()  # skip the MALLET header line
    topic_coefs = {}
    for line in f.readlines():
        line = line.split('\t')
        # the document name is a file path whose basename is the page number
        pagenum = line[1].split('/')[-1].replace('%20', ' ').replace('%3F', '?')
        pagenum = int(pagenum)
        line = line[2:]
        ntopics = len(line) / 2  # remaining fields are (topic, weight) pairs
        for i in xrange(0, ntopics):
            topic = int(line[i*2])
            coef = float(line[i*2 + 1])
            topic_coefs.setdefault(topic, {})[pagenum] = coef
    heading_matches = {}
    topic_matches = {}
    for heading in index:
        heading_pages = set(page for (subheading, page) in index[heading])
        for topic in topic_coefs:
            page_coefs = topic_coefs[topic]
            # one row per page: (page, topic weight, 1 if page under heading)
            data = [(pagenum, page_coefs[pagenum],
                     1 if pagenum in heading_pages else 0)
                    for pagenum in page_coefs]
            data = sorted(data, key=lambda x: -x[1])
            coefs = [x[1] for x in data]
            in_heading = [x[2] for x in data]
            correlation, p = scipy.stats.spearmanr(coefs, in_heading)
            # compare the heading's pages against an equal number of the
            # topic's top-weighted pages
            topic_pages = set(x[0] for x in data[:len(heading_pages)])
            left = heading_pages - topic_pages
            inner = heading_pages & topic_pages
            right = topic_pages - heading_pages
            heading_matches.setdefault(heading, []) \
                .append((topic, correlation, left, inner, right))
            topic_matches.setdefault(topic, []) \
                .append((heading, correlation, left, inner, right))
    # sort each list so the strongest correlation comes first
    for heading in heading_matches:
        heading_matches[heading] = sorted(heading_matches[heading],
                                          key=lambda x: -x[1])
    for topic in topic_matches:
        topic_matches[topic] = sorted(topic_matches[topic],
                                      key=lambda x: -x[1])
    return heading_matches, topic_matches
# Run a trial for each number of topics, recording the correlation of the
# best fit for each individual topic (THIS WILL WRITE OVER THE TOPIC MODEL/
# FILES!). Also records the number of headings that are matched with
# correlation >= 0.25 by at least one topic.
# of = open('results_by_num_topics_2.csv', 'w')
# of.write('num_topics,topic_num,avg_best_correlation\n')
# for ntopics in xrange(5, 61):
# os.system('mallet train-topics --input pages.mallet --num-topics '
# + str(ntopics) + ' --output-state topic_state.gz '
# '--output-topic-keys topic_keys.txt --output-doc-topics '
# 'doc_topics.txt')
# f = open('topic_keys.txt', 'r')
# top_words_by_topic = {}
# for line in f.readlines():
# line = line.strip()
# topic, _, words = line.split('\t')
# topic = int(topic)
# top_words_by_topic[topic] = words.split(' ')[:8]
# heading_matches, topic_matches = match()
# avg_correlation = 0.0
# for topic in topic_matches:
# print ' '.join(top_words_by_topic[topic]),
# print topic_matches[topic][0]
# of.write(str(ntopics) + ',' + str(topic) + ','
# + str(topic_matches[topic][0][1]) + '\n')
# avg_correlation += topic_matches[topic][0][1]
# avg_correlation /= float(len(topic_matches))
# print "Average correlation:", avg_correlation
# time.sleep(1)
# of.flush()
# Run a bunch of trials, recording the average best fit each time (THIS
# WILL WRITE OVER THE TOPIC MODEL FILES!).
# of = open('results_by_num_topics.csv', 'w')
# of.write('num_topics,avg_best_correlation\n')
# of2 = open('results_by_num_topics_3.csv', 'w')
# of2.write('num_topics,num_strongly_matched_headings\n')
# for ntopics in xrange(5, 61):
# for trial in xrange(40):
# os.system('mallet train-topics --input pages.mallet --num-topics '
# + str(ntopics) + ' --output-state topic_state.gz '
# '--output-topic-keys topic_keys.txt --output-doc-topics '
# 'doc_topics.txt')
# f = open('topic_keys.txt', 'r')
# top_words_by_topic = {}
# for line in f.readlines():
# line = line.strip()
# topic, _, words = line.split('\t')
# topic = int(topic)
# top_words_by_topic[topic] = words.split(' ')[:8]
# heading_matches, topic_matches = match()
# strongly_matched_headings = set()
# for heading in heading_matches:
# if heading_matches[heading][0][1] >= 0.25:
# strongly_matched_headings.add(heading)
# of2.write(str(ntopics) + ',' + str(len(strongly_matched_headings))
# + '\n')
# avg_correlation = 0.0
# for topic in topic_matches:
# print ' '.join(top_words_by_topic[topic]),
# print topic_matches[topic][0]
# avg_correlation += topic_matches[topic][0][1]
# avg_correlation /= float(len(topic_matches))
# print "Average correlation:", avg_correlation
# time.sleep(1)
# of.write(str(ntopics) + ',' + str(avg_correlation) + '\n')
# of.flush(); of2.flush()
# Print results for a topic model that has already been generated.
# NOTE(review): Python 2 script (print statements, text-mode pickle write).

# Parse topic_keys.txt; each line is <topic>\t<weight>\t<space-separated words>,
# keeping the first eight words per topic for display.
f = open('topic_keys.txt', 'r')
top_words_by_topic = {}
for line in f.readlines():
    line = line.strip()
    topic, _, words = line.split('\t')
    topic = int(topic)
    top_words_by_topic[topic] = words.split(' ')[:8]

heading_matches, topic_matches = match()

# persist the raw match data for later analysis
f = open('matches.pkl', 'w')
pickle.dump((heading_matches, topic_matches), f)

print '===== Top topics for headings'
avg_correlation = 0.0
for heading in sorted(heading_matches):
    # replace the topic id in the best match with its top words for display
    heading_matches[heading][0] \
        = (' '.join(top_words_by_topic[heading_matches[heading][0][0]]),) \
        + heading_matches[heading][0][1:]
    print heading, heading_matches[heading][0]
    avg_correlation += heading_matches[heading][0][1]
avg_correlation /= float(len(heading_matches))
print avg_correlation

print '===== Top headings for topics'
avg_correlation = 0.0
for topic in topic_matches:
    print ' '.join(top_words_by_topic[topic]),
    print topic_matches[topic][0]
    avg_correlation += topic_matches[topic][0][1]
avg_correlation /= float(len(topic_matches))
print avg_correlation
|
lastmansleeping/hindi-toolkit | refs/heads/master | hindi_toolkit/ner/create_train.py | 1 | # coding: utf-8
import codecs
import json
# Convert the tab-separated NER training file into JSON.
# Sentences are separated by blank lines; each token line holds
# tab-separated fields.
with codecs.open('../data/ner/train', 'r', 'utf-8') as input_file:
    sentences = []
    sentence = []
    for line in input_file:
        if line == '\n':
            # a blank line terminates the current sentence
            sentences.append(sentence)
            sentence = []
        else:
            sentence.append([entry.strip() for entry in line.split('\t')])
# NOTE(review): as in the original, a trailing sentence not followed by a
# blank line is silently dropped - confirm the data always ends with one.

# Bug fix: the output handle was previously never closed, so the JSON
# could be left unflushed on interpreter teardown; the with-block
# guarantees close() on all paths.  (The ``encoding`` keyword to
# json.dump exists only on Python 2, matching the rest of this script.)
with codecs.open('../data/ner/train.json', 'w', 'utf-8') as output_file:
    json.dump({'sentences': sentences}, output_file,
              sort_keys=True, indent=4, encoding='utf-8')
|
ehsan/js-symbolic-executor | refs/heads/master | cvc3/java/run_all.py | 5 | #! /usr/bin/env python
import os
# SMT-LIB benchmark divisions to run CVC3 against.
libs = [
    "AUFLIA",
    "AUFLIRA",
    "AUFNIRA",
    "QF_AUFBV",
    "QF_AUFLIA",
    "QF_IDL",
    "QF_LIA",
    "QF_LRA",
    "QF_RDL",
    "QF_UF",
    "QF_UFBV32",
    "QF_UFIDL",
    "QF_UFLIA",
]
for lib in libs:
    # Run the harness once per division, redirecting output to out.<lib>.
    # NOTE(review): the -t/-m/-l flag semantics are defined by run_tests.py
    # (presumably time limit, memory limit, log level) - confirm there.
    command = "./run_tests.py -vc 'java -esa -Xss100M -jar lib/cvc3.jar' -t 1 -m 500 -l 5 -lang smtlib /media/data/CVC/BENCHMARKS/SMTLIB/" + lib + " > out." + lib
    print command
    os.system(command)
|
JetBrains/intellij-community | refs/heads/master | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/__init__.py | 2547 | # Dummy file to make this directory a package.
|
hehongliang/tensorflow | refs/heads/master | tensorflow/python/data/experimental/kernel_tests/optimization/map_and_batch_fusion_test.py | 2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndBatchFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MapAndBatchFusionTest(test_base.DatasetTestBase):
  """Verifies the map+batch fusion rewrite in the tf.data optimizer."""

  def testMapAndBatchFusion(self):
    # assert_next checks that, after optimization, the next transformation
    # in the dataset graph is the fused "MapAndBatch" op.
    dataset = dataset_ops.Dataset.range(10).apply(
        optimization.assert_next(
            ["MapAndBatch"])).map(lambda x: x * x).batch(10)
    options = dataset_ops.Options()
    # explicitly opt in to the fusion optimization under test
    options.experimental_map_and_batch_fusion = True
    dataset = dataset.with_options(options)
    # a single batch of the squares 0..81 proves behavior is unchanged
    self.assertDatasetProduces(
        dataset, expected_output=[[x * x for x in range(10)]])
if __name__ == "__main__":
test.main()
|
fermat618/pida | refs/heads/master | pida/services/filemanager/filehiddencheck.py | 1 | # -*- coding: utf-8 -*-
"""
:copyright: 2005-2008 by The PIDA Project
:license: GPL 2 or later (see README/COPYING/LICENSE)
"""
# Scope constants for file-hidden checks: per-project vs. global.
SCOPE_PROJECT = 0
SCOPE_GLOBAL = 1
def fhc(scope, label):
    """Decorator tagging a hidden-check function with metadata.

    Attaches ``scope`` (SCOPE_PROJECT or SCOPE_GLOBAL), a display
    ``label`` and an ``identifier`` (the function's own name) as
    attributes on the decorated function, which is returned unchanged.
    """
    def tag(func):
        func.scope = scope
        func.label = label
        func.identifier = func.__name__
        return func
    return tag
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
easycoin-core/Easycoin | refs/heads/master | qa/rpc-tests/rawtransactions.py | 17 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
    """Functional test for the raw-transaction RPCs.

    Covers: sendrawtransaction with a missing input, funding and spending
    2-of-2 and 2-of-3 multisig addresses, and sequence-number validation
    in createrawtransaction.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self, split=False):
        # Start three nodes and connect them in a full mesh.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)

        #connect to a local machine for debugging
        #url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 19332)
        #proxy = AuthServiceProxy(url)
        #proxy.url = url # store URL on proxy for info
        #self.nodes.append(proxy)

        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)

        self.is_network_split=False
        self.sync_all()

    def run_test(self):
        #prepare some coins for multiple *rawtransaction commands
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(101)
        self.sync_all()
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
        self.sync_all()
        self.nodes[0].generate(5)
        self.sync_all()

        #########################################
        # sendrawtransaction with missing input #
        #########################################
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
        outputs = { self.nodes[0].getnewaddress() : 4.998 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawtx = self.nodes[2].signrawtransaction(rawtx)

        # broadcasting must fail with "Missing inputs"
        try:
            rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
        except JSONRPCException as e:
            assert("Missing inputs" in e.error['message'])
        else:
            assert(False)

        #########################
        # RAW TX MULTISIG TESTS #
        #########################
        # 2of2 test
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        mSigObjValid = self.nodes[2].validateaddress(mSigObj)

        #use balance deltas instead of absolute values
        bal = self.nodes[2].getbalance()

        # send 1.2 BTC to msig adr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance

        # 2of3 test from different nodes
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr3 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        addr3Obj = self.nodes[2].validateaddress(addr3)

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
        mSigObjValid = self.nodes[2].validateaddress(mSigObj)

        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
        decTx = self.nodes[0].gettransaction(txId)
        rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
        sPK = rawTx['vout'][0]['scriptPubKey']['hex']
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        #THIS IS A INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
        assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable

        # locate the 2.2 BTC output of the funding transaction
        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
        vout = False
        for outpoint in rawTx['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break

        # spend the multisig output: node1 alone cannot complete the
        # signature, node2 (two keys) can
        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx

        rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
        assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
        self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
        rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx

        # sequence number checks in createrawtransaction:
        # an explicit in-range sequence is preserved
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 1000)

        # negative sequence numbers are rejected
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)

        # sequence numbers above the uint32 range are rejected
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)

        # the maximum accepted value (0xfffffffe) round-trips
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
|
ImageEngine/gaffer | refs/heads/master | python/GafferUITest/SliderTest.py | 1 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferTest
import GafferUI
import GafferUITest
class SliderTest( GafferUITest.TestCase ) :
    """Tests GafferUI.Slider position accessors and selection signals."""

    def testConstructor( self ) :
        # default construction yields a single position at 0.5
        s = GafferUI.Slider()
        self.assertEqual( s.getPosition(), 0.5 )
        self.assertEqual( s.getPositions(), [ 0.5 ] )

        s = GafferUI.Slider( position = 1 )
        self.assertEqual( s.getPosition(), 1 )
        self.assertEqual( s.getPositions(), [ 1 ] )

        # with multiple positions, the singular accessor raises
        s = GafferUI.Slider( positions = [ 0, 1 ] )
        self.assertRaises( ValueError, s.getPosition )
        self.assertEqual( s.getPositions(), [ 0, 1 ] )

        # position and positions are mutually exclusive arguments
        self.assertRaises( Exception, GafferUI.Slider, position = 1, positions = [ 1, 2 ] )

    def testPositionAccessors( self ) :
        s = GafferUI.Slider( position = 1 )
        self.assertEqual( s.getPosition(), 1 )
        self.assertEqual( s.getPositions(), [ 1 ] )

        s.setPosition( 2 )
        self.assertEqual( s.getPosition(), 2 )
        self.assertEqual( s.getPositions(), [ 2 ] )

        s.setPositions( [ 2, 3 ] )
        self.assertRaises( ValueError, s.getPosition )
        self.assertEqual( s.getPositions(), [ 2, 3 ] )

    def testSelectedIndex( self ) :
        s = GafferUI.Slider( positions = [ 1, 2 ] )
        self.assertEqual( s.getSelectedIndex(), None )

        s.setSelectedIndex( 0 )
        self.assertEqual( s.getSelectedIndex(), 0 )
        s.setSelectedIndex( 1 )
        self.assertEqual( s.getSelectedIndex(), 1 )

        # out-of-range indices are rejected
        self.assertRaises( IndexError, s.setSelectedIndex, -1 )
        self.assertRaises( IndexError, s.setSelectedIndex, 2 )

        # None clears the selection
        s.setSelectedIndex( None )
        self.assertEqual( s.getSelectedIndex(), None )

        # the signal fires only when the selected index actually changes
        cs = GafferTest.CapturingSlot( s.selectedIndexChangedSignal() )
        s.setSelectedIndex( 1 )
        self.assertEqual( len( cs ), 1 )
        self.assertTrue( cs[0][0] is s )
        s.setSelectedIndex( 1 )
        self.assertEqual( len( cs ), 1 )
        self.assertTrue( cs[0][0] is s )
        s.setSelectedIndex( 0 )
        self.assertEqual( len( cs ), 2 )
        self.assertTrue( cs[0][0] is s )
        self.assertTrue( cs[1][0] is s )
self.assertTrue( cs[1][0] is s )
if __name__ == "__main__":
unittest.main()
|
jameslegg/boto | refs/heads/develop | boto/ec2/attributes.py | 182 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class AccountAttribute(object):
    """SAX-style handler for one EC2 account attribute (name plus values)."""

    def __init__(self, connection=None):
        self.connection = connection
        self.attribute_name = None
        self.attribute_values = None

    def startElement(self, name, attrs, connection):
        # delegate parsing of the nested value set to an AttributeValues list
        if name != 'attributeValueSet':
            return None
        self.attribute_values = AttributeValues()
        return self.attribute_values

    def endElement(self, name, value, connection):
        if name == 'attributeName':
            self.attribute_name = value
class AttributeValues(list):
    """List subclass that collects the text of <attributeValue> elements."""

    def startElement(self, name, attrs, connection):
        # no nested structure below an attributeValueSet
        return None

    def endElement(self, name, value, connection):
        if name != 'attributeValue':
            return
        self.append(value)
class VPCAttribute(object):
    """SAX-style handler for a DescribeVpcAttribute response.

    Tracks which boolean attribute element is currently open so that the
    following <value> element is stored on the right field.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.vpc_id = None
        self.enable_dns_hostnames = None
        self.enable_dns_support = None
        self._current_attr = None

    def startElement(self, name, attrs, connection):
        if name in ('enableDnsHostnames', 'enableDnsSupport'):
            self._current_attr = name

    def endElement(self, name, value, connection):
        if name == 'vpcId':
            self.vpc_id = value
        elif name == 'value':
            # only the literal string 'true' maps to True
            flag = (value == 'true')
            if self._current_attr == 'enableDnsHostnames':
                self.enable_dns_hostnames = flag
            elif self._current_attr == 'enableDnsSupport':
                self.enable_dns_support = flag
|
worldforge/cyphesis | refs/heads/master | tools/scripts/generator/cpp/stub_class.py | 2 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
# Copyright 2017 Erik Ogenvik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create Stub implementation files from headers.
Use this tool to create stub file used for testing. A ".h" header file is supplied
and an additional "*_custom.h" file is created, if it does not exists yet.
This allows "custom" behaviour to be added to the "*_custom.h" file, since it won't
be touched when regenerating the stubs.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sys
import errno
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
# Preserve compatibility with Python 2.3: fall back to the old ``sets``
# module when the ``set`` builtin does not exist.
try:
    _dummy = set
except NameError:
    import sets
    set = sets.Set

_VERSION = (1, 0, 1)  # The version of this script.

# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def buildTemplatedArgs(node):
    """Render an AST type node as C++ source, e.g. ``vector<int>*``.

    Recursively expands template arguments and appends ``*`` when the
    node is a pointer type.
    """
    inner = [buildTemplatedArgs(t) for t in node.templated_types]
    if inner:
        rendered = node.name + '<' + ', '.join(inner) + '>'
    else:
        rendered = node.name
    if node.pointer:
        rendered += '*'
    return rendered
def _GenerateMethods(output_lines, source, class_node):
    """Append stub definitions for every declared method of class_node.

    Args:
      output_lines: list of strings; generated lines are appended in place.
      source: full text of the header, used to slice parameter declarations.
      class_node: the ast.Class node whose methods are stubbed.
    """
    ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
    deleted_or_defaulted = ast.FUNCTION_DEFAULTED | ast.FUNCTION_DELETED
    indent = ' ' * _INDENT
    # Numeric return types that can all be stubbed with "return 0;".
    # BUG FIX: the original inline list was missing a comma after
    # 'unsigned short' (silently concatenating it with 'int32_t' into the
    # bogus entry 'unsigned shortint32_t'), and listed 'uint32_t' /
    # 'std::uint32_t' twice where the 64-bit variants were clearly intended.
    numeric_return_types = [
        'int', 'unsigned int', 'long', 'unsigned long', 'size_t',
        'float', 'double', 'short', 'unsigned short',
        'int32_t', 'int64_t', 'uint32_t', 'uint64_t',
        'std::int32_t', 'std::int64_t', 'std::uint32_t', 'std::uint64_t']
    #Create Ctor instantiation list: every non-static pointer member is
    #initialised to nullptr in the generated constructors.
    ctor_inits = []
    for node in class_node.body:
        if (isinstance(node, ast.VariableDeclaration)):
            if node.type.pointer and not 'static' in node.type.modifiers:
                ctor_inits.append("%s(nullptr)" % node.name)
    for node in class_node.body:
        #Ignore all functions with all upper case letters, since these should be macros.
        if (isinstance(node, ast.Function)
            and not node.IsDefinition()
            and not node.modifiers & deleted_or_defaulted
            and not node.name.isupper()):
            # Pick out all the elements we need from the original function.
            const = ''
            methodName = node.name
            if node.modifiers & ast.FUNCTION_CONST:
                const = ' const'
            return_type = 'void'
            if node.modifiers & ctor_or_dtor:
                return_type = ''
            if node.modifiers & ast.FUNCTION_DTOR:
                methodName = "~" + methodName
            return_statement = ''
            if node.return_type:
                # Add modifiers like 'const'.
                modifiers = ''
                if node.return_type.modifiers:
                    # Ignore 'static'
                    modifiers = ' '.join([i for i in node.return_type.modifiers if i != 'static']) + ' '
                raw_return_type = buildTemplatedArgs(node.return_type)
                return_type = modifiers + raw_return_type
                # Pick a return statement that yields a sane dummy value for
                # the declared return type.
                if node.return_type.pointer:
                    return_statement = 'return nullptr;'
                elif node.return_type.name != 'void':
                    if node.return_type.name in numeric_return_types:
                        return_statement = 'return 0;'
                    elif node.return_type.name == 'bool':
                        return_statement = 'return false;'
                    elif node.return_type.name in ['std::string', 'char*', 'const char*']:
                        if node.return_type.reference:
                            return_statement = 'static %s instance; return instance;' % raw_return_type
                        else:
                            return_statement = 'return "";'
                    elif node.return_type.name in ['std::vector', 'std::set', 'std::list', 'std::map']:
                        if node.return_type.reference:
                            return_statement = 'static %s instance; return instance;' % raw_return_type
                        else:
                            return_statement = 'return %s();' % raw_return_type
                    else:
                        return_statement = 'return *static_cast<%s*>(nullptr);' % (return_type)
                if node.return_type.reference:
                    return_type += '&'
            # (Removed dead locals from the original: 'num_parameters' and
            # 'tmpl' were computed but never used.)
            args = ''
            if node.parameters:
                # Remove any default values
                args = ', '.join('%s' % source[param.start:param.end].split('=')[0] for param in node.parameters)
            guard = 'STUB_%s_%s' % (class_node.name, node.name.replace('==', '_EQUALS').replace('[]', '_INDEX'))
            if node.modifiers & ast.FUNCTION_DTOR:
                guard += "_DTOR"
            # Create the mock method definition.
            output_lines.extend(['#ifndef %s' % guard])
            # Add a commented out line to allow for easily copying to the "custom" file if needed.
            output_lines.extend(['//#define %s' % guard])
            template_specifier = ""
            if class_node.templated_types and len(class_node.templated_types) > 0:
                output_lines.extend(['%stemplate <%s>' % (indent, ','.join('typename %s' % key for key in class_node.templated_types.keys()))])
                template_specifier = "<%s>" % ','.join('%s' % key for key in class_node.templated_types.keys())
            if node.modifiers & ast.FUNCTION_CTOR:
                output_lines.extend(['%s%s %s%s::%s(%s)%s' % (indent, return_type, class_node.name, template_specifier, methodName, args, const)])
                # Forward all arguments to the first base class, then apply
                # the nullptr member initialisers collected above.
                if class_node.bases is not None and len(class_node.bases) > 0:
                    output_lines.extend(['%s: %s(%s)' % (indent * 2, class_node.bases[0].name, ', '.join(param.name for param in node.parameters))])
                    if len(ctor_inits) != 0:
                        output_lines.extend(["%s, %s" % (indent * 2, ','.join(ctor_inits))])
                else:
                    if len(ctor_inits) != 0:
                        output_lines.extend(["%s: %s" % (indent * 2, ','.join(ctor_inits))])
                output_lines.extend(['%s{' % (indent), (indent * 2) + return_statement, '%s}' % (indent)])
            else:
                output_lines.extend(['%s%s %s%s::%s(%s)%s' % (indent, return_type, class_node.name, template_specifier, methodName, args, const), '%s{' % (indent), (indent * 2) + return_statement, '%s}' % (indent)])
            output_lines.extend(['#endif //%s' % guard, ''])
def _GenerateStubs(filename, source, ast_list):
processed_class_names = set()
lines = []
filenameSegments = filename.split("/")
customFileName = 'stub' + ''.join(filenameSegments[-1].split('.')[0:-1]) + '_custom.h'
guard = "STUB_" + filename.upper().replace('.', '_').replace('/', '_')
lines.append("// AUTOGENERATED file, created by the tool generate_stub.py, don't edit!")
lines.append("// If you want to add your own functionality, instead edit the %s file." % customFileName)
lines.append("")
lines.append("#ifndef %s" % guard)
lines.append("#define %s" % guard)
lines.append("")
lines.append("#include \"%s\"" % (filename))
lines.append("#include \"%s\"" % (customFileName))
lines.append("")
for node in ast_list:
if (isinstance(node, ast.Class) and node.body):
class_name = node.name
parent_name = class_name
processed_class_names.add(class_name)
class_node = node
# Add namespace before the class.
if class_node.namespace:
lines.extend(['namespace %s {' % n for n in class_node.namespace]) # }
lines.append('')
# Add all the methods.
_GenerateMethods(lines, source, class_node)
# Close the class.
if lines:
# Only close the class if there really is a class.
lines.append('') # Add an extra newline.
# Close the namespace.
if class_node.namespace:
for i in range(len(class_node.namespace)-1, -1, -1):
lines.append('} // namespace %s' % class_node.namespace[i])
lines.append('') # Add an extra newline.
if not processed_class_names:
sys.stderr.write('No class found in %s\n' % filename)
lines.append("#endif")
return lines
def main(argv=sys.argv):
    """Command-line entry point: generate stubs for the header in argv[1].

    Usage: generate_stub.py <header-file> <stub-output-directory>

    Writes "stub<name>.h" (always regenerated) under the output directory,
    mirroring the header's relative path, and seeds "<name>_custom.h" only
    if it does not already exist.  Returns 1 on usage or read errors; exits
    with status 1 if the header cannot be parsed.
    """
    if len(argv) < 3:
        sys.stderr.write('Cyphesis Stub Class Generator v%s\n\n' %
                         '.'.join(map(str, _VERSION)))
        sys.stderr.write(__doc__)
        return 1
    global _INDENT
    try:
        _INDENT = int(os.environ['INDENT'])
    except KeyError:
        pass  # INDENT not set: keep the default.
    except ValueError:
        # BUG FIX: this handler was a bare 'except:'; only a malformed
        # (non-integer) INDENT value should be reported here.
        sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))
    filename = argv[1]
    stubdirectory = argv[2]
    source = utils.ReadFile(filename)
    if source is None:
        return 1
    builder = ast.BuilderFromSource(source, filename)
    try:
        entire_ast = filter(None, builder.Generate())
    except KeyboardInterrupt:
        return
    except:
        # An error message was already printed since we couldn't parse.
        sys.exit(1)
    else:
        filenameSegments = filename.split("/")
        customFileName = 'stub' + ''.join(filenameSegments[-1].split('.')[0:-1]) + '_custom.h'
        # Mirror the header's relative directory under the stub directory.
        if len(filenameSegments) > 1:
            stubpath = stubdirectory + '/' + '/'.join(filenameSegments[0:-1]) + "/stub" + filenameSegments[-1]
            stubCustomPath = stubdirectory + '/' + '/'.join(filenameSegments[0:-1]) + "/" + customFileName
        else:
            stubpath = stubdirectory + "/stub" + filenameSegments[-1]
            stubCustomPath = stubdirectory + "/" + customFileName
        try:
            os.makedirs(os.path.dirname(stubpath))
        except OSError as exc: # Python >2.5
            # Only swallow "directory already exists"; re-raise anything else.
            if exc.errno == errno.EEXIST and os.path.isdir(os.path.dirname(stubpath)):
                pass
            else:
                raise
        lines = _GenerateStubs(filename, source, entire_ast)
        with open(stubpath, 'w') as stubFile:
            stubFile.write('\n'.join(lines))
        sys.stdout.write("Wrote to %s\n" % stubpath)
        # Seed the companion "custom" file exactly once; never overwrite it.
        if not os.path.isfile(stubCustomPath):
            with open(stubCustomPath, 'w') as stubCustomFile:
                stubCustomFile.write('//Add custom implementations of stubbed functions here; this file won\'t be rewritten when re-generating stubs.\n')
            sys.stdout.write("Also created empty custom stub file at %s\n" % stubCustomPath)
    # sys.stdout.write('\n'.join(lines))
if __name__ == '__main__':
    main(sys.argv)
|
zouyapeng/horizon-newtouch | refs/heads/master | openstack_dashboard/dashboards/project/data_processing/jobs/urls.py | 34 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns
from django.conf.urls import url
import openstack_dashboard.dashboards.project.data_processing. \
jobs.views as views
# URL routes for the data-processing "jobs" panel.
# NOTE(review): the empty pattern r'^$' is registered twice, under the names
# 'index' and 'jobs', so both reverse() names resolve to the same list view.
urlpatterns = patterns('',
    url(r'^$', views.JobsView.as_view(),
        name='index'),
    url(r'^$', views.JobsView.as_view(),
        name='jobs'),
    url(r'^create-job$',
        views.CreateJobView.as_view(),
        name='create-job'),
    url(r'^launch-job$',
        views.LaunchJobView.as_view(),
        name='launch-job'),
    url(r'^launch-job-new-cluster$',
        views.LaunchJobNewClusterView.as_view(),
        name='launch-job-new-cluster'),
    url(r'^choose-plugin$',
        views.ChoosePluginView.as_view(),
        name='choose-plugin'),
    # Catch-all: anything else is treated as a job id for the details view.
    url(r'^(?P<job_id>[^/]+)$',
        views.JobDetailsView.as_view(),
        name='details'))
|
msebire/intellij-community | refs/heads/master | python/testData/inspections/PyTypeCheckerInspection/CsvRegisterDialect/_csv.py | 21 | def register_dialect(name, dialect):
    """Stub of _csv.register_dialect (inspection test data); does nothing."""
    pass
class Dialect(object):
    """Empty stand-in for _csv.Dialect used by the type-checker test data."""
    pass
AnthonyQuach/cs3240-labdemo | refs/heads/master | helper.py | 40 | def greeting(msg):
    """Print the supplied message to standard output."""
    print(msg)
|
srjoglekar246/sympy | refs/heads/master | sympy/physics/matrices.py | 2 | """Known matrices related to physics"""
from sympy import Matrix, I
def msigma(i):
    """Returns a Pauli matrix sigma_i. i=1,2,3

    See Also
    ========

    http://en.wikipedia.org/wiki/Pauli_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import msigma
    >>> msigma(1)
    [0, 1]
    [1, 0]
    """
    # Table of the three Pauli matrices, keyed by index.
    pauli = {
        1: ((0, 1),
            (1, 0)),
        2: ((0, -I),
            (I, 0)),
        3: ((1, 0),
            (0, -1)),
    }
    if i not in pauli:
        raise IndexError("Invalid Pauli index")
    return Matrix(pauli[i])
def pat_matrix(m, dx, dy, dz):
    """Returns the Parallel Axis Theorem matrix to translate the inertia
    matrix a distance of (dx, dy, dz) for a body of mass m.

    Examples
    --------

    If the point we want the inertia about is a distance of 2 units of
    length and 1 unit along the x-axis we get:

    >>> from sympy.physics.matrices import pat_matrix
    >>> pat_matrix(2,1,0,0)
    [0, 0, 0]
    [0, 2, 0]
    [0, 0, 2]

    In case we want to find the inertia along a vector of (1,1,1):

    >>> pat_matrix(2,1,1,1)
    [ 4, -2, -2]
    [-2,  4, -2]
    [-2, -2,  4]
    """
    # Squared displacements on the diagonal, negated products off-diagonal.
    dxx, dyy, dzz = dx**2, dy**2, dz**2
    xy, yz, zx = -dx*dy, -dy*dz, -dz*dx
    translation = ((dyy + dzz, xy, zx),
                   (xy, dxx + dzz, yz),
                   (zx, yz, dyy + dxx))
    return m*Matrix(translation)
def mgamma(mu, lower=False):
    """Returns a Dirac gamma matrix gamma^mu in the standard
    (Dirac) representation.

    If you want gamma_mu, use gamma(mu, True).

    We use a convention:

    gamma^5 = I * gamma^0 * gamma^1 * gamma^2 * gamma^3
    gamma_5 = I * gamma_0 * gamma_1 * gamma_2 * gamma_3 = - gamma^5

    See Also
    ========

    http://en.wikipedia.org/wiki/Gamma_matrices

    Examples
    ========

    >>> from sympy.physics.matrices import mgamma
    >>> mgamma(1)
    [ 0, 0, 0, 1]
    [ 0, 0, 1, 0]
    [ 0, -1, 0, 0]
    [-1, 0, 0, 0]
    """
    # Entries of each gamma matrix in the Dirac representation, by index.
    gamma_entries = {
        0: ((1, 0, 0, 0),
            (0, 1, 0, 0),
            (0, 0, -1, 0),
            (0, 0, 0, -1)),
        1: ((0, 0, 0, 1),
            (0, 0, 1, 0),
            (0, -1, 0, 0),
            (-1, 0, 0, 0)),
        2: ((0, 0, 0, -I),
            (0, 0, I, 0),
            (0, I, 0, 0),
            (-I, 0, 0, 0)),
        3: ((0, 0, 1, 0),
            (0, 0, 0, -1),
            (-1, 0, 0, 0),
            (0, 1, 0, 0)),
        5: ((0, 0, 1, 0),
            (0, 0, 0, 1),
            (1, 0, 0, 0),
            (0, 1, 0, 0)),
    }
    if mu not in gamma_entries:
        raise IndexError("Invalid Dirac index")
    m = Matrix(gamma_entries[mu])
    # Lowering the index flips the sign for the spatial matrices and gamma_5
    # (indices 1, 2, 3, 5); gamma_0 equals gamma^0.
    if lower and mu != 0:
        m = -m
    return m
#Minkowski tensor using the convention (+,-,-,-) used in the Quantum Field
#Theory
# eta = diag(1, -1, -1, -1)
minkowski_tensor = Matrix( (
    (1,0,0,0),
    (0,-1,0,0),
    (0,0,-1,0),
    (0,0,0,-1)
))
|
ltilve/chromium | refs/heads/igalia-sidebar | tools/telemetry/PRESUBMIT.py | 15 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Paths excluded from the pylint run (currently none).
PYLINT_BLACKLIST = []
# Pylint warning codes suppressed for this tree; see the pylint message
# reference for their meanings.
PYLINT_DISABLED_WARNINGS = ['R0923', 'R0201', 'E1101']
def _CommonChecks(input_api, output_api):
    """Checks shared by the upload and commit presubmit hooks.

    Returns a list of presubmit result objects (empty when all checks pass).
    """
    results = []
    # TODO(nduca): This should call update_docs.IsUpdateDocsNeeded().
    # Disabled due to crbug.com/255326.
    if False:
        update_docs_path = os.path.join(
            input_api.PresubmitLocalPath(), 'update_docs')
        assert os.path.exists(update_docs_path)
        results.append(output_api.PresubmitError(
            'Docs are stale. Please run:\n' +
            '$ %s' % os.path.abspath(update_docs_path)))
    # Run pylint with the module-level exclusion/suppression lists.
    results.extend(input_api.canned_checks.RunPylint(
        input_api, output_api,
        black_list=PYLINT_BLACKLIST,
        disabled_warnings=PYLINT_DISABLED_WARNINGS))
    return results
def GetPathsToPrepend(input_api):
    """Return the sys.path entries the checks need: this directory and typ."""
    here = input_api.PresubmitLocalPath()
    typ_dir = os.path.join(here, os.path.pardir, os.path.pardir,
                           'third_party', 'typ')
    return [here, typ_dir]
def RunWithPrependedPath(prepended_path, fn, *args):
    """Invoke fn(*args) with prepended_path ahead of sys.path, then restore."""
    saved = sys.path
    sys.path = prepended_path + saved
    try:
        return fn(*args)
    finally:
        # Always restore the original path object, even if fn raises.
        sys.path = saved
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point invoked on upload."""
    def run_checks():
        return list(_CommonChecks(input_api, output_api))
    return RunWithPrependedPath(GetPathsToPrepend(input_api), run_checks)
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point invoked on commit."""
    def run_checks():
        return list(_CommonChecks(input_api, output_api))
    return RunWithPrependedPath(GetPathsToPrepend(input_api), run_checks)
|
texcaltech/windmilltownhomes-old | refs/heads/master | django/db/models/fields/__init__.py | 5 | import datetime
import decimal
import re
import time
import math
from itertools import tee
import django.utils.copycompat as copy
from django.db import connection
from django.db.models.fields.subclassing import LegacyConnection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
    # Sentinel used as Field.default so an explicit default of None can be
    # told apart from "no default was given" (see Field.has_default()).
    pass
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]  # default "empty" choice, shown as dashes
BLANK_CHOICE_NONE = [("", "None")]  # alternative "empty" choice labelled None
class FieldDoesNotExist(Exception):
    """Raised when a model field requested by name does not exist."""
    pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
    """Base class for all field types"""
    __metaclass__ = LegacyConnection
    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True
    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1
    default_validators = [] # Default set of validators
    default_error_messages = {
        'invalid_choice': _(u'Value %r is not a valid choice.'),
        'null': _(u'This field cannot be null.'),
        'blank': _(u'This field cannot be blank.'),
    }
    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _(u'Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    description = property(_description)
    # NOTE(review): the mutable default 'validators=[]' is safe only because
    # it is never mutated (it is concatenated below); do not append to it.
    def __init__(self, verbose_name=None, name=None, primary_key=False,
            max_length=None, unique=False, blank=False, null=False,
            db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
            serialize=True, unique_for_date=None, unique_for_month=None,
            unique_for_year=None, choices=None, help_text='', db_column=None,
            db_tablespace=None, auto_created=False, validators=[],
            error_messages=None):
        self.name = name
        self.verbose_name = verbose_name
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        # Oracle treats the empty string ('') as null, so coerce the null
        # option whenever '' is a possible value.
        if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
            self.null = True
        self.rel = rel
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
        self.unique_for_year = unique_for_year
        self._choices = choices or []
        self.help_text = help_text
        self.db_column = db_column
        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
        self.auto_created = auto_created
        # Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
        self.db_index = db_index
        # Adjust the appropriate creation counter, and save our local copy.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1
        self.validators = self.default_validators + validators
        # Merge error messages walking the MRO base-to-leaf so that subclass
        # messages override base-class ones, and per-instance ones win last.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
    def __cmp__(self, other):
        # This is needed because bisect does not take a comparison function.
        return cmp(self.creation_counter, other.creation_counter)
    def __deepcopy__(self, memodict):
        # We don't have to deepcopy very much here, since most things are not
        # intended to be altered after initial creation.
        obj = copy.copy(self)
        if self.rel:
            obj.rel = copy.copy(self.rel)
        memodict[id(self)] = obj
        return obj
    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        return value
    def run_validators(self, value):
        """Run all attached validators, aggregating their error messages."""
        if value in validators.EMPTY_VALUES:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError, e:
                # Prefer this field's own message for the validator's code.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)
        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'])
        if not self.blank and value in validators.EMPTY_VALUES:
            raise exceptions.ValidationError(self.error_messages['blank'])
    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors from to_python
        and validate are propagated. The correct value is returned if no error is
        raised.
        """
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value
    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection.
        """
        # The default implementation of this method looks at the
        # backend-specific DATA_TYPES dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # an XMLField is represented by a TEXT column type, which is the same
        # as the TextField Django field type, which means XMLField's
        # get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.creation.data_types[self.get_internal_type()] % data
        except KeyError:
            return None
    def related_db_type(self, connection):
        # This is the db_type used by a ForeignKey.
        return self.db_type(connection=connection)
    def unique(self):
        # Primary keys are implicitly unique.
        return self._unique or self.primary_key
    unique = property(unique)
    def set_attributes_from_name(self, name):
        # Derive attname/column and a human-readable verbose_name from the
        # attribute name this field was assigned to on the model.
        self.name = name
        self.attname, self.column = self.get_attname_column()
        if self.verbose_name is None and name:
            self.verbose_name = name.replace('_', ' ')
    def contribute_to_class(self, cls, name):
        """Register this field on the model class 'cls' under 'name'."""
        self.set_attributes_from_name(name)
        self.model = cls
        cls._meta.add_field(self)
        if self.choices:
            setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))
    def get_attname(self):
        return self.name
    def get_attname_column(self):
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
    def get_cache_name(self):
        # Name of the instance attribute used to cache related lookups.
        return '_%s_cache' % self.name
    def get_internal_type(self):
        return self.__class__.__name__
    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        "Perform preliminary non-db specific value checks and conversions."
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Returns field's value prepared for interacting with the database
        backend.

        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
        """
        if not prepared:
            value = self.get_prep_value(value)
        return value
    def get_db_prep_save(self, value, connection):
        "Returns field's value prepared for saving into a database."
        return self.get_db_prep_value(value, connection=connection, prepared=False)
    def get_prep_lookup(self, lookup_type, value):
        "Perform preliminary non-db specific lookup checks and conversions"
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()
        if lookup_type in (
            'regex', 'iregex', 'month', 'day', 'week_day', 'search',
            'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
            'endswith', 'iendswith', 'isnull'
        ):
            return value
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return self.get_prep_value(value)
        elif lookup_type in ('range', 'in'):
            return [self.get_prep_value(v) for v in value]
        elif lookup_type == 'year':
            try:
                return int(value)
            except ValueError:
                raise ValueError("The __year lookup type requires an integer argument")
        raise TypeError("Field has invalid lookup: %s" % lookup_type)
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        "Returns field's value prepared for database lookup."
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)
        if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
            return [value]
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return [self.get_db_prep_value(value, connection=connection, prepared=prepared)]
        elif lookup_type in ('range', 'in'):
            return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value]
        elif lookup_type in ('contains', 'icontains'):
            return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'iexact':
            return [connection.ops.prep_for_iexact_query(value)]
        elif lookup_type in ('startswith', 'istartswith'):
            return ["%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type in ('endswith', 'iendswith'):
            return ["%%%s" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'isnull':
            return []
        elif lookup_type == 'year':
            if self.get_internal_type() == 'DateField':
                return connection.ops.year_lookup_bounds_for_date_field(value)
            else:
                return connection.ops.year_lookup_bounds(value)
    def has_default(self):
        "Returns a boolean of whether this field has a default value."
        return self.default is not NOT_PROVIDED
    def get_default(self):
        "Returns the default value for this field."
        if self.has_default():
            if callable(self.default):
                return self.default()
            return force_unicode(self.default, strings_only=True)
        if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
            return None
        return ""
    def get_validator_unique_lookup_type(self):
        return '%s__exact' % self.name
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field."""
        first_choice = include_blank and blank_choice or []
        if self.choices:
            return first_choice + list(self.choices)
        rel_model = self.rel.to
        if hasattr(self.rel, 'get_related_field'):
            lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        return first_choice + lst
    def get_choices_default(self):
        return self.get_choices()
    def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        "Returns flattened choices with a default blank choice included."
        first_choice = include_blank and blank_choice or []
        return first_choice + list(self.flatchoices)
    def _get_val_from_obj(self, obj):
        if obj is not None:
            return getattr(obj, self.attname)
        else:
            return self.get_default()
    def value_to_string(self, obj):
        """
        Returns a string value of this field from the passed obj.
        This is used by the serialization framework.
        """
        return smart_unicode(self._get_val_from_obj(obj))
    def bind(self, fieldmapping, original, bound_field_class):
        return bound_field_class(self, fieldmapping, original)
    def _get_choices(self):
        # 'choices' may be a one-shot iterator; tee it so it can be re-read.
        if hasattr(self._choices, 'next'):
            choices, self._choices = tee(self._choices)
            return choices
        else:
            return self._choices
    choices = property(_get_choices)
    def _get_flatchoices(self):
        """Flattened version of choices tuple."""
        flat = []
        for choice, value in self.choices:
            if isinstance(value, (list, tuple)):
                flat.extend(value)
            else:
                flat.append((choice,value))
        return flat
    flatchoices = property(_get_flatchoices)
    def save_form_data(self, instance, data):
        setattr(instance, self.name, data)
    def formfield(self, form_class=forms.CharField, **kwargs):
        "Returns a django.forms.Field instance for this database Field."
        defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in kwargs.keys():
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]
        defaults.update(kwargs)
        return form_class(**defaults)
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname)
class AutoField(Field):
    """Integer primary-key field whose value is assigned by the database."""
    description = _("Integer")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be an integer.'),
    }
    def __init__(self, *args, **kwargs):
        assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
        kwargs['blank'] = True
        Field.__init__(self, *args, **kwargs)
    def get_internal_type(self):
        return "AutoField"
    def related_db_type(self, connection):
        # ForeignKeys pointing at an AutoField need a plain integer column
        # rather than an auto-incrementing one.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.creation.data_types['RelatedAutoField'] % data
        except KeyError:
            return IntegerField().db_type(connection=connection)
    def to_python(self, value):
        if not (value is None or isinstance(value, (basestring, int, long))):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        return value
    def validate(self, value, model_instance):
        # The database assigns the value; nothing to validate.
        pass
    def get_prep_value(self, value):
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts AutoField into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_auto(value)
    def contribute_to_class(self, cls, name):
        assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self
    def formfield(self, **kwargs):
        # Auto fields are never user-editable, so no form field is exposed.
        return None
class BooleanField(Field):
    """True/False field; also accepts the common string spellings on input."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be either True or False.'),
    }
    description = _("Boolean (Either True or False)")
    def __init__(self, *args, **kwargs):
        kwargs['blank'] = True
        # A non-null BooleanField without an explicit default defaults to False.
        if 'default' not in kwargs and not kwargs.get('null'):
            kwargs['default'] = False
        Field.__init__(self, *args, **kwargs)
    def get_internal_type(self):
        return "BooleanField"
    def to_python(self, value):
        if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])
    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        if value is None:
            return None
        return bool(value)
    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """String field with a required max_length, validated on clean()."""
    description = _("String (up to %(max_length)s)")
    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        # Enforce max_length at validation time as well as at the DB layer.
        self.validators.append(validators.MaxLengthValidator(self.max_length))
    def get_internal_type(self):
        return "CharField"
    def to_python(self, value):
        if isinstance(value, basestring) or value is None:
            return value
        return smart_unicode(value)
    def get_prep_value(self, value):
        return self.to_python(value)
    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    """CharField whose content must be a comma-separated list of integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")

    def formfield(self, **kwargs):
        # Override the generic "invalid" message with one naming the
        # expected comma-separated format.
        messages = {'invalid': _(u'Enter only digits separated by commas.')}
        defaults = {'error_messages': messages}
        defaults.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**defaults)
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class DateField(Field):
    """Field storing a calendar date (datetime.date, no time component)."""
    description = _("Date (without time)")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
        'invalid_date': _('Invalid date: %s'),
    }
    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        # HACK: auto_now/auto_now_add should ideally be implemented as a
        # default or a pre_save handler; for now they force the field to be
        # non-editable and allowed blank.
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        Field.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "DateField"
    def to_python(self, value):
        """Convert *value* to a datetime.date.

        Accepts None, date, datetime (time part dropped) or a
        'YYYY-MM-DD' string; raises ValidationError for anything else.
        """
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        if not ansi_date_re.search(value):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        # Now that we have the date string in YYYY-MM-DD format, check to make
        # sure it's a valid date.
        # We could use time.strptime here and catch errors, but datetime.date
        # produces much friendlier error messages.
        year, month, day = map(int, value.split('-'))
        try:
            return datetime.date(year, month, day)
        except ValueError, e:
            msg = self.error_messages['invalid_date'] % _(str(e))
            raise exceptions.ValidationError(msg)
    def pre_save(self, model_instance, add):
        """Fill in today's date for auto_now, or auto_now_add on first save."""
        old_value = getattr(model_instance, self.attname)
        if self.auto_now or (not old_value and self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)
    def contribute_to_class(self, cls, name):
        """Install get_next_by_<name>/get_previous_by_<name> model helpers.

        Only added for non-nullable fields, since ordering over NULL dates
        is not meaningful.
        """
        super(DateField,self).contribute_to_class(cls, name)
        if not self.null:
            setattr(cls, 'get_next_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))
    def get_prep_lookup(self, lookup_type, value):
        # For "__month", "__day", and "__week_day" lookups, convert the value
        # to an int so the database backend always sees a consistent type.
        if lookup_type in ('month', 'day', 'week_day'):
            return int(value)
        return super(DateField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_date(value)
    def value_to_string(self, obj):
        """Serialize the field's value as 'YYYY-MM-DD' ('' for None)."""
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
        return data
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """DateField variant that stores both a date and a time of day."""
    default_error_messages = {
        'invalid': _(u'Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
    }
    description = _("Date (with time)")
    def get_internal_type(self):
        return "DateTimeField"
    def to_python(self, value):
        """Convert *value* to a datetime.datetime.

        Accepts None, datetime, date (midnight assumed) or a string of the
        form 'YYYY-MM-DD[ HH:MM[:SS[.uuuuuu]]]'; raises ValidationError
        for anything else.
        """
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)
        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        # Fall through progressively shorter formats; seconds, then the
        # whole time-of-day, are optional.
        try: # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
                                     **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
                                         **kwargs)
            except ValueError: # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
                                             **kwargs)
                except ValueError:
                    raise exceptions.ValidationError(self.error_messages['invalid'])
    def pre_save(self, model_instance, add):
        """Fill in now() for auto_now, or auto_now_add on first save."""
        old_value = getattr(model_instance, self.attname)
        if self.auto_now or (not old_value and self.auto_now_add and add):
            value = datetime.datetime.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)
    def get_prep_value(self, value):
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)
    def value_to_string(self, obj):
        """Serialize as 'YYYY-MM-DD HH:MM:SS' ('' for None)."""
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            d = datetime_safe.new_datetime(val)
            data = d.strftime('%Y-%m-%d %H:%M:%S')
        return data
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """Fixed-precision number field backed by decimal.Decimal."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be a decimal number.'),
    }
    description = _("Decimal number")
    def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
        # max_digits: total number of digits; decimal_places: digits after
        # the decimal point.
        self.max_digits, self.decimal_places = max_digits, decimal_places
        Field.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "DecimalField"
    def to_python(self, value):
        """Convert *value* to a Decimal; None passes through."""
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(self.error_messages['invalid'])
    def _format(self, value):
        # Strings and None are returned untouched; numbers are rendered
        # with this field's digit constraints.
        if isinstance(value, basestring) or value is None:
            return value
        else:
            return self.format_number(value)
    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.util.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import util
        return util.format_number(value, self.max_digits, self.decimal_places)
    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                self.max_digits, self.decimal_places)
    def get_prep_value(self, value):
        return self.to_python(value)
    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
    """CharField that validates its content as an e-mail address."""
    default_validators = [validators.validate_email]
    description = _("E-mail address")

    def __init__(self, *args, **kwargs):
        # Default the column width to 75 characters unless overridden.
        kwargs.setdefault('max_length', 75)
        CharField.__init__(self, *args, **kwargs)

    def formfield(self, **kwargs):
        # As with CharField, e-mail validation then runs twice
        # (form field and model field).
        defaults = dict(form_class=forms.EmailField)
        defaults.update(kwargs)
        return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
    """Field storing a filesystem path selected from *path* on disk."""
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
        # path: directory to browse; match: filename filter pattern;
        # recursive: whether to descend into subdirectories.
        self.path, self.match, self.recursive = path, match, recursive
        kwargs.setdefault('max_length', 100)
        Field.__init__(self, verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "FilePathField"

    def formfield(self, **kwargs):
        # Forward the browsing parameters to the form field.
        defaults = dict(
            path=self.path,
            match=self.match,
            recursive=self.recursive,
            form_class=forms.FilePathField,
        )
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)
class FloatField(Field):
    """Field storing a Python float."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be a float."),
    }
    description = _("Floating point number")

    def get_internal_type(self):
        return "FloatField"

    def get_prep_value(self, value):
        return None if value is None else float(value)

    def to_python(self, value):
        """Convert to float; None passes through, bad input raises."""
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def formfield(self, **kwargs):
        defaults = dict(form_class=forms.FloatField)
        defaults.update(kwargs)
        return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
    """Field storing a Python int."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be an integer."),
    }
    description = _("Integer")

    def get_internal_type(self):
        return "IntegerField"

    def get_prep_value(self, value):
        return None if value is None else int(value)

    def get_prep_lookup(self, lookup_type, value):
        # Round a float upward for gte/lt comparisons so the lookup still
        # selects the same set of integer rows.
        if isinstance(value, float) and lookup_type in ('gte', 'lt'):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)

    def to_python(self, value):
        """Convert to int; None passes through, bad input raises."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def formfield(self, **kwargs):
        defaults = dict(form_class=forms.IntegerField)
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """IntegerField mapped to the backend's 8-byte integer column."""
    MAX_BIGINT = 9223372036854775807  # 2**63 - 1
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        # Bound the form field to the signed 64-bit range.
        defaults = dict(min_value=-BigIntegerField.MAX_BIGINT - 1,
                        max_value=BigIntegerField.MAX_BIGINT)
        defaults.update(kwargs)
        return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
    """Field storing an IP address as text."""
    empty_strings_allowed = False
    description = _("IP address")

    def __init__(self, *args, **kwargs):
        # 15 characters is enough for a dotted-quad IPv4 address.
        kwargs['max_length'] = 15
        Field.__init__(self, *args, **kwargs)

    def get_internal_type(self):
        return "IPAddressField"

    def formfield(self, **kwargs):
        defaults = dict(form_class=forms.IPAddressField)
        defaults.update(kwargs)
        return super(IPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """Boolean field that also admits NULL/None as a value."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        # null/blank are forced on: None is a first-class value here.
        kwargs['null'] = True
        kwargs['blank'] = True
        Field.__init__(self, *args, **kwargs)

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        """Map common spellings onto True/False/None; raise otherwise."""
        if value is None:
            return None
        if value in (True, False):
            return bool(value)
        if value == 'None':
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_prep_lookup(self, lookup_type, value):
        # Scalar web-request filter values arrive as '1'/'0'; coerce them
        # to bool.  Lists must already be typed by the caller.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        return None if value is None else bool(value)

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text,
        }
        defaults.update(kwargs)
        return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
    """IntegerField restricted to non-negative values at the form level."""
    description = _("Integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def related_db_type(self, connection):
        # Backends whose related columns cannot share this field's exact
        # column type fall back to a plain integer type.
        if not connection.features.related_fields_match_type:
            return IntegerField().db_type(connection=connection)
        return super(PositiveIntegerField, self).related_db_type(
            connection=connection)

    def formfield(self, **kwargs):
        defaults = dict(min_value=0)
        defaults.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
    """Small-integer field restricted to non-negative values at the form level."""
    description = _("Integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def related_db_type(self, connection):
        # Backends whose related columns cannot share this field's exact
        # column type fall back to a plain integer type.
        if not connection.features.related_fields_match_type:
            return IntegerField().db_type(connection=connection)
        return super(PositiveSmallIntegerField, self).related_db_type(
            connection=connection)

    def formfield(self, **kwargs):
        defaults = dict(min_value=0)
        defaults.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
    """String field intended for URL slugs; indexed by default."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 50)
        # Slugs are typically used in lookups, so index unless the caller
        # explicitly decided otherwise.
        kwargs.setdefault('db_index', True)
        super(SlugField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        defaults = dict(form_class=forms.SlugField)
        defaults.update(kwargs)
        return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
    """IntegerField mapped to the backend's small-integer column type."""
    description = _("Integer")

    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Field for arbitrarily long text, edited with a textarea widget."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def get_prep_value(self, value):
        # Strings and None pass through; anything else becomes unicode.
        if value is None or isinstance(value, basestring):
            return value
        return smart_unicode(value)

    def formfield(self, **kwargs):
        defaults = dict(widget=forms.Textarea)
        defaults.update(kwargs)
        return super(TextField, self).formfield(**defaults)
class TimeField(Field):
    """Field storing a time of day (datetime.time)."""
    description = _("Time")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'),
    }
    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        # NOTE(review): unlike DateField, blank=True is NOT forced here when
        # auto_now/auto_now_add is set -- confirm this asymmetry is intended.
        if auto_now or auto_now_add:
            kwargs['editable'] = False
        Field.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "TimeField"
    def to_python(self, value):
        """Convert *value* to a datetime.time.

        Accepts None, time, datetime (date part dropped) or a string of
        the form 'HH:MM[:SS[.uuuuuu]]'; raises ValidationError otherwise.
        """
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        try: # Seconds are optional, so try converting seconds first.
            return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
                                 **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.time(*time.strptime(value, '%H:%M')[3:5],
                                     **kwargs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
    def pre_save(self, model_instance, add):
        """Fill in the current time for auto_now, or auto_now_add on first save."""
        old_value = getattr(model_instance, self.attname)
        if self.auto_now or (not old_value and self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)
    def get_prep_value(self, value):
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)
    def value_to_string(self, obj):
        """Serialize as 'HH:MM:SS' ('' for None)."""
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            data = val.strftime("%H:%M:%S")
        return data
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """CharField validated as a URL, optionally checking it resolves."""
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, verify_exists=True, **kwargs):
        kwargs.setdefault('max_length', 200)
        CharField.__init__(self, verbose_name, name, **kwargs)
        # verify_exists makes the validator actually fetch the URL.
        self.validators.append(validators.URLValidator(verify_exists=verify_exists))

    def formfield(self, **kwargs):
        # As with CharField, URL validation then runs twice
        # (form field and model field).
        defaults = dict(form_class=forms.URLField)
        defaults.update(kwargs)
        return super(URLField, self).formfield(**defaults)
class XMLField(TextField):
    """TextField that records an optional schema path for its XML content."""
    description = _("XML text")

    def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
        self.schema_path = schema_path
        # Field.__init__ is invoked directly; TextField defines no __init__
        # of its own, so the chain is equivalent.
        Field.__init__(self, verbose_name, name, **kwargs)
|
Kromey/piroute | refs/heads/master | iptables/views.py | 1 | from django.shortcuts import render
from django.template.loader import render_to_string
from django.forms import formset_factory
from . import forms, utils
# Create your views here.
def index(request):
    """Render the iptables rule editor; on POST, generate the rules file.

    GET: show empty interface and rule forms.
    POST: validate both forms; on success, sort the rules by their ORDER
    value, render the resulting rules.v4 text and re-populate the formset
    with the sorted data.
    """
    rules = None
    if request.method == 'POST':
        ifaces = forms.InterfacesForm(request.POST)
        ruleformset = forms.RuleFormset(request.POST)
        if ifaces.is_valid() and ruleformset.is_valid():
            data = ruleformset.cleaned_data
            # Blank extra forms contribute empty dicts to cleaned_data, so
            # use .get() instead of indexing (which would raise KeyError);
            # unset/zero ORDER values sort last via the sentinel.
            data.sort(key=lambda rule: rule.get('ORDER') or 100000)
            rules = render_to_string('iptables/rules.v4',
                                     {'rules': utils.prep_rules(data)})
            ruleformset = forms.RuleFormset(initial=data)
    else:
        ifaces = forms.InterfacesForm()
        ruleformset = forms.RuleFormset()
    return render(request, 'iptables/index.html',
                  {'ifaces': ifaces, 'ruleformset': ruleformset, 'rules': rules})
|
arun6582/django-manage-reset-migrations | refs/heads/master | reinit_migrations/apps.py | 2 | from __future__ import unicode_literals
from django.apps import AppConfig
class ReInitMigrationsConfig(AppConfig):
    """Django AppConfig for the reinit_migrations management app."""
    name = 'reinit_migrations'
|
epzt/GiSedTrend | refs/heads/master | ui_pygisedtrend_text_file_analysis.py | 1 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_pygisedtrend_text_file_analysis.ui'
#
# Created: Mon Aug 10 11:31:03 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt API v2 has no QString; plain strings are passed through.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # PyQt builds without UnicodeUTF8: translate without an encoding arg.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_pygisedtrend_text_file_analysis(object):
    """Auto-generated (pyuic4) UI for the text-file analysis dialog.

    Do not edit by hand beyond comments: regenerating from the .ui file
    overwrites this class.
    """
    def setupUi(self, pygisedtrend_text_file_analysis):
        """Create and lay out all widgets on the given dialog."""
        pygisedtrend_text_file_analysis.setObjectName(_fromUtf8("pygisedtrend_text_file_analysis"))
        pygisedtrend_text_file_analysis.resize(688, 472)
        # OK/Cancel button box.
        self.buttonBox = QtGui.QDialogButtonBox(pygisedtrend_text_file_analysis)
        self.buttonBox.setGeometry(QtCore.QRect(330, 430, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        # "Separator" group: radio buttons for the column delimiter.
        self.groupBox = QtGui.QGroupBox(pygisedtrend_text_file_analysis)
        self.groupBox.setGeometry(QtCore.QRect(20, 20, 621, 101))
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.radioSpace = QtGui.QRadioButton(self.groupBox)
        self.radioSpace.setGeometry(QtCore.QRect(20, 30, 86, 22))
        self.radioSpace.setChecked(True)
        self.radioSpace.setObjectName(_fromUtf8("radioSpace"))
        self.radioTabulation = QtGui.QRadioButton(self.groupBox)
        self.radioTabulation.setGeometry(QtCore.QRect(120, 30, 120, 22))
        self.radioTabulation.setObjectName(_fromUtf8("radioTabulation"))
        self.radioComma = QtGui.QRadioButton(self.groupBox)
        self.radioComma.setGeometry(QtCore.QRect(260, 30, 104, 22))
        self.radioComma.setObjectName(_fromUtf8("radioComma"))
        self.radioSimilicon = QtGui.QRadioButton(self.groupBox)
        self.radioSimilicon.setGeometry(QtCore.QRect(390, 30, 113, 22))
        self.radioSimilicon.setObjectName(_fromUtf8("radioSimilicon"))
        self.radioPipe = QtGui.QRadioButton(self.groupBox)
        self.radioPipe.setGeometry(QtCore.QRect(510, 30, 84, 22))
        self.radioPipe.setObjectName(_fromUtf8("radioPipe"))
        # Free-text "Other" separator; enabled only when its radio is on
        # (see the toggled(bool) connection below).
        self.radioOther = QtGui.QRadioButton(self.groupBox)
        self.radioOther.setGeometry(QtCore.QRect(21, 63, 86, 22))
        self.radioOther.setObjectName(_fromUtf8("radioOther"))
        self.lineEditOther = QtGui.QLineEdit(self.groupBox)
        self.lineEditOther.setEnabled(False)
        self.lineEditOther.setGeometry(QtCore.QRect(113, 61, 129, 27))
        self.lineEditOther.setObjectName(_fromUtf8("lineEditOther"))
        # "Line treatments" group: header flag, skip count, decimal comma.
        self.groupBox_2 = QtGui.QGroupBox(pygisedtrend_text_file_analysis)
        self.groupBox_2.setGeometry(QtCore.QRect(20, 130, 631, 80))
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.widget = QtGui.QWidget(self.groupBox_2)
        self.widget.setGeometry(QtCore.QRect(12, 30, 610, 31))
        self.widget.setObjectName(_fromUtf8("widget"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget)
        self.horizontalLayout_2.setMargin(0)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.checkBoxHeader = QtGui.QCheckBox(self.widget)
        self.checkBoxHeader.setObjectName(_fromUtf8("checkBoxHeader"))
        self.horizontalLayout_2.addWidget(self.checkBoxHeader)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.widget)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.spinBoxSkipLine = QtGui.QSpinBox(self.widget)
        self.spinBoxSkipLine.setObjectName(_fromUtf8("spinBoxSkipLine"))
        self.horizontalLayout.addWidget(self.spinBoxSkipLine)
        self.label_2 = QtGui.QLabel(self.widget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout.addWidget(self.label_2)
        self.horizontalLayout_2.addLayout(self.horizontalLayout)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.checkBoxCommaSeparator = QtGui.QCheckBox(self.widget)
        self.checkBoxCommaSeparator.setObjectName(_fromUtf8("checkBoxCommaSeparator"))
        self.horizontalLayout_2.addWidget(self.checkBoxCommaSeparator)
        # Preview table for the parsed file contents.
        self.tableAnalysisResult = QtGui.QTableWidget(pygisedtrend_text_file_analysis)
        self.tableAnalysisResult.setGeometry(QtCore.QRect(30, 270, 641, 151))
        self.tableAnalysisResult.setObjectName(_fromUtf8("tableAnalysisResult"))
        self.tableAnalysisResult.setColumnCount(0)
        self.tableAnalysisResult.setRowCount(0)
        # Column-assignment combo boxes: X, Y, Z, Mean, Sorting, Skewness.
        self.layoutWidget = QtGui.QWidget(pygisedtrend_text_file_analysis)
        self.layoutWidget.setGeometry(QtCore.QRect(30, 210, 91, 52))
        self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setSpacing(3)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label_3 = QtGui.QLabel(self.layoutWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.verticalLayout.addWidget(self.label_3)
        self.comboBoxX = QtGui.QComboBox(self.layoutWidget)
        self.comboBoxX.setObjectName(_fromUtf8("comboBoxX"))
        self.verticalLayout.addWidget(self.comboBoxX)
        self.layoutWidget1 = QtGui.QWidget(pygisedtrend_text_file_analysis)
        self.layoutWidget1.setGeometry(QtCore.QRect(140, 210, 91, 52))
        self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.layoutWidget1)
        self.verticalLayout_2.setSpacing(3)
        self.verticalLayout_2.setMargin(0)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.label_4 = QtGui.QLabel(self.layoutWidget1)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.verticalLayout_2.addWidget(self.label_4)
        self.comboBoxY = QtGui.QComboBox(self.layoutWidget1)
        self.comboBoxY.setObjectName(_fromUtf8("comboBoxY"))
        self.verticalLayout_2.addWidget(self.comboBoxY)
        self.layoutWidget2 = QtGui.QWidget(pygisedtrend_text_file_analysis)
        self.layoutWidget2.setGeometry(QtCore.QRect(250, 210, 91, 52))
        self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.layoutWidget2)
        self.verticalLayout_3.setSpacing(3)
        self.verticalLayout_3.setMargin(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.label_5 = QtGui.QLabel(self.layoutWidget2)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.verticalLayout_3.addWidget(self.label_5)
        self.comboBoxZ = QtGui.QComboBox(self.layoutWidget2)
        self.comboBoxZ.setObjectName(_fromUtf8("comboBoxZ"))
        self.verticalLayout_3.addWidget(self.comboBoxZ)
        self.layoutWidget3 = QtGui.QWidget(pygisedtrend_text_file_analysis)
        self.layoutWidget3.setGeometry(QtCore.QRect(360, 210, 91, 52))
        self.layoutWidget3.setObjectName(_fromUtf8("layoutWidget3"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.layoutWidget3)
        self.verticalLayout_4.setSpacing(3)
        self.verticalLayout_4.setMargin(0)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.label_6 = QtGui.QLabel(self.layoutWidget3)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.verticalLayout_4.addWidget(self.label_6)
        self.comboBoxMean = QtGui.QComboBox(self.layoutWidget3)
        self.comboBoxMean.setObjectName(_fromUtf8("comboBoxMean"))
        self.verticalLayout_4.addWidget(self.comboBoxMean)
        self.layoutWidget4 = QtGui.QWidget(pygisedtrend_text_file_analysis)
        self.layoutWidget4.setGeometry(QtCore.QRect(470, 210, 91, 52))
        self.layoutWidget4.setObjectName(_fromUtf8("layoutWidget4"))
        self.verticalLayout_5 = QtGui.QVBoxLayout(self.layoutWidget4)
        self.verticalLayout_5.setSpacing(3)
        self.verticalLayout_5.setMargin(0)
        self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
        self.label_7 = QtGui.QLabel(self.layoutWidget4)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.verticalLayout_5.addWidget(self.label_7)
        self.comboBoxSorting = QtGui.QComboBox(self.layoutWidget4)
        self.comboBoxSorting.setObjectName(_fromUtf8("comboBoxSorting"))
        self.verticalLayout_5.addWidget(self.comboBoxSorting)
        self.layoutWidget5 = QtGui.QWidget(pygisedtrend_text_file_analysis)
        self.layoutWidget5.setGeometry(QtCore.QRect(580, 210, 91, 52))
        self.layoutWidget5.setObjectName(_fromUtf8("layoutWidget5"))
        self.verticalLayout_6 = QtGui.QVBoxLayout(self.layoutWidget5)
        self.verticalLayout_6.setSpacing(3)
        self.verticalLayout_6.setMargin(0)
        self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
        self.label_8 = QtGui.QLabel(self.layoutWidget5)
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.verticalLayout_6.addWidget(self.label_8)
        self.comboBoxSkewness = QtGui.QComboBox(self.layoutWidget5)
        self.comboBoxSkewness.setObjectName(_fromUtf8("comboBoxSkewness"))
        self.verticalLayout_6.addWidget(self.comboBoxSkewness)
        # Apply translated texts, then wire up the dialog's signals.
        self.retranslateUi(pygisedtrend_text_file_analysis)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), pygisedtrend_text_file_analysis.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), pygisedtrend_text_file_analysis.reject)
        QtCore.QObject.connect(self.radioOther, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.lineEditOther.setEnabled)
        QtCore.QMetaObject.connectSlotsByName(pygisedtrend_text_file_analysis)
    def retranslateUi(self, pygisedtrend_text_file_analysis):
        """Set all user-visible (translatable) widget texts."""
        pygisedtrend_text_file_analysis.setWindowTitle(_translate("pygisedtrend_text_file_analysis", "Dialog", None))
        self.groupBox.setTitle(_translate("pygisedtrend_text_file_analysis", "Separator", None))
        self.radioSpace.setText(_translate("pygisedtrend_text_file_analysis", "Space ( )", None))
        self.radioTabulation.setText(_translate("pygisedtrend_text_file_analysis", "Tabulation (t)", None))
        self.radioComma.setText(_translate("pygisedtrend_text_file_analysis", "Comma ( , )", None))
        self.radioSimilicon.setText(_translate("pygisedtrend_text_file_analysis", "Similicon ( ; )", None))
        self.radioPipe.setText(_translate("pygisedtrend_text_file_analysis", "Pipe ( | )", None))
        self.radioOther.setText(_translate("pygisedtrend_text_file_analysis", "Other(s)", None))
        self.groupBox_2.setTitle(_translate("pygisedtrend_text_file_analysis", "Line treatments", None))
        self.checkBoxHeader.setText(_translate("pygisedtrend_text_file_analysis", "First line as header", None))
        self.label.setText(_translate("pygisedtrend_text_file_analysis", "Skip", None))
        self.label_2.setText(_translate("pygisedtrend_text_file_analysis", "line(s)", None))
        self.checkBoxCommaSeparator.setText(_translate("pygisedtrend_text_file_analysis", "Comma decimal separator", None))
        self.label_3.setText(_translate("pygisedtrend_text_file_analysis", "X", None))
        self.label_4.setText(_translate("pygisedtrend_text_file_analysis", "Y", None))
        self.label_5.setText(_translate("pygisedtrend_text_file_analysis", "Z", None))
        self.label_6.setText(_translate("pygisedtrend_text_file_analysis", "Mean", None))
        self.label_7.setText(_translate("pygisedtrend_text_file_analysis", "Sorting", None))
        self.label_8.setText(_translate("pygisedtrend_text_file_analysis", "Skewness", None))
|
begoldsm/autorest | refs/heads/master | src/generator/AutoRest.Python.Tests/AcceptanceTests/custom_base_uri_tests.py | 9 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import os
from datetime import date, datetime, timedelta
from os.path import dirname, pardir, join, realpath
# Locate the generated acceptance-test clients relative to this file and
# make both client packages importable.
cwd = dirname(realpath(__file__))
log_level = int(os.environ.get('PythonLogLevel', 30))  # 30 == WARNING
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "CustomBaseUri"))
sys.path.append(join(tests, "CustomBaseUriMoreOptions"))
from msrest.exceptions import (
DeserializationError,
SerializationError,
ClientRequestError,
ValidationError)
from autorestparameterizedhosttestclient import AutoRestParameterizedHostTestClient
from autorestparameterizedhosttestclient.models import Error, ErrorException
from autorestparameterizedcustomhosttestclient import AutoRestParameterizedCustomHostTestClient
class CustomBaseUriTests(unittest.TestCase):
    """Acceptance tests for clients built from parameterized host templates.

    Requires the local AutoRest acceptance-test server to be reachable.
    """
    def test_custom_base_uri_positive(self):
        """A valid account name resolves the host and the call succeeds."""
        client = AutoRestParameterizedHostTestClient("host:3000")
        client.paths.get_empty("local")
    def test_custom_base_uri_negative(self):
        """Bad account names / hosts fail with client-side errors."""
        client = AutoRestParameterizedHostTestClient("host:3000")
        # Disable retries so the unreachable-host cases fail immediately.
        client.config.retry_policy.retries = 0
        with self.assertRaises(ClientRequestError):
            client.paths.get_empty("bad")
        with self.assertRaises(ValidationError):
            client.paths.get_empty(None)
        client.config.host = "badhost:3000"
        with self.assertRaises(ClientRequestError):
            client.paths.get_empty("local")
    def test_custom_base_uri_more_optiopns(self):
        # NOTE(review): "optiopns" is a typo for "options"; kept because the
        # test name is part of the discovered test surface.
        client = AutoRestParameterizedCustomHostTestClient("test12", "host:3000")
        client.paths.get_empty("http://lo", "cal", "key1")
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
|
kennedyshead/home-assistant | refs/heads/dev | tests/components/cloud/test_tts.py | 8 | """Tests for cloud tts."""
from unittest.mock import Mock
from hass_nabucasa import voice
import pytest
import voluptuous as vol
from homeassistant.components.cloud import const, tts
@pytest.fixture()
def cloud_with_prefs(cloud_prefs):
    """Return a cloud mock with prefs."""
    mock_client = Mock(prefs=cloud_prefs)
    return Mock(client=mock_client)
def test_default_exists():
    """Test our default language exists."""
    default_voice = const.DEFAULT_TTS_DEFAULT_VOICE
    assert default_voice in voice.MAP_VOICE
def test_schema():
    """Test schema."""
    assert "nl-NL" in tts.SUPPORT_LANGUAGES

    # Gender defaults to "female" when omitted.
    config = tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL"})
    assert config["gender"] == "female"

    # Unknown language or unsupported gender are rejected.
    invalid_configs = (
        {"platform": "cloud", "language": "non-existing", "gender": "female"},
        {"platform": "cloud", "language": "nl-NL", "gender": "not-supported"},
    )
    for bad_config in invalid_configs:
        with pytest.raises(vol.Invalid):
            tts.PLATFORM_SCHEMA(bad_config)

    # Valid configurations should not raise.
    tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL", "gender": "female"})
    tts.PLATFORM_SCHEMA({"platform": "cloud"})
async def test_prefs_default_voice(hass, cloud_with_prefs, cloud_prefs):
    """Test cloud provider uses the preferences."""
    assert cloud_prefs.tts_default_voice == ("en-US", "female")
    # Engine created with no config dict follows the cloud preferences.
    provider_pref = await tts.async_get_engine(
        Mock(data={const.DOMAIN: cloud_with_prefs}), None, {}
    )
    # Engine created with an explicit config is pinned to that config.
    provider_conf = await tts.async_get_engine(
        Mock(data={const.DOMAIN: cloud_with_prefs}),
        {"language": "fr-FR", "gender": "female"},
        None,
    )
    assert provider_pref.default_language == "en-US"
    assert provider_pref.default_options == {"gender": "female"}
    assert provider_conf.default_language == "fr-FR"
    assert provider_conf.default_options == {"gender": "female"}
    # Updating the preference must change the prefs-based engine only,
    # leaving the explicitly-configured engine untouched.
    await cloud_prefs.async_update(tts_default_voice=("nl-NL", "male"))
    await hass.async_block_till_done()
    assert provider_pref.default_language == "nl-NL"
    assert provider_pref.default_options == {"gender": "male"}
    assert provider_conf.default_language == "fr-FR"
    assert provider_conf.default_options == {"gender": "female"}
async def test_provider_properties(cloud_with_prefs):
    """Test cloud provider."""
    hass_mock = Mock(data={const.DOMAIN: cloud_with_prefs})
    provider = await tts.async_get_engine(hass_mock, None, {})
    assert provider.supported_options == ["gender"]
    assert "nl-NL" in provider.supported_languages
async def test_get_tts_audio(cloud_with_prefs):
    """Test cloud provider."""
    # NOTE(review): this test is an exact copy of test_provider_properties and
    # never calls provider.get_tts_audio(), so the behavior its name promises
    # is not exercised. TODO: add a real get_tts_audio assertion (mocking the
    # cloud voice call) or remove the duplicate.
    provider = await tts.async_get_engine(
        Mock(data={const.DOMAIN: cloud_with_prefs}), None, {}
    )
    assert provider.supported_options == ["gender"]
    assert "nl-NL" in provider.supported_languages
|
rnzaws/test-ci-codepipeline-elastic-beanstalk | refs/heads/master | processor/processor/urls.py | 1 | """processor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
    # Only the Django admin is routed; the processor app registers no
    # application URLs of its own here.
    url(r'^admin/', admin.site.urls),
]
|
windedge/odoo | refs/heads/8.0 | addons/base_geolocalize/__init__.py | 389 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013_Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import models
|
waheedahmed/edx-platform | refs/heads/master | lms/djangoapps/django_comment_client/tests/test_utils.py | 3 | # -*- coding: utf-8 -*-
import datetime
import json
import ddt
import mock
from nose.plugins.attrib import attr
from pytz import UTC
from django.utils.timezone import UTC as django_utc
from django.core.urlresolvers import reverse
from django.test import TestCase, RequestFactory
from edxmako import add_lookup
from django_comment_client.tests.factories import RoleFactory
from django_comment_client.tests.unicode import UnicodeTestMixin
import django_comment_client.utils as utils
from courseware.tests.factories import InstructorFactory
from courseware.tabs import get_course_tab_list
from openedx.core.djangoapps.course_groups import cohorts
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts, topic_name_to_id
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from student.roles import CourseStaffRole
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, ToyCourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locator import CourseLocator
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
@attr('shard_1')
class DictionaryTestCase(TestCase):
    """Tests for the small dictionary helpers in django_comment_client.utils."""

    def test_extract(self):
        source = {'cats': 'meow', 'dogs': 'woof'}
        wanted_keys = ['cats', 'dogs', 'hamsters']
        # Missing keys are filled in with None.
        self.assertEqual(
            utils.extract(source, wanted_keys),
            {'cats': 'meow', 'dogs': 'woof', 'hamsters': None},
        )

    def test_strip_none(self):
        source = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
        self.assertEqual(utils.strip_none(source), {'cats': 'meow', 'dogs': 'woof'})

    def test_strip_blank(self):
        # Whitespace-only and empty-string values are both stripped.
        source = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''}
        self.assertEqual(utils.strip_blank(source), {'cats': 'meow', 'dogs': 'woof'})

    def test_merge_dict(self):
        left = {'cats': 'meow', 'dogs': 'woof'}
        right = {'lions': 'roar', 'ducks': 'quack'}
        merged = {'cats': 'meow', 'dogs': 'woof', 'lions': 'roar', 'ducks': 'quack'}
        self.assertEqual(utils.merge_dict(left, right), merged)
@attr('shard_1')
class AccessUtilsTestCase(ModuleStoreTestCase):
    """
    Base testcase class for access and roles for the
    comment client service integration
    """
    # Suppress the default user normally created by ModuleStoreTestCase;
    # all users are created explicitly in setUp.
    CREATE_USER = False

    def setUp(self):
        super(AccessUtilsTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.course_id = self.course.id
        self.student_role = RoleFactory(name='Student', course_id=self.course_id)
        self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id)
        self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id)
        # NOTE(review): test_get_role_ids below asserts literal user ids
        # ([3] and [4, 5]) that appear to depend on the creation order of the
        # users here -- keep this order stable.
        self.student1 = UserFactory(username='student', email='student@edx.org')
        self.student1_enrollment = CourseEnrollmentFactory(user=self.student1)
        self.student_role.users.add(self.student1)
        # student2 is enrolled but has no forum role.
        self.student2 = UserFactory(username='student2', email='student2@edx.org')
        self.student2_enrollment = CourseEnrollmentFactory(user=self.student2)
        self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True)
        self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator)
        self.moderator_role.users.add(self.moderator)
        self.community_ta1 = UserFactory(username='community_ta1', email='community_ta1@edx.org')
        self.community_ta_role.users.add(self.community_ta1)
        self.community_ta2 = UserFactory(username='community_ta2', email='community_ta2@edx.org')
        self.community_ta_role.users.add(self.community_ta2)
        # course_staff has the courseware staff role but no forum role.
        self.course_staff = UserFactory(username='course_staff', email='course_staff@edx.org')
        CourseStaffRole(self.course_id).add_users(self.course_staff)

    def test_get_role_ids(self):
        """Only the privileged roles (Moderator, Community TA) are returned, mapped to user ids."""
        ret = utils.get_role_ids(self.course_id)
        expected = {u'Moderator': [3], u'Community TA': [4, 5]}
        self.assertEqual(ret, expected)

    def test_has_discussion_privileges(self):
        """Moderators and community TAs are privileged; students and course staff are not."""
        self.assertFalse(utils.has_discussion_privileges(self.student1, self.course_id))
        self.assertFalse(utils.has_discussion_privileges(self.student2, self.course_id))
        self.assertFalse(utils.has_discussion_privileges(self.course_staff, self.course_id))
        self.assertTrue(utils.has_discussion_privileges(self.moderator, self.course_id))
        self.assertTrue(utils.has_discussion_privileges(self.community_ta1, self.course_id))
        self.assertTrue(utils.has_discussion_privileges(self.community_ta2, self.course_id))

    def test_has_forum_access(self):
        """Access requires both the username and the role name to match."""
        ret = utils.has_forum_access('student', self.course_id, 'Student')
        self.assertTrue(ret)
        ret = utils.has_forum_access('not_a_student', self.course_id, 'Student')
        self.assertFalse(ret)
        ret = utils.has_forum_access('student', self.course_id, 'NotARole')
        self.assertFalse(ret)
@ddt.ddt
@attr('shard_1')
class CoursewareContextTestCase(ModuleStoreTestCase):
    """
    Base testcase class for courseware context for the
    comment client service integration
    """
    def setUp(self):
        super(CoursewareContextTestCase, self).setUp()
        self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
        # One discussion with a single-level category...
        self.discussion1 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion1",
            discussion_category="Chapter",
            discussion_target="Discussion 1"
        )
        # ...and one with a nested, slash-separated category path.
        self.discussion2 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion2",
            discussion_category="Chapter / Section / Subsection",
            discussion_target="Discussion 2"
        )

    def test_empty(self):
        """An empty thread list is a harmless no-op."""
        utils.add_courseware_context([], self.course, self.user)

    def test_missing_commentable_id(self):
        """Threads whose commentable_id is not an inline discussion are left unmodified."""
        orig = {"commentable_id": "non-inline"}
        modified = dict(orig)
        utils.add_courseware_context([modified], self.course, self.user)
        self.assertEqual(modified, orig)

    def test_basic(self):
        """Courseware URL and title are attached for inline-discussion threads."""
        threads = [
            {"commentable_id": self.discussion1.discussion_id},
            {"commentable_id": self.discussion2.discussion_id}
        ]
        utils.add_courseware_context(threads, self.course, self.user)

        def assertThreadCorrect(thread, discussion, expected_title):  # pylint: disable=invalid-name
            """Asserts that the given thread has the expected set of properties"""
            self.assertEqual(
                set(thread.keys()),
                set(["commentable_id", "courseware_url", "courseware_title"])
            )
            self.assertEqual(
                thread.get("courseware_url"),
                reverse(
                    "jump_to",
                    kwargs={
                        "course_id": self.course.id.to_deprecated_string(),
                        "location": discussion.location.to_deprecated_string()
                    }
                )
            )
            self.assertEqual(thread.get("courseware_title"), expected_title)

        assertThreadCorrect(threads[0], self.discussion1, "Chapter / Discussion 1")
        # Per the expected value, only the last level of a nested category
        # path appears in the title.
        assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2")

    @ddt.data((ModuleStoreEnum.Type.mongo, 2), (ModuleStoreEnum.Type.split, 1))
    @ddt.unpack
    def test_get_accessible_discussion_modules(self, modulestore_type, expected_discussion_modules):
        """
        Tests that the accessible discussion modules having no parents do not get fetched for split modulestore.
        """
        course = CourseFactory.create(default_store=modulestore_type)
        # Create a discussion module.
        test_discussion = self.store.create_child(self.user.id, course.location, 'discussion', 'test_discussion')
        # Assert that created discussion module is not an orphan.
        self.assertNotIn(test_discussion.location, self.store.get_orphans(course.id))
        # Assert that there is only one discussion module in the course at the moment.
        self.assertEqual(len(utils.get_accessible_discussion_modules(course, self.user)), 1)
        # Add an orphan discussion module to that course
        orphan = course.id.make_usage_key('discussion', 'orphan_discussion')
        self.store.create_item(self.user.id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
        # Assert that the discussion module is an orphan.
        self.assertIn(orphan, self.store.get_orphans(course.id))
        # mongo still returns the orphan (2 modules); split filters it out (1).
        self.assertEqual(len(utils.get_accessible_discussion_modules(course, self.user)), expected_discussion_modules)
@attr('shard_3')
class CachedDiscussionIdMapTestCase(ModuleStoreTestCase):
    """
    Tests that using the cache of discussion id mappings has the same behavior as searching through the course.
    """
    def setUp(self):
        super(CachedDiscussionIdMapTestCase, self).setUp()
        self.course = CourseFactory.create(org='TestX', number='101', display_name='Test Course')
        self.discussion = ItemFactory.create(
            parent_location=self.course.location,
            category='discussion',
            discussion_id='test_discussion_id',
            discussion_category='Chapter',
            discussion_target='Discussion 1'
        )
        self.discussion2 = ItemFactory.create(
            parent_location=self.course.location,
            category='discussion',
            discussion_id='test_discussion_id_2',
            discussion_category='Chapter 2',
            discussion_target='Discussion 2'
        )
        # Staff-only discussion, used to test access filtering.
        self.private_discussion = ItemFactory.create(
            parent_location=self.course.location,
            category='discussion',
            discussion_id='private_discussion_id',
            discussion_category='Chapter 3',
            discussion_target='Beta Testing',
            visible_to_staff_only=True
        )
        # Discussion missing its category/target metadata ("required keys").
        self.bad_discussion = ItemFactory.create(
            parent_location=self.course.location,
            category='discussion',
            discussion_id='bad_discussion_id',
            discussion_category=None,
            discussion_target=None
        )

    def test_cache_returns_correct_key(self):
        """A cached discussion id resolves to the module's usage key."""
        usage_key = utils.get_cached_discussion_key(self.course, 'test_discussion_id')
        self.assertEqual(usage_key, self.discussion.location)

    def test_cache_returns_none_if_id_is_not_present(self):
        """An unknown discussion id yields None rather than raising."""
        usage_key = utils.get_cached_discussion_key(self.course, 'bogus_id')
        self.assertIsNone(usage_key)

    def test_cache_raises_exception_if_course_structure_not_cached(self):
        """Deleting the CourseStructure cache makes key lookup raise."""
        CourseStructure.objects.all().delete()
        with self.assertRaises(utils.DiscussionIdMapIsNotCached):
            utils.get_cached_discussion_key(self.course, 'test_discussion_id')

    def test_cache_raises_exception_if_discussion_id_not_cached(self):
        """A structure cache without the discussion id map also raises."""
        cache = CourseStructure.objects.get(course_id=self.course.id)
        cache.discussion_id_map_json = None
        cache.save()
        with self.assertRaises(utils.DiscussionIdMapIsNotCached):
            utils.get_cached_discussion_key(self.course, 'test_discussion_id')

    def test_module_does_not_have_required_keys(self):
        """has_required_keys is False when category/target metadata is missing."""
        self.assertTrue(utils.has_required_keys(self.discussion))
        self.assertFalse(utils.has_required_keys(self.bad_discussion))

    def verify_discussion_metadata(self):
        """Retrieves the metadata for self.discussion and self.discussion2 and verifies that it is correct"""
        metadata = utils.get_cached_discussion_id_map(
            self.course,
            ['test_discussion_id', 'test_discussion_id_2'],
            self.user
        )
        discussion1 = metadata[self.discussion.discussion_id]
        discussion2 = metadata[self.discussion2.discussion_id]
        self.assertEqual(discussion1['location'], self.discussion.location)
        self.assertEqual(discussion1['title'], 'Chapter / Discussion 1')
        self.assertEqual(discussion2['location'], self.discussion2.location)
        self.assertEqual(discussion2['title'], 'Chapter 2 / Discussion 2')

    def test_get_discussion_id_map_from_cache(self):
        self.verify_discussion_metadata()

    def test_get_discussion_id_map_without_cache(self):
        # Same verification, but after dropping the cache (forces course search).
        CourseStructure.objects.all().delete()
        self.verify_discussion_metadata()

    def test_get_missing_discussion_id_map_from_cache(self):
        """Unknown ids simply produce an empty map."""
        metadata = utils.get_cached_discussion_id_map(self.course, ['bogus_id'], self.user)
        self.assertEqual(metadata, {})

    def test_get_discussion_id_map_from_cache_without_access(self):
        """Staff-only discussions are visible to self.user (staff) but hidden from a plain user."""
        user = UserFactory.create()
        metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], self.user)
        self.assertEqual(metadata['private_discussion_id']['title'], 'Chapter 3 / Beta Testing')
        metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], user)
        self.assertEqual(metadata, {})

    def test_get_bad_discussion_id(self):
        """Discussions missing required keys are excluded from the map."""
        metadata = utils.get_cached_discussion_id_map(self.course, ['bad_discussion_id'], self.user)
        self.assertEqual(metadata, {})

    def test_discussion_id_accessible(self):
        self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'test_discussion_id'))

    def test_bad_discussion_id_not_accessible(self):
        self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bad_discussion_id'))

    def test_missing_discussion_id_not_accessible(self):
        self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bogus_id'))

    def test_discussion_id_not_accessible_without_access(self):
        """Access to a staff-only discussion depends on the requesting user."""
        user = UserFactory.create()
        self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'private_discussion_id'))
        self.assertFalse(utils.discussion_category_id_access(self.course, user, 'private_discussion_id'))
class CategoryMapTestMixin(object):
    """
    Provides functionality for classes that test
    `get_discussion_category_map`.
    """
    def assert_category_map_equals(self, expected, requesting_user=None):
        """
        Call `get_discussion_category_map`, and verify that it returns
        what is expected.
        """
        user = requesting_user or self.user
        actual = utils.get_discussion_category_map(self.course, user)
        self.assertEqual(actual, expected)
@attr('shard_1')
class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase):
"""
Base testcase class for discussion categories for the
comment client service integration
"""
    def setUp(self):
        super(CategoryMapTestCase, self).setUp()
        self.course = CourseFactory.create(
            org="TestX", number="101", display_name="Test Course",
            # This test needs to use a course that has already started --
            # discussion topics only show up if the course has already started,
            # and the default start date for courses is Jan 1, 2030.
            start=datetime.datetime(2012, 2, 3, tzinfo=UTC)
        )
        # Courses get a default discussion topic on creation, so remove it
        self.course.discussion_topics = {}
        self.course.save()
        # Counter used by create_discussion to mint unique discussion ids.
        self.discussion_num = 0
        self.instructor = InstructorFactory(course_key=self.course.id)
        # Show full diffs for the large expected-map assertions in this class.
        self.maxDiff = None  # pylint: disable=invalid-name
    def create_discussion(self, discussion_category, discussion_target, **kwargs):
        """Create an inline discussion module with a sequentially numbered discussion id."""
        self.discussion_num += 1
        return ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion{}".format(self.discussion_num),
            discussion_category=discussion_category,
            discussion_target=discussion_target,
            **kwargs
        )
    # Overrides the mixin version with extra keyword arguments and queries the
    # map as the instructor rather than self.user.
    def assert_category_map_equals(self, expected, cohorted_if_in_list=False, exclude_unstarted=True):  # pylint: disable=arguments-differ
        """
        Asserts the expected map with the map returned by get_discussion_category_map method.
        """
        self.assertEqual(
            utils.get_discussion_category_map(self.course, self.instructor, cohorted_if_in_list, exclude_unstarted),
            expected
        )
    def test_empty(self):
        """With no configured topics and no inline discussions the map is empty."""
        self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []})
    def test_configured_topics(self):
        """Course-level topics track the cohort settings' cohorted_discussions list."""
        self.course.discussion_topics = {
            "Topic A": {"id": "Topic_A"},
            "Topic B": {"id": "Topic_B"},
            "Topic C": {"id": "Topic_C"}
        }

        def check_cohorted_topics(expected_ids):  # pylint: disable=missing-docstring
            self.assert_category_map_equals(
                {
                    "entries": {
                        "Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_cohorted": "Topic_A" in expected_ids},
                        "Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_cohorted": "Topic_B" in expected_ids},
                        "Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_cohorted": "Topic_C" in expected_ids},
                    },
                    "subcategories": {},
                    "children": ["Topic A", "Topic B", "Topic C"]
                }
            )

        check_cohorted_topics([])  # default (empty) cohort config

        set_course_cohort_settings(course_key=self.course.id, is_cohorted=False, cohorted_discussions=[])
        check_cohorted_topics([])

        set_course_cohort_settings(course_key=self.course.id, is_cohorted=True, cohorted_discussions=[])
        check_cohorted_topics([])

        set_course_cohort_settings(
            course_key=self.course.id,
            is_cohorted=True,
            cohorted_discussions=["Topic_B", "Topic_C"],
            always_cohort_inline_discussions=False,
        )
        check_cohorted_topics(["Topic_B", "Topic_C"])

        # Ids in cohorted_discussions that don't exist in the course are ignored.
        set_course_cohort_settings(
            course_key=self.course.id,
            is_cohorted=True,
            cohorted_discussions=["Topic_A", "Some_Other_Topic"],
            always_cohort_inline_discussions=False,
        )
        check_cohorted_topics(["Topic_A"])

        # unlikely case, but make sure it works.
        set_course_cohort_settings(
            course_key=self.course.id,
            is_cohorted=False,
            cohorted_discussions=["Topic_A"],
            always_cohort_inline_discussions=False,
        )
        check_cohorted_topics([])
def test_single_inline(self):
self.create_discussion("Chapter", "Discussion")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter"]
}
)
def test_inline_with_always_cohort_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_course_cohort_settings(course_key=self.course.id, is_cohorted=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": True,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter"]
}
)
def test_inline_without_always_cohort_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_course_cohort_settings(course_key=self.course.id, is_cohorted=True, always_cohort_inline_discussions=False)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter"]
},
cohorted_if_in_list=True
)
def test_get_unstarted_discussion_modules(self):
later = datetime.datetime(datetime.MAXYEAR, 1, 1, tzinfo=django_utc())
self.create_discussion("Chapter 1", "Discussion 1", start=later)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
"start_date": later
}
},
"subcategories": {},
"children": ["Discussion 1"],
"start_date": later,
"sort_key": "Chapter 1"
}
},
"children": ["Chapter 1"]
},
cohorted_if_in_list=True,
exclude_unstarted=False
)
def test_tree(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
def check_cohorted(is_cohorted):
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": is_cohorted,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion 1", "Discussion 2"]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion"]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Subsection 1", "Subsection 2"]
}
},
"children": ["Discussion", "Section 1"]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_cohorted": is_cohorted,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Section 1"]
}
},
"children": ["Chapter 1", "Chapter 2", "Chapter 3"]
}
)
# empty / default config
check_cohorted(False)
# explicitly disabled cohorting
set_course_cohort_settings(course_key=self.course.id, is_cohorted=False)
check_cohorted(False)
# explicitly enabled cohorting
set_course_cohort_settings(course_key=self.course.id, is_cohorted=True)
check_cohorted(True)
    def test_tree_with_duplicate_targets(self):
        """Duplicate discussion targets within a category are disambiguated with ' (n)' suffixes."""
        self.create_discussion("Chapter 1", "Discussion A")
        self.create_discussion("Chapter 1", "Discussion B")
        self.create_discussion("Chapter 1", "Discussion A")  # duplicate
        self.create_discussion("Chapter 1", "Discussion A")  # another duplicate
        self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
        self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")  # duplicate

        category_map = utils.get_discussion_category_map(self.course, self.user)

        chapter1 = category_map["subcategories"]["Chapter 1"]
        chapter1_discussions = set(["Discussion A", "Discussion B", "Discussion A (1)", "Discussion A (2)"])
        self.assertEqual(set(chapter1["children"]), chapter1_discussions)
        self.assertEqual(set(chapter1["entries"].keys()), chapter1_discussions)

        chapter2 = category_map["subcategories"]["Chapter 2"]
        subsection1 = chapter2["subcategories"]["Section 1"]["subcategories"]["Subsection 1"]
        subsection1_discussions = set(["Discussion", "Discussion (1)"])
        self.assertEqual(set(subsection1["children"]), subsection1_discussions)
        self.assertEqual(set(subsection1["entries"].keys()), subsection1_discussions)
def test_start_date_filter(self):
now = datetime.datetime.now()
later = datetime.datetime.max
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2 обсуждение", start=later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=later)
self.assertFalse(self.course.self_paced)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion 1"]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Chapter 1", "Chapter 2"]
}
)
def test_self_paced_start_date_filter(self):
self.course.self_paced = True
self.course.save()
now = datetime.datetime.now()
later = datetime.datetime.max
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2", start=later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=later)
self.assertTrue(self.course.self_paced)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_cohorted": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion 1", "Discussion 2"]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Subsection 1", "Subsection 2"]
}
},
"children": ["Discussion", "Section 1"]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_cohorted": False,
}
},
"subcategories": {},
"children": ["Discussion"]
}
},
"children": ["Section 1"]
}
},
"children": ["Chapter 1", "Chapter 2", "Chapter 3"]
}
)
def test_sort_inline_explicit(self):
self.create_discussion("Chapter", "Discussion 1", sort_key="D")
self.create_discussion("Chapter", "Discussion 2", sort_key="A")
self.create_discussion("Chapter", "Discussion 3", sort_key="E")
self.create_discussion("Chapter", "Discussion 4", sort_key="C")
self.create_discussion("Chapter", "Discussion 5", sort_key="B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": "D",
"is_cohorted": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": "A",
"is_cohorted": False,
},
"Discussion 3": {
"id": "discussion3",
"sort_key": "E",
"is_cohorted": False,
},
"Discussion 4": {
"id": "discussion4",
"sort_key": "C",
"is_cohorted": False,
},
"Discussion 5": {
"id": "discussion5",
"sort_key": "B",
"is_cohorted": False,
}
},
"subcategories": {},
"children": [
"Discussion 2",
"Discussion 5",
"Discussion 4",
"Discussion 1",
"Discussion 3"
]
}
},
"children": ["Chapter"]
}
)
    def test_sort_configured_topics_explicit(self):
        """Course-level topics are ordered by their explicit sort_key, not by name."""
        self.course.discussion_topics = {
            "Topic A": {"id": "Topic_A", "sort_key": "B"},
            "Topic B": {"id": "Topic_B", "sort_key": "C"},
            "Topic C": {"id": "Topic_C", "sort_key": "A"}
        }
        self.assert_category_map_equals(
            {
                "entries": {
                    "Topic A": {"id": "Topic_A", "sort_key": "B", "is_cohorted": False},
                    "Topic B": {"id": "Topic_B", "sort_key": "C", "is_cohorted": False},
                    "Topic C": {"id": "Topic_C", "sort_key": "A", "is_cohorted": False},
                },
                "subcategories": {},
                # Children ordered by sort_key: A < B < C.
                "children": ["Topic C", "Topic A", "Topic B"]
            }
        )
def test_sort_alpha(self):
self.course.discussion_sort_alpha = True
self.course.save()
self.create_discussion("Chapter", "Discussion D")
self.create_discussion("Chapter", "Discussion A")
self.create_discussion("Chapter", "Discussion E")
self.create_discussion("Chapter", "Discussion C")
self.create_discussion("Chapter", "Discussion B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion D": {
"id": "discussion1",
"sort_key": "Discussion D",
"is_cohorted": False,
},
"Discussion A": {
"id": "discussion2",
"sort_key": "Discussion A",
"is_cohorted": False,
},
"Discussion E": {
"id": "discussion3",
"sort_key": "Discussion E",
"is_cohorted": False,
},
"Discussion C": {
"id": "discussion4",
"sort_key": "Discussion C",
"is_cohorted": False,
},
"Discussion B": {
"id": "discussion5",
"sort_key": "Discussion B",
"is_cohorted": False,
}
},
"subcategories": {},
"children": [
"Discussion A",
"Discussion B",
"Discussion C",
"Discussion D",
"Discussion E"
]
}
},
"children": ["Chapter"]
}
)
    def test_sort_intermediates(self):
        """Intermediate categories (chapters) and their children sort by name.

        Discussions are created out of order across three chapters; the map
        must still list both the chapters and each chapter's discussions in
        sorted order.
        """
        self.create_discussion("Chapter B", "Discussion 2")
        self.create_discussion("Chapter C", "Discussion")
        self.create_discussion("Chapter A", "Discussion 1")
        self.create_discussion("Chapter B", "Discussion 1")
        self.create_discussion("Chapter A", "Discussion 2")
        self.assert_category_map_equals(
            {
                "entries": {},
                "subcategories": {
                    "Chapter A": {
                        "entries": {
                            "Discussion 1": {
                                "id": "discussion3",
                                "sort_key": None,
                                "is_cohorted": False,
                            },
                            "Discussion 2": {
                                "id": "discussion5",
                                "sort_key": None,
                                "is_cohorted": False,
                            }
                        },
                        "subcategories": {},
                        "children": ["Discussion 1", "Discussion 2"]
                    },
                    "Chapter B": {
                        "entries": {
                            "Discussion 1": {
                                "id": "discussion4",
                                "sort_key": None,
                                "is_cohorted": False,
                            },
                            "Discussion 2": {
                                "id": "discussion1",
                                "sort_key": None,
                                "is_cohorted": False,
                            }
                        },
                        "subcategories": {},
                        "children": ["Discussion 1", "Discussion 2"]
                    },
                    "Chapter C": {
                        "entries": {
                            "Discussion": {
                                "id": "discussion2",
                                "sort_key": None,
                                "is_cohorted": False,
                            }
                        },
                        "subcategories": {},
                        "children": ["Discussion"]
                    }
                },
                "children": ["Chapter A", "Chapter B", "Chapter C"]
            }
        )
    def test_ids_empty(self):
        """A course with no discussion topics yields an empty id list."""
        self.assertEqual(utils.get_discussion_categories_ids(self.course, self.user), [])
    def test_ids_configured_topics(self):
        """Ids of course-wide configured topics are all returned."""
        self.course.discussion_topics = {
            "Topic A": {"id": "Topic_A"},
            "Topic B": {"id": "Topic_B"},
            "Topic C": {"id": "Topic_C"}
        }
        self.assertItemsEqual(
            utils.get_discussion_categories_ids(self.course, self.user),
            ["Topic_A", "Topic_B", "Topic_C"]
        )
    def test_ids_inline(self):
        """Ids of inline discussion modules are returned at any nesting depth."""
        self.create_discussion("Chapter 1", "Discussion 1")
        self.create_discussion("Chapter 1", "Discussion 2")
        self.create_discussion("Chapter 2", "Discussion")
        self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
        self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
        self.create_discussion("Chapter 3 / Section 1", "Discussion")
        self.assertItemsEqual(
            utils.get_discussion_categories_ids(self.course, self.user),
            ["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"]
        )
    def test_ids_mixed(self):
        """Course-wide topic ids and inline discussion ids are combined in the result."""
        self.course.discussion_topics = {
            "Topic A": {"id": "Topic_A"},
            "Topic B": {"id": "Topic_B"},
            "Topic C": {"id": "Topic_C"}
        }
        self.create_discussion("Chapter 1", "Discussion 1")
        self.create_discussion("Chapter 2", "Discussion")
        self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
        self.assertItemsEqual(
            utils.get_discussion_categories_ids(self.course, self.user),
            ["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"]
        )
@attr('shard_1')
class ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase):
    """
    Tests `get_discussion_category_map` on discussion modules which are
    only visible to some content groups.

    Each test passes a different ``requesting_user`` and checks that only the
    Week 1 topics visible to that user's group (plus the course-wide "General"
    topic) appear in the returned category map.
    """
    def test_staff_user(self):
        """
        Verify that the staff user can access the alpha, beta, and
        global discussion topics.
        """
        self.assert_category_map_equals(
            {
                'subcategories': {
                    'Week 1': {
                        'subcategories': {},
                        'children': [
                            'Visible to Alpha',
                            'Visible to Beta',
                            'Visible to Everyone'
                        ],
                        'entries': {
                            'Visible to Alpha': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'alpha_group_discussion'
                            },
                            'Visible to Beta': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'beta_group_discussion'
                            },
                            'Visible to Everyone': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'global_group_discussion'
                            }
                        }
                    }
                },
                'children': ['General', 'Week 1'],
                'entries': {
                    'General': {
                        'sort_key': 'General',
                        'is_cohorted': False,
                        'id': 'i4x-org-number-course-run'
                    }
                }
            },
            requesting_user=self.staff_user
        )
    def test_alpha_user(self):
        """
        Verify that the alpha user can access the alpha and global
        discussion topics.
        """
        self.assert_category_map_equals(
            {
                'subcategories': {
                    'Week 1': {
                        'subcategories': {},
                        'children': [
                            'Visible to Alpha',
                            'Visible to Everyone'
                        ],
                        'entries': {
                            'Visible to Alpha': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'alpha_group_discussion'
                            },
                            'Visible to Everyone': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'global_group_discussion'
                            }
                        }
                    }
                },
                'children': ['General', 'Week 1'],
                'entries': {
                    'General': {
                        'sort_key': 'General',
                        'is_cohorted': False,
                        'id': 'i4x-org-number-course-run'
                    }
                }
            },
            requesting_user=self.alpha_user
        )
    def test_beta_user(self):
        """
        Verify that the beta user can access the beta and global
        discussion topics.
        """
        self.assert_category_map_equals(
            {
                'subcategories': {
                    'Week 1': {
                        'subcategories': {},
                        'children': [
                            'Visible to Beta',
                            'Visible to Everyone'
                        ],
                        'entries': {
                            'Visible to Beta': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'beta_group_discussion'
                            },
                            'Visible to Everyone': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'global_group_discussion'
                            }
                        }
                    }
                },
                'children': ['General', 'Week 1'],
                'entries': {
                    'General': {
                        'sort_key': 'General',
                        'is_cohorted': False,
                        'id': 'i4x-org-number-course-run'
                    }
                }
            },
            requesting_user=self.beta_user
        )
    def test_non_cohorted_user(self):
        """
        Verify that the non-cohorted user can access the global
        discussion topic.
        """
        self.assert_category_map_equals(
            {
                'subcategories': {
                    'Week 1': {
                        'subcategories': {},
                        'children': [
                            'Visible to Everyone'
                        ],
                        'entries': {
                            'Visible to Everyone': {
                                'sort_key': None,
                                'is_cohorted': True,
                                'id': 'global_group_discussion'
                            }
                        }
                    }
                },
                'children': ['General', 'Week 1'],
                'entries': {
                    'General': {
                        'sort_key': 'General',
                        'is_cohorted': False,
                        'id': 'i4x-org-number-course-run'
                    }
                }
            },
            requesting_user=self.non_cohorted_user
        )
class JsonResponseTestCase(TestCase, UnicodeTestMixin):
    """Verify that JsonResponse round-trips unicode payloads through JSON intact."""
    def _test_unicode_data(self, text):
        """Serialize ``text`` with JsonResponse and check it parses back unchanged."""
        response = utils.JsonResponse(text)
        reparsed = json.loads(response.content)
        self.assertEqual(reparsed, text)
@attr('shard_1')
class RenderMustacheTests(TestCase):
    """
    Test the `render_mustache` utility function.
    """
    @mock.patch('edxmako.LOOKUP', {})
    def test_it(self):
        """
        Basic test.
        """
        # Register this module's package with the template lookup so that
        # 'test.mustache' can be resolved relative to this test file.
        add_lookup('main', '', package=__name__)
        self.assertEqual(utils.render_mustache('test.mustache', {}), 'Testing 1 2 3.\n')
class DiscussionTabTestCase(ModuleStoreTestCase):
    """ Test visibility of the discussion tab. """
    def setUp(self):
        super(DiscussionTabTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.enrolled_user = UserFactory.create()
        self.staff_user = AdminFactory.create()
        CourseEnrollmentFactory.create(user=self.enrolled_user, course_id=self.course.id)
        self.unenrolled_user = UserFactory.create()
    def discussion_tab_present(self, user):
        """ Returns true if the user has access to the discussion tab. """
        request = RequestFactory().request()
        request.user = user
        all_tabs = get_course_tab_list(request, self.course)
        return any(tab.type == 'discussion' for tab in all_tabs)
    def test_tab_access(self):
        """Staff and enrolled users see the tab; unenrolled users do not."""
        with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': True}):
            self.assertTrue(self.discussion_tab_present(self.staff_user))
            self.assertTrue(self.discussion_tab_present(self.enrolled_user))
            self.assertFalse(self.discussion_tab_present(self.unenrolled_user))
    @mock.patch('ccx.overrides.get_current_ccx')
    def test_tab_settings(self, mock_get_ccx):
        """The tab is hidden when the discussion service is disabled or inside a CCX."""
        # Simulate being inside a CCX (custom course) context.
        mock_get_ccx.return_value = True
        with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': False}):
            self.assertFalse(self.discussion_tab_present(self.enrolled_user))
        with self.settings(FEATURES={'CUSTOM_COURSES_EDX': True}):
            self.assertFalse(self.discussion_tab_present(self.enrolled_user))
class IsCommentableCohortedTestCase(ModuleStoreTestCase):
    """
    Test the is_commentable_cohorted function.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE
    def setUp(self):
        """
        Make sure that course is reloaded every time--clear out the modulestore.
        """
        super(IsCommentableCohortedTestCase, self).setUp()
        self.toy_course_key = ToyCourseFactory.create().id
    def test_is_commentable_cohorted(self):
        """Top-level topics are cohorted only when explicitly listed in cohorted_discussions."""
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        def to_id(name):
            """Helper for topic_name_to_id that uses course."""
            return topic_name_to_id(course, name)
        # no topics
        self.assertFalse(
            utils.is_commentable_cohorted(course.id, to_id("General")),
            "Course doesn't even have a 'General' topic"
        )
        # not cohorted
        config_course_cohorts(course, is_cohorted=False, discussion_topics=["General", "Feedback"])
        self.assertFalse(
            utils.is_commentable_cohorted(course.id, to_id("General")),
            "Course isn't cohorted"
        )
        # cohorted, but top level topics aren't
        config_course_cohorts(course, is_cohorted=True, discussion_topics=["General", "Feedback"])
        self.assertTrue(cohorts.is_course_cohorted(course.id))
        self.assertFalse(
            utils.is_commentable_cohorted(course.id, to_id("General")),
            "Course is cohorted, but 'General' isn't."
        )
        # cohorted, including "Feedback" top-level topics aren't
        config_course_cohorts(
            course,
            is_cohorted=True,
            discussion_topics=["General", "Feedback"],
            cohorted_discussions=["Feedback"]
        )
        self.assertTrue(cohorts.is_course_cohorted(course.id))
        self.assertFalse(
            utils.is_commentable_cohorted(course.id, to_id("General")),
            "Course is cohorted, but 'General' isn't."
        )
        self.assertTrue(
            utils.is_commentable_cohorted(course.id, to_id("Feedback")),
            "Feedback was listed as cohorted. Should be."
        )
    def test_is_commentable_cohorted_inline_discussion(self):
        """Inline discussions default to cohorted unless always_cohort_inline_discussions is False."""
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        def to_id(name):  # pylint: disable=missing-docstring
            return topic_name_to_id(course, name)
        config_course_cohorts(
            course,
            is_cohorted=True,
            discussion_topics=["General", "Feedback"],
            cohorted_discussions=["Feedback", "random_inline"]
        )
        self.assertTrue(
            utils.is_commentable_cohorted(course.id, to_id("random")),
            "By default, Non-top-level discussion is always cohorted in cohorted courses."
        )
        # if always_cohort_inline_discussions is set to False, non-top-level discussion are always
        # non cohorted unless they are explicitly set in cohorted_discussions
        config_course_cohorts(
            course,
            is_cohorted=True,
            discussion_topics=["General", "Feedback"],
            cohorted_discussions=["Feedback", "random_inline"],
            always_cohort_inline_discussions=False
        )
        self.assertFalse(
            utils.is_commentable_cohorted(course.id, to_id("random")),
            "Non-top-level discussion is not cohorted if always_cohort_inline_discussions is False."
        )
        self.assertTrue(
            utils.is_commentable_cohorted(course.id, to_id("random_inline")),
            "If always_cohort_inline_discussions set to False, Non-top-level discussion is "
            "cohorted if explicitly set in cohorted_discussions."
        )
        self.assertTrue(
            utils.is_commentable_cohorted(course.id, to_id("Feedback")),
            "If always_cohort_inline_discussions set to False, top-level discussion are not affected."
        )
    def test_is_commentable_cohorted_team(self):
        """Team discussion topics are never cohorted, even in a cohorted course."""
        course = modulestore().get_course(self.toy_course_key)
        self.assertFalse(cohorts.is_course_cohorted(course.id))
        config_course_cohorts(course, is_cohorted=True)
        team = CourseTeamFactory(course_id=course.id)
        # Verify that team discussions are not cohorted, but other discussions are
        self.assertFalse(utils.is_commentable_cohorted(course.id, team.discussion_topic_id))
        self.assertTrue(utils.is_commentable_cohorted(course.id, "random"))
|
mamachanko/lymph | refs/heads/master | lymph/core/channels.py | 8 | import gevent
import gevent.queue
from lymph.exceptions import Timeout, Nack, RemoteError
from lymph.core.messages import Message
class Channel(object):
    """Base channel: pairs a request with the server instance that owns it."""
    def __init__(self, request, server):
        self.request = request
        self.server = server
class RequestChannel(Channel):
    """Channel that waits for the reply to a single outgoing request.

    Replies are handed in asynchronously via recv() and consumed exactly once
    with get(); the channel unregisters itself from the server afterwards.
    """
    def __init__(self, request, server):
        super(RequestChannel, self).__init__(request, server)
        # Reply messages are queued here until the waiter calls get().
        self.queue = gevent.queue.Queue()
    def recv(self, msg):
        # Deliver an incoming reply to the (possibly blocked) get() call.
        self.queue.put(msg)
    def get(self, timeout=1):
        """Return the reply message.

        Raises Nack for NACK replies, RemoteError for ERROR replies, and
        Timeout if nothing arrives within ``timeout`` seconds.
        """
        try:
            msg = self.queue.get(timeout=timeout)
            if msg.type == Message.NACK:
                raise Nack(self.request)
            elif msg.type == Message.ERROR:
                raise RemoteError.from_reply(self.request, msg)
            return msg
        except gevent.queue.Empty:
            # gevent's Queue.get raises Empty when the timeout expires.
            raise Timeout(self.request)
        finally:
            # One-shot channel: always unregister, whether or not a reply came.
            self.close()
    def close(self):
        # Remove this channel from the server's registry of pending requests.
        del self.server.channels[self.request.id]
class ReplyChannel(Channel):
    """Channel used to answer a single incoming request.

    Remembers whether a real reply was already sent so that ack()/nack()
    can be suppressed via ``unless_reply_sent``.
    """
    def __init__(self, request, server):
        super(ReplyChannel, self).__init__(request, server)
        self._sent_reply = False
    def reply(self, body):
        """Send ``body`` back to the requester and mark the reply as sent."""
        self.server.send_reply(self.request, body)
        self._sent_reply = True
    def ack(self, unless_reply_sent=False):
        """Acknowledge the request (optionally only if no reply went out yet)."""
        if not (unless_reply_sent and self._sent_reply):
            self.server.send_reply(self.request, None, msg_type=Message.ACK)
            self._sent_reply = True
    def nack(self, unless_reply_sent=False):
        """Negatively acknowledge the request (optionally only if no reply went out yet)."""
        if not (unless_reply_sent and self._sent_reply):
            self.server.send_reply(self.request, None, msg_type=Message.NACK)
            self._sent_reply = True
    def error(self, **body):
        """Send an error reply built from the given keyword arguments."""
        self.server.send_reply(self.request, body, msg_type=Message.ERROR)
    def close(self):
        # Reply channels hold no per-request server state to release.
        pass
|
mbayon/TFG-MachineLearning | refs/heads/master | venv/lib/python3.6/site-packages/django/forms/boundfield.py | 28 | from __future__ import unicode_literals
import datetime
import warnings
from django.forms.utils import flatatt, pretty_name
from django.forms.widgets import Textarea, TextInput
from django.utils import six
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.inspect import func_accepts_kwargs, func_supports_parameter
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
__all__ = ('BoundField',)
@html_safe
@python_2_unicode_compatible
class BoundField(object):
    """A Field plus data: binds a form Field to one Form instance and its data."""
    def __init__(self, form, field, name):
        self.form = form
        self.field = field
        self.name = name
        # html_name is the (possibly prefixed) name used when rendering;
        # the *_initial_* variants identify the hidden twin input used when
        # show_hidden_initial is set on the field.
        self.html_name = form.add_prefix(name)
        self.html_initial_name = form.add_initial_prefix(name)
        self.html_initial_id = form.add_initial_prefix(self.auto_id)
        if self.field.label is None:
            self.label = pretty_name(name)
        else:
            self.label = self.field.label
        self.help_text = field.help_text or ''
    def __str__(self):
        """Renders this field as an HTML widget."""
        if self.field.show_hidden_initial:
            return self.as_widget() + self.as_hidden(only_initial=True)
        return self.as_widget()
    @cached_property
    def subwidgets(self):
        """
        Most widgets yield a single subwidget, but others like RadioSelect and
        CheckboxSelectMultiple produce one subwidget for each choice.
        This property is cached so that only one database query occurs when
        rendering ModelChoiceFields.
        """
        id_ = self.field.widget.attrs.get('id') or self.auto_id
        attrs = {'id': id_} if id_ else {}
        attrs = self.build_widget_attrs(attrs)
        return list(
            BoundWidget(self.field.widget, widget, self.form.renderer)
            for widget in self.field.widget.subwidgets(self.html_name, self.value(), attrs=attrs)
        )
    def __bool__(self):
        # BoundField evaluates to True even if it doesn't have subwidgets.
        return True
    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)
    def __iter__(self):
        return iter(self.subwidgets)
    def __len__(self):
        return len(self.subwidgets)
    def __getitem__(self, idx):
        # Prevent unnecessary reevaluation when accessing BoundField's attrs
        # from templates.
        if not isinstance(idx, six.integer_types + (slice,)):
            raise TypeError
        return self.subwidgets[idx]
    @property
    def errors(self):
        """
        Returns an ErrorList for this field. Returns an empty ErrorList
        if there are none.
        """
        return self.form.errors.get(self.name, self.form.error_class())
    def as_widget(self, widget=None, attrs=None, only_initial=False):
        """
        Renders the field by rendering the passed widget, adding any HTML
        attributes passed as attrs. If no widget is specified, then the
        field's default widget will be used.
        """
        if not widget:
            widget = self.field.widget
        if self.field.localize:
            widget.is_localized = True
        attrs = attrs or {}
        attrs = self.build_widget_attrs(attrs, widget)
        auto_id = self.auto_id
        if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
            if not only_initial:
                attrs['id'] = auto_id
            else:
                attrs['id'] = self.html_initial_id
        if not only_initial:
            name = self.html_name
        else:
            name = self.html_initial_name
        kwargs = {}
        # Transitional shim: only pass `renderer` to widgets whose render()
        # accepts it; it becomes mandatory in Django 2.1 (see warning below).
        if func_supports_parameter(widget.render, 'renderer') or func_accepts_kwargs(widget.render):
            kwargs['renderer'] = self.form.renderer
        else:
            warnings.warn(
                'Add the `renderer` argument to the render() method of %s. '
                'It will be mandatory in Django 2.1.' % widget.__class__,
                RemovedInDjango21Warning, stacklevel=2,
            )
        html = widget.render(
            name=name,
            value=self.value(),
            attrs=attrs,
            **kwargs
        )
        return force_text(html)
    def as_text(self, attrs=None, **kwargs):
        """
        Returns a string of HTML for representing this as an <input type="text">.
        """
        return self.as_widget(TextInput(), attrs, **kwargs)
    def as_textarea(self, attrs=None, **kwargs):
        "Returns a string of HTML for representing this as a <textarea>."
        return self.as_widget(Textarea(), attrs, **kwargs)
    def as_hidden(self, attrs=None, **kwargs):
        """
        Returns a string of HTML for representing this as an <input type="hidden">.
        """
        return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
    @property
    def data(self):
        """
        Returns the data for this BoundField, or None if it wasn't given.
        """
        return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
    def value(self):
        """
        Returns the value for this BoundField, using the initial value if
        the form is not bound or the data otherwise.
        """
        data = self.initial
        if self.form.is_bound:
            data = self.field.bound_data(self.data, data)
        return self.field.prepare_value(data)
    def label_tag(self, contents=None, attrs=None, label_suffix=None):
        """
        Wraps the given contents in a <label>, if the field has an ID attribute.
        contents should be 'mark_safe'd to avoid HTML escaping. If contents
        aren't given, uses the field's HTML-escaped label.
        If attrs are given, they're used as HTML attributes on the <label> tag.
        label_suffix allows overriding the form's label_suffix.
        """
        contents = contents or self.label
        if label_suffix is None:
            label_suffix = (self.field.label_suffix if self.field.label_suffix is not None
                            else self.form.label_suffix)
        # Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix to be appended to the label
        if label_suffix and contents and contents[-1] not in _(':?.!'):
            contents = format_html('{}{}', contents, label_suffix)
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        if id_:
            id_for_label = widget.id_for_label(id_)
            if id_for_label:
                attrs = dict(attrs or {}, **{'for': id_for_label})
            if self.field.required and hasattr(self.form, 'required_css_class'):
                attrs = attrs or {}
                if 'class' in attrs:
                    attrs['class'] += ' ' + self.form.required_css_class
                else:
                    attrs['class'] = self.form.required_css_class
            attrs = flatatt(attrs) if attrs else ''
            contents = format_html('<label{}>{}</label>', attrs, contents)
        else:
            # No id available: emit the (escaped) label text without a tag.
            contents = conditional_escape(contents)
        return mark_safe(contents)
    def css_classes(self, extra_classes=None):
        """
        Returns a string of space-separated CSS classes for this field.
        """
        if hasattr(extra_classes, 'split'):
            extra_classes = extra_classes.split()
        extra_classes = set(extra_classes or [])
        if self.errors and hasattr(self.form, 'error_css_class'):
            extra_classes.add(self.form.error_css_class)
        if self.field.required and hasattr(self.form, 'required_css_class'):
            extra_classes.add(self.form.required_css_class)
        return ' '.join(extra_classes)
    @property
    def is_hidden(self):
        "Returns True if this BoundField's widget is hidden."
        return self.field.widget.is_hidden
    @property
    def auto_id(self):
        """
        Calculates and returns the ID attribute for this BoundField, if the
        associated Form has specified auto_id. Returns an empty string otherwise.
        """
        auto_id = self.form.auto_id
        if auto_id and '%s' in force_text(auto_id):
            return force_text(auto_id) % self.html_name
        elif auto_id:
            return self.html_name
        return ''
    @property
    def id_for_label(self):
        """
        Wrapper around the field widget's `id_for_label` method.
        Useful, for example, for focusing on this field regardless of whether
        it has a single widget or a MultiWidget.
        """
        widget = self.field.widget
        id_ = widget.attrs.get('id') or self.auto_id
        return widget.id_for_label(id_)
    @cached_property
    def initial(self):
        """Return the field's initial value with microseconds normalized away."""
        data = self.form.get_initial_for_field(self.field, self.name)
        # If this is an auto-generated default date, nix the microseconds for
        # standardized handling. See #22502.
        if (isinstance(data, (datetime.datetime, datetime.time)) and
                not self.field.widget.supports_microseconds):
            data = data.replace(microsecond=0)
        return data
    def build_widget_attrs(self, attrs, widget=None):
        """Combine the given attrs with required/disabled flags from the field."""
        if not widget:
            widget = self.field.widget
        attrs = dict(attrs)  # Copy attrs to avoid modifying the argument.
        if widget.use_required_attribute(self.initial) and self.field.required and self.form.use_required_attribute:
            attrs['required'] = True
        if self.field.disabled:
            attrs['disabled'] = True
        return attrs
@html_safe
@python_2_unicode_compatible
class BoundWidget(object):
    """
    A container class used for iterating over widgets. This is useful for
    widgets that have choices. For example, the following can be used in a
    template:
    {% for radio in myform.beatles %}
        <label for="{{ radio.id_for_label }}">
            {{ radio.choice_label }}
            <span class="radio">{{ radio.tag }}</span>
        </label>
    {% endfor %}
    """
    def __init__(self, parent_widget, data, renderer):
        self.parent_widget = parent_widget
        self.data = data
        self.renderer = renderer
    def __str__(self):
        # The default string form includes the wrapping <label>.
        return self.tag(wrap_label=True)
    def tag(self, wrap_label=False):
        """Render this subwidget via the parent widget's template machinery."""
        ctx = {'widget': self.data, 'wrap_label': wrap_label}
        return self.parent_widget._render(self.template_name, ctx, self.renderer)
    @property
    def template_name(self):
        # Per-subwidget template override, falling back to the parent's.
        return self.data['template_name'] if 'template_name' in self.data else self.parent_widget.template_name
    @property
    def id_for_label(self):
        return 'id_{}_{}'.format(self.data['name'], self.data['index'])
    @property
    def choice_label(self):
        return self.data['label']
|
airfer/Gtest1.7 | refs/heads/master | test/gtest_help_test.py | 2968 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection used to decide which flags the binary should advertise.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Deliberately malformed spellings of --gtest_list_tests; the binary should
# treat all of them as a request for help.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
# Probe the binary once up front to see whether death tests were compiled in.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ with the given command-line flag.

  Args:
    flag: the flag to pass to the binary, or None for no flag.

  Returns:
    A (exit code, text output) tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""
  def TestHelpFlag(self, flag):
    """Verifies correct behavior when help flag is specified.
    The right message must be printed and the tests must be
    skipped when the given flag is specified.
    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)
    # Result streaming is only advertised on Linux builds.
    if IS_LINUX:
      self.assert_(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
    # The death-test style flag only exists where death tests are compiled in
    # (and is not advertised on Windows).
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.
    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.
    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)
  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')
  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')
  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')
  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')
  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)
  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)
  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)
  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Standard Google Test Python harness entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
xunilrj/sandbox | refs/heads/master | courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project3/mnist/part2-mnist/utils.py | 5 | import pickle, gzip, numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
def plot_images(X):
    """Display each row of X as a 28x28 greyscale image in a roughly square grid."""
    if X.ndim == 1:
        X = np.array([X])
    count = X.shape[0]
    rows = math.floor(math.sqrt(count))
    cols = math.ceil(count / rows)
    for idx in range(count):
        image = X[idx, :].reshape(28, 28)
        plt.subplot(rows, cols, idx + 1)
        plt.imshow(image, cmap=cm.Greys_r)
        plt.axis('off')
    plt.show()
def pick_examples_of(X, Y, labels, total_count):
    """Select up to ``total_count`` examples whose label is in ``labels``.

    Args:
        X: 2D numpy array of examples, one per row.
        Y: 1D numpy array of labels, aligned with the rows of X.
        labels: iterable of label values to keep.
        total_count: maximum number of matching examples to return.

    Returns:
        Tuple ``(filtered_x, filtered_y)``: the first ``total_count`` rows
        (and labels) whose label is in ``labels``.
    """
    # np.isin builds the membership mask in one vectorized pass; unlike the
    # previous manual `|=` accumulation it is also well-defined when `labels`
    # is empty (the old code indexed X[None], silently adding an axis).
    mask = np.isin(Y, list(labels))
    return (X[mask][:total_count], Y[mask][:total_count])
def extract_training_and_test_examples_with_labels(train_x, train_y, test_x, test_y, labels, training_count, test_count):
    """Restrict both the training and test splits to examples whose label is in ``labels``."""
    train_subset = pick_examples_of(train_x, train_y, labels, training_count)
    test_subset = pick_examples_of(test_x, test_y, labels, test_count)
    # Concatenating the two (x, y) pairs yields the usual 4-tuple layout.
    return train_subset + test_subset
def write_pickle_data(data, file_name):
    """Pickle ``data`` into a gzip-compressed file at ``file_name``.

    Uses a context manager so the file handle is closed even if pickling
    raises (the previous explicit close() was skipped on error).
    """
    with gzip.open(file_name, 'wb') as f:
        pickle.dump(data, f)
def read_pickle_data(file_name):
    """Load and return a pickled object from the gzip file at ``file_name``.

    ``encoding='latin1'`` keeps Python-2-era pickles (e.g. the MNIST dataset)
    loadable under Python 3. The context manager guarantees the handle is
    closed even if unpickling raises.
    """
    with gzip.open(file_name, 'rb') as f:
        return pickle.load(f, encoding='latin1')
def get_MNIST_data():
    """
    Reads mnist dataset from file
    Returns:
        train_x - 2D Numpy array (n, d) where each row is an image
        train_y - 1D Numpy array (n, ) where each row is a label
        test_x - 2D Numpy array (n, d) where each row is an image
        test_y - 1D Numpy array (n, ) where each row is a label
    """
    train_set, valid_set, test_set = read_pickle_data('../Datasets/mnist.pkl.gz')
    train_x, train_y = train_set
    valid_x, valid_y = valid_set
    # Merge the validation split into the training set, leaving a plain
    # train/test partition.
    train_x = np.vstack((train_x, valid_x))
    train_y = np.append(train_y, valid_y)
    test_x, test_y = test_set
    return (train_x, train_y, test_x, test_y)
def load_train_and_test_pickle(file_name):
    """Load a (train_x, train_y, test_x, test_y) tuple from a gzipped pickle."""
    data = read_pickle_data(file_name)
    train_x, train_y, test_x, test_y = data
    return train_x, train_y, test_x, test_y
# returns the feature set in a numpy ndarray
def load_CSV(filename):
    """Load a comma-separated numeric file into a numpy ndarray.

    Passing the filename (rather than an open handle) lets np.loadtxt open
    *and close* the file itself; the previous version opened a handle that
    was never closed.
    """
    return np.loadtxt(filename, delimiter=',')
|
sanghinitin/golismero | refs/heads/master | tools/sqlmap/waf/fortiweb.py | 8 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "FortiWeb Web Application Firewall (Fortinet Inc.)"
def detect(get_page):
    """Return True if any attack-vector response carries a FortiWeb session cookie."""
    for vector in WAF_ATTACK_VECTORS:
        _, headers, _ = get_page(get=vector)
        cookie = headers.get(HTTP_HEADER.SET_COOKIE, "")
        # FortiWeb identifies itself by setting a FORTIWAFSID session cookie.
        if re.search(r"\AFORTIWAFSID=", cookie, re.I) is not None:
            return True
    return False
|
namccart/gnuradio | refs/heads/master | gr-blocks/python/blocks/__init__.py | 47 | #
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Processing blocks common to many flowgraphs.
'''
import os
try:
    from blocks_swig import *
except ImportError:
    # The compiled SWIG bindings were not importable directly; retry after
    # adding the in-tree swig directory to this package's search path.
    dirname, filename = os.path.split(os.path.abspath(__file__))
    __path__.append(os.path.join(dirname, "..", "..", "swig"))
    from blocks_swig import *
from stream_to_vector_decimator import *
#alias old add_vXX and multiply_vXX
# Backwards-compatible aliases: the old vector-suffixed block names map onto
# the unified implementations.
add_vcc = add_cc
add_vff = add_ff
add_vii = add_ii
add_vss = add_ss
multiply_vcc = multiply_cc
multiply_vff = multiply_ff
multiply_vii = multiply_ii
multiply_vss = multiply_ss
|
jrxFive/python-nomad | refs/heads/master | nomad/api/jobs.py | 1 | import nomad.api.exceptions
from nomad.api.base import Requester
class Jobs(Requester):
    """
    The jobs endpoint is used to query the status of existing
    jobs in Nomad and to register new jobs.

    By default, the agent's local region is used.

    https://www.nomadproject.io/docs/http/jobs.html
    """

    ENDPOINT = "jobs"

    def __init__(self, **kwargs):
        super(Jobs, self).__init__(**kwargs)

    def __str__(self):
        return "{0}".format(self.__dict__)

    def __repr__(self):
        return "{0}".format(self.__dict__)

    def __getattr__(self, item):
        # Only reached when normal attribute lookup fails.
        msg = "{0} does not exist".format(item)
        raise AttributeError(msg)

    def _find_job(self, item):
        """Return the first job whose ID or Name equals *item*, else None.

        Shared lookup for __contains__ and __getitem__.  May propagate
        nomad.api.exceptions.* raised by the underlying HTTP request.
        """
        for job in self.get_jobs():
            if job["ID"] == item or job["Name"] == item:
                return job
        return None

    def __contains__(self, item):
        """True when a job with the given ID or Name is registered."""
        try:
            return self._find_job(item) is not None
        except nomad.api.exceptions.URLNotFoundNomadException:
            return False

    def __len__(self):
        """Number of jobs currently registered with Nomad."""
        return len(self.get_jobs())

    def __getitem__(self, item):
        """Return the job with the given ID or Name, or raise KeyError."""
        try:
            job = self._find_job(item)
        except nomad.api.exceptions.URLNotFoundNomadException:
            raise KeyError(item)
        if job is None:
            raise KeyError(item)
        return job

    def __iter__(self):
        """Iterate over the job list as returned by the API."""
        return iter(self.get_jobs())

    def get_jobs(self, prefix=None, namespace=None):
        """ Lists all the jobs registered with Nomad.

           https://www.nomadproject.io/docs/http/jobs.html

            arguments:
              - prefix :(str) optional, specifies a string to filter jobs on based on an prefix.
                        This is specified as a querystring parameter.
              - namespace :(str) optional, specifies the target namespace. Specifying * would return all jobs.
                        This is specified as a querystring parameter.

            returns: list
            raises:
              - nomad.api.exceptions.BaseNomadException
              - nomad.api.exceptions.URLNotFoundNomadException
        """
        params = {"prefix": prefix, "namespace": namespace}
        return self.request(method="get", params=params).json()

    def register_job(self, job):
        """ Register a job with Nomad.

           https://www.nomadproject.io/docs/http/jobs.html

            arguments:
              - job :(dict) JSON job specification to submit.

            returns: dict
            raises:
              - nomad.api.exceptions.BaseNomadException
              - nomad.api.exceptions.URLNotFoundNomadException
        """
        return self.request(json=job, method="post").json()

    def parse(self, hcl, canonicalize=False):
        """ Parse a HCL Job file. Returns a dict with the JSON formatted job.

           This API endpoint is only supported from Nomad version 0.8.3.

           https://www.nomadproject.io/api/jobs.html#parse-job

            arguments:
              - hcl :(str) raw HCL job specification.
              - canonicalize :(bool) fill in default values while parsing.

            returns: dict
            raises:
              - nomad.api.exceptions.BaseNomadException
              - nomad.api.exceptions.URLNotFoundNomadException
        """
        return self.request("parse", json={"JobHCL": hcl, "Canonicalize": canonicalize}, method="post", allow_redirects=True).json()
|
skristiansson/eco32-linux | refs/heads/master | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
# Optional single argument: a numeric pid or a comm (process name) used
# to restrict which syscalls are counted.
for_comm = None
for_pid = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except:
		# Not an integer, so treat the argument as a comm name.
		for_comm = sys.argv[1]
# syscalls[comm][pid][syscall_id] -> count; autodict creates levels lazily.
syscalls = autodict()
def trace_begin():
	# Called by perf once before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf after the last event; emit the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Per-event hook for the raw_syscalls:sys_enter tracepoint.
	# Skip events that do not match the optional comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	try:
		# An autodict leaf starts out as a fresh (non-int) node, so the
		# first += raises TypeError, which seeds the counter at 1.
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
	# Print a per-comm/pid table of syscall counts, sorted by count
	# (descending) within each pid.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events by comm/pid:\n\n",
	print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
	                         "----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			# Sort by (count, id) descending; Python 2 tuple-unpack lambda.
			for id, val in sorted(syscalls[comm][pid].iteritems(), \
				key = lambda(k, v): (v, k), reverse = True):
				print "  %-38s %10d\n" % (syscall_name(id), val),
|
ido-ran/ran-smart-frame2 | refs/heads/master | web/server/lib/oauth2client/_pure_python_crypt.py | 59 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure Python crypto-related routines for oauth2client.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates.
"""
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
class RsaVerifier(object):
    """Checks RSA signatures on messages.

    Args:
        pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
    """

    def __init__(self, pubkey):
        self._pubkey = pubkey

    def verify(self, message, signature):
        """Check *signature* against *message* with the held public key.

        Args:
            message: string or bytes, The message to verify. Strings are
                     encoded to bytes as utf-8.
            signature: string or bytes, The signature on the message. If
                       string, will be encoded to bytes as utf-8.

        Returns:
            True when the signature matches the message under this
            verifier's public key, False otherwise.
        """
        as_bytes = _helpers._to_bytes(message, encoding='utf-8')
        try:
            return rsa.pkcs1.verify(as_bytes, signature, self._pubkey)
        except (ValueError, rsa.pkcs1.VerificationError):
            # A malformed signature and a verification failure both
            # simply mean "not valid".
            return False

    @classmethod
    def from_string(cls, key_pem, is_x509_cert):
        """Build an RsaVerifier from PEM text.

        Args:
            key_pem: string, public key in PEM format.
            is_x509_cert: bool, True when key_pem is an X509 cert; False
                          when it is an RSA public key in PEM format.

        Returns:
            RsaVerifier instance.

        Raises:
            ValueError: if the key_pem can't be parsed. The error begins
                        with 'No PEM start marker' when the expected block
                        ("-----BEGIN CERTIFICATE-----" or
                        "-----BEGIN RSA PUBLIC KEY-----") is missing, or
                        reports trailing DER bytes.
        """
        pem_bytes = _helpers._to_bytes(key_pem)
        if not is_x509_cert:
            return cls(rsa.PublicKey.load_pkcs1(pem_bytes, 'PEM'))
        # X509 path: peel the certificate, then pull the RSA public key
        # out of the tbsCertificate's subjectPublicKeyInfo bit string.
        der = rsa.pem.load_pem(pem_bytes, 'CERTIFICATE')
        asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
        if remaining != b'':
            raise ValueError('Unused bytes', remaining)
        cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
        key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
        return cls(rsa.PublicKey.load_pkcs1(key_bytes, 'DER'))
class RsaSigner(object):
    """Produces RSA signatures for messages.

    Args:
        pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
    """

    def __init__(self, pkey):
        self._key = pkey

    def sign(self, message):
        """Sign *message* with the held private key.

        Args:
            message: bytes (or utf-8 encodable string), Message to be
                     signed.

        Returns:
            string, The SHA-256 RSA signature of the message.
        """
        as_bytes = _helpers._to_bytes(message, encoding='utf-8')
        return rsa.pkcs1.sign(as_bytes, self._key, 'SHA-256')

    @classmethod
    def from_string(cls, key, password='notasecret'):
        """Build an RsaSigner from a PEM-encoded private key.

        Args:
            key: string, private key in PKCS#1 or PKCS#8 PEM format.
            password: string, password for private key file. Unused for PEM
                      files.

        Returns:
            RsaSigner instance.

        Raises:
            ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
            PEM format.
        """
        key = _helpers._from_bytes(key)  # pem expects str in Py3
        marker_id, key_bytes = pem.readPemBlocksFromFile(
            six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
        if marker_id == 0:
            # PKCS#1: the PEM body is the bare RSA key in DER form.
            return cls(rsa.key.PrivateKey.load_pkcs1(key_bytes,
                                                     format='DER'))
        if marker_id == 1:
            # PKCS#8: unwrap the PrivateKeyInfo envelope first.
            key_info, remaining = decoder.decode(
                key_bytes, asn1Spec=_PKCS8_SPEC)
            if remaining != b'':
                raise ValueError('Unused bytes', remaining)
            pkey_info = key_info.getComponentByName('privateKey')
            return cls(rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
                                                     format='DER'))
        raise ValueError('No key could be detected.')
|
anilmuthineni/tensorflow | refs/heads/master | tensorflow/python/client/device_lib_test.py | 7 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped device lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import device_lib
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class DeviceLibTest(test_util.TensorFlowTestCase):
  """Sanity checks for device_lib's local device enumeration."""

  def testListLocalDevices(self):
    """At least one device must exist, and the first must be the CPU."""
    devices = device_lib.list_local_devices()
    self.assertGreater(len(devices), 0)
    self.assertEqual(devices[0].device_type, "CPU")
    # When TF was built with CUDA, a GPU device must also be listed.
    if test.is_built_with_cuda():
      self.assertGreater(len(devices), 1)
      device_types = [d.device_type for d in devices]
      self.assertTrue("GPU" in device_types)
# Standard test entry point: run all test cases in this module.
if __name__ == "__main__":
  googletest.main()
|
sudheesh001/pontoon | refs/heads/master | pontoon/base/__init__.py | 3 | """Application base, containing global templates."""
default_app_config = 'pontoon.base.apps.BaseConfig'

# Mercurial SSH URLs of the Mozilla localization repositories, covering
# the beta and aurora channels of each product.
MOZILLA_REPOS = (
    'ssh://hg.mozilla.org/users/m_owca.info/firefox-beta/',
    'ssh://hg.mozilla.org/users/m_owca.info/firefox-for-android-beta/',
    'ssh://hg.mozilla.org/users/m_owca.info/thunderbird-beta/',
    'ssh://hg.mozilla.org/users/m_owca.info/lightning-beta/',
    'ssh://hg.mozilla.org/users/m_owca.info/seamonkey-beta/',
    'ssh://hg.mozilla.org/users/m_owca.info/firefox-aurora/',
    'ssh://hg.mozilla.org/users/m_owca.info/firefox-for-android-aurora/',
    'ssh://hg.mozilla.org/users/m_owca.info/thunderbird-aurora/',
    'ssh://hg.mozilla.org/users/m_owca.info/lightning-aurora/',
    'ssh://hg.mozilla.org/users/m_owca.info/seamonkey-aurora/',
)
|
brandsExclusive/msnp | refs/heads/master | msnp/error.py | 3 | # error.py -- Error class(es)
#
# Copyright (C) 2003 Manish Jethani (manish_jethani AT yahoo.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class Error(Exception):
    """Generic exception type.

    Derives from Exception so instances can actually be raised and caught
    (a plain class cannot be raised under Python 3).

    Attributes:
        code: error code identifying the failure.
        message: human-readable description.
    """

    def __init__(self, code, message):
        Exception.__init__(self, message)
        self.code = code
        self.message = message

    def __str__(self):
        # Preserve the historical "<code>:<message>" rendering.
        return str(self.code) + ':' + self.message
class HttpError(Error):
    """Error returned from HTTP server.

    Carries the HTTP status line alongside the protocol error code.
    """

    def __init__(self, code, message, http_status, http_reason):
        Error.__init__(self, code, message)
        self.http_status = http_status
        self.http_reason = http_reason

    def __str__(self):
        base = Error.__str__(self)
        http_part = '[%s:%s]' % (str(self.http_status), self.http_reason)
        return base + http_part
# vim: set ts=4 sw=4 et tw=79 :
|
soldag/home-assistant | refs/heads/dev | homeassistant/components/smart_meter_texas/config_flow.py | 10 | """Config flow for Smart Meter Texas integration."""
import asyncio
import logging
from aiohttp import ClientError
from smart_meter_texas import Account, Client
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    Raises CannotConnect on network/API failures and InvalidAuth on bad
    credentials; on success returns the config-entry info.
    """
    session = aiohttp_client.async_get_clientsession(hass)
    account = Account(data["username"], data["password"])
    client = Client(session, account)

    try:
        await client.authenticate()
    except (asyncio.TimeoutError, ClientError, SmartMeterTexasAPIError) as error:
        raise CannotConnect from error
    except SmartMeterTexasAuthError as error:
        raise InvalidAuth(error) from error

    # The username doubles as the config entry title.
    return {"title": account.username}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Smart Meter Texas."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    async def async_step_user(self, user_input=None):
        """Handle the initial step.

        Shows the credentials form on first entry; on submission validates
        the credentials and either creates the entry or re-shows the form
        with an error key.
        """
        errors = {}
        if user_input is not None:
            try:
                info = await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                # NOTE(review): errors is always empty on this success path
                # (it is only populated in the except branches), so this
                # check looks redundant — confirm before simplifying.
                if not errors:
                    # Ensure the same account cannot be setup more than once.
                    await self.async_set_unique_id(user_input[CONF_USERNAME])
                    self._abort_if_unique_id_configured()
                    return self.async_create_entry(title=info["title"], data=user_input)

        # Initial display, or re-prompt with any error keys set.
        return self.async_show_form(
            step_id="user", data_schema=DATA_SCHEMA, errors=errors
        )
# Raised by validate_input on network/API failures; mapped to the
# "cannot_connect" form error in the config flow.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
# Raised by validate_input on SmartMeterTexasAuthError; mapped to the
# "invalid_auth" form error in the config flow.
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
|
mixmar91/rabbitmq | refs/heads/master | python/rpc_client.py | 17 | #!/usr/bin/env python
import pika
import uuid
class FibonacciRpcClient(object):
    """Blocking RPC client: publishes requests to 'rpc_queue' and waits
    for the matching reply on an exclusive callback queue."""

    def __init__(self):
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(
                host='localhost'))

        self.channel = self.connection.channel()

        # Exclusive, server-named queue that receives the RPC replies.
        result = self.channel.queue_declare(exclusive=True)
        self.callback_queue = result.method.queue

        self.channel.basic_consume(self.on_response, no_ack=True,
                                   queue=self.callback_queue)

    def on_response(self, ch, method, props, body):
        # Accept only the reply whose correlation id matches the
        # outstanding request; stale replies are ignored.
        if self.corr_id == props.correlation_id:
            self.response = body

    def call(self, n):
        """Send n to the 'rpc_queue' worker and block until the reply
        arrives; returns the result as an int."""
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.channel.basic_publish(exchange='',
                                   routing_key='rpc_queue',
                                   properties=pika.BasicProperties(
                                         reply_to = self.callback_queue,
                                         correlation_id = self.corr_id,
                                         ),
                                   body=str(n))
        # Pump the connection until on_response fills in self.response.
        while self.response is None:
            self.connection.process_data_events()
        return int(self.response)
fibonacci_rpc = FibonacciRpcClient()

# Python 2 print statements: request fib(30) over RPC and show the reply.
print " [x] Requesting fib(30)"
response = fibonacci_rpc.call(30)
print " [.] Got %r" % (response,)
|
OpenXT/sync-database | refs/heads/master | sync_db_test/test_admin_repo.py | 1 | #
# Copyright (c) 2012 Citrix Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import py.test
import sync_db.error
import raises
def test_add_repo_invalid(admin):
    """add_repo must reject each invalid field with the matching error.

    Every call below raises, so no repo state accumulates and the cases
    are order-independent; a data-driven loop replaces twelve identical
    with-raises blocks.
    """
    # (expected error, (release, build, file_path, file_size, file_hash))
    cases = [
        (sync_db.error.RELEASE_REQUIRED, (None, "build", "path", 1, "1" * 64)),
        (sync_db.error.RELEASE_TOO_LONG, ("x" * 101, "build", "path", 1, "1" * 64)),
        (sync_db.error.BUILD_REQUIRED, ("release", None, "path", 1, "1" * 64)),
        (sync_db.error.BUILD_TOO_LONG, ("release", "x" * 101, "path", 1, "1" * 64)),
        (sync_db.error.FILE_PATH_REQUIRED, ("release", "build", None, 1, "1" * 64)),
        (sync_db.error.FILE_PATH_TOO_LONG, ("release", "build", "x" * 1025, 1, "1" * 64)),
        (sync_db.error.FILE_SIZE_REQUIRED, ("release", "build", "path", None, "1" * 64)),
        (sync_db.error.FILE_SIZE_INVALID, ("release", "build", "path", -1, "1" * 64)),
        (sync_db.error.FILE_SIZE_INVALID, ("release", "build", "path", 1e13, "1" * 64)),
        (sync_db.error.FILE_HASH_REQUIRED, ("release", "build", "path", 1, None)),
        (sync_db.error.FILE_HASH_INVALID, ("release", "build", "path", 1, "1" * 63)),
        (sync_db.error.FILE_HASH_INVALID, ("release", "build", "path", 1, "1" * 65)),
    ]
    for expected_error, args in cases:
        with raises.sync_error(expected_error):
            admin.sa.add_repo(*args)
def test_add_repo_duplicate(admin):
    # A duplicate (release, build) pair and a reused file path must each
    # be rejected; a new build with a new path is accepted.
    admin.sa.add_repo("release1", "build1", "path1", 1, "1" * 64)
    with raises.sync_error(sync_db.error.REPO_EXISTS):
        admin.sa.add_repo("release1", "build1", "path2", 2, "2" * 64)
    admin.sa.add_repo("release1", "build2", "path2", 2, "2" * 64)
    with raises.sync_error(sync_db.error.FILE_PATH_NOT_UNIQUE):
        admin.sa.add_repo("release1", "build3", "path1", 3, "3" * 64)
def test_complete_repo_uuid(admin):
    # Prefix completion: the empty prefix matches every repo (sorted), a
    # full uuid matches only itself, and an unknown prefix matches nothing.
    repo1 = admin.sa.add_repo("release1", "build1", "path1", 1, "1" * 64)
    repo2 = admin.sa.add_repo("release2", "build2", "path2", 2, "2" * 64)
    assert admin.sa.complete_repo_uuid("") == sorted([repo1, repo2])
    assert admin.sa.complete_repo_uuid(repo1) == [repo1]
    assert admin.sa.complete_repo_uuid("x") == []
def test_get_repo(admin):
    # get_repo returns the full repo tuple; unknown uuids and repos that
    # have been removed both raise REPO_NOT_FOUND.
    repo1 = admin.sa.add_repo("release1", "build1", "path1", 1, "1" * 64)
    repo2 = admin.sa.add_repo("release2", "build2", "path2", 2, "2" * 64)
    assert admin.sa.get_repo(repo1) == (repo1, "release1", "build1", "path1",
                                        1, "1" * 64)
    assert admin.sa.get_repo(repo2) == (repo2, "release2", "build2", "path2",
                                        2, "2" * 64)
    with raises.sync_error(sync_db.error.REPO_NOT_FOUND):
        admin.sa.get_repo("unknown")
    admin.sa.remove_repo(repo1)
    with raises.sync_error(sync_db.error.REPO_NOT_FOUND):
        admin.sa.get_repo(repo1)
def test_list_repos(admin):
    # Exercise every single-filter variant of list_repos; filters are
    # mutually exclusive (MULTIPLE_FILTERS when two are combined).
    repo1 = admin.sa.add_repo("release1", "build1", "path1", 1, "1" * 64)
    repo2 = admin.sa.add_repo("release1", "build2", "path2", 2, "2" * 64)
    repo3 = admin.sa.add_repo("release2", "build2", "path3", 3, "3" * 64)
    repo1_row = (repo1, "release1", "build1", "path1")
    repo2_row = (repo2, "release1", "build2", "path2")
    repo3_row = (repo3, "release2", "build2", "path3")
    with raises.sync_error(sync_db.error.MULTIPLE_FILTERS):
        admin.sa.list_repos(release="release1", build="build1")
    assert admin.sa.list_repos() == sorted([repo1_row, repo2_row, repo3_row])
    assert admin.sa.list_repos(release="unknown") == []
    assert admin.sa.list_repos(build="unknown") == []
    assert admin.sa.list_repos(file_path="unknown") == []
    assert admin.sa.list_repos(file_hash="unknown") == []
    assert admin.sa.list_repos(release="release1") == sorted([repo1_row,
                                                              repo2_row])
    assert admin.sa.list_repos(build="build2") == sorted([repo2_row,
                                                          repo3_row])
    assert admin.sa.list_repos(file_path="path1") == [repo1_row]
    assert admin.sa.list_repos(file_hash="1" * 64) == [repo1_row]
    # A repo assigned to a device no longer counts as unused.
    admin.sa.add_device("device", repo_uuid=repo1)
    assert admin.sa.list_repos(unused=True) == sorted([repo2_row, repo3_row])
def test_remove_repo(admin):
    # Removal fails for unknown repos and for repos still assigned to a
    # device; once the device is removed the repo can be removed exactly
    # once (a second attempt raises REPO_NOT_FOUND).
    repo1 = admin.sa.add_repo("release1", "build1", "path1", 1, "1" * 64)
    repo2 = admin.sa.add_repo("release2", "build2", "path2", 2, "2" * 64)
    with raises.sync_error(sync_db.error.REPO_NOT_FOUND):
        admin.sa.remove_repo("unknown")
    device = admin.sa.add_device("device", repo_uuid=repo1)
    with raises.sync_error(sync_db.error.REPO_IN_USE):
        admin.sa.remove_repo(repo1)
    admin.sa.remove_repo(repo2)
    admin.sa.remove_device(device)
    admin.sa.remove_repo(repo1)
    with raises.sync_error(sync_db.error.REPO_NOT_FOUND):
        admin.sa.remove_repo(repo1)
|
bonitadecker77/python-for-android | refs/heads/master | python-modules/twisted/twisted/test/reflect_helper_ZDE.py | 101 |
# Helper module for a test_reflect test
# Deliberately raises ZeroDivisionError at import time so the tests can
# exercise reflect's handling of modules that fail to import.
1/0
|
srluge/SickRage | refs/heads/master | lib/html5lib/treewalkers/dom.py | 1229 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
import gettext
_ = gettext.gettext
from . import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
    """html5lib tree walker over xml.dom trees (e.g. minidom).

    Translates DOM nodes into the token tuples expected by
    NonRecursiveTreeWalker.
    """

    def getNodeDetails(self, node):
        """Return the html5lib token tuple describing *node*."""
        if node.nodeType == Node.DOCUMENT_TYPE_NODE:
            return _base.DOCTYPE, node.name, node.publicId, node.systemId

        elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
            return _base.TEXT, node.nodeValue

        elif node.nodeType == Node.ELEMENT_NODE:
            attrs = {}
            # Re-key attributes as (namespace, local name) pairs; the
            # namespace slot is None for un-namespaced attributes.
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                if attr.namespaceURI:
                    attrs[(attr.namespaceURI, attr.localName)] = attr.value
                else:
                    attrs[(None, attr.name)] = attr.value
            return (_base.ELEMENT, node.namespaceURI, node.nodeName,
                    attrs, node.hasChildNodes())

        elif node.nodeType == Node.COMMENT_NODE:
            return _base.COMMENT, node.nodeValue

        elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
            return (_base.DOCUMENT,)

        else:
            return _base.UNKNOWN, node.nodeType

    def getFirstChild(self, node):
        """First child of *node* (or None)."""
        return node.firstChild

    def getNextSibling(self, node):
        """Next sibling of *node* (or None)."""
        return node.nextSibling

    def getParentNode(self, node):
        """Parent of *node* (or None at the root)."""
        return node.parentNode
|
duanwujie/depot_tools | refs/heads/master | third_party/protobuf26/descriptor_pb2.py | 26 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/descriptor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from protobuf26 import descriptor as _descriptor
from protobuf26 import message as _message
from protobuf26 import reflection as _reflection
from protobuf26 import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/descriptor.proto',
package='google.protobuf',
serialized_pb=_b('\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xcb\x03\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x19\n\x11public_dependency\x18\n \x03(\x05\x12\x17\n\x0fweak_dependency\x18\x0b \x03(\x05\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\x12\x39\n\x10source_code_info\x18\t \x01(\x0b\x32\x1f.google.protobuf.SourceCodeInfo\"\xe4\x03\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x39\n\noneof_decl\x18\x08 \x03(\x0b\x32%.google.protobuf.OneofDescriptorProto\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x1a,\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"\xa9\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 
\x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12\x13\n\x0boneof_index\x18\t \x01(\x05\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"$\n\x14OneofDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x8c\x01\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\x7f\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\"\xab\x04\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n 
\x01(\x08:\x05\x66\x61lse\x12,\n\x1djava_generate_equals_and_hash\x18\x14 \x01(\x08:\x05\x66\x61lse\x12%\n\x16java_string_check_utf8\x18\x1b \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12\x12\n\ngo_package\x18\x0b \x01(\t\x12\"\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x05\x66\x61lse\x12$\n\x15java_generic_services\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13py_generic_services\x18\x12 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xd3\x01\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xbe\x02\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12\x13\n\x04lazy\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\x14\x65xperimental_map_key\x18\t \x01(\t\x12\x13\n\x04weak\x18\n \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x8d\x01\n\x0b\x45numOptions\x12\x13\n\x0b\x61llow_alias\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"}\n\x10\x45numValueOptions\x12\x19\n\ndeprecated\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"{\n\x0eServiceOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"z\n\rMethodOptions\x12\x19\n\ndeprecated\x18! \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x9e\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x12\x17\n\x0f\x61ggregate_value\x18\x08 \x01(\t\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\"\xb1\x01\n\x0eSourceCodeInfo\x12:\n\x08location\x18\x01 \x03(\x0b\x32(.google.protobuf.SourceCodeInfo.Location\x1a\x63\n\x08Location\x12\x10\n\x04path\x18\x01 \x03(\x05\x42\x02\x10\x01\x12\x10\n\x04span\x18\x02 \x03(\x05\x42\x02\x10\x01\x12\x18\n\x10leading_comments\x18\x03 \x01(\t\x12\x19\n\x11trailing_comments\x18\x04 \x01(\tB)\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated EnumDescriptor for FieldDescriptorProto.Type: the field type
# codes TYPE_DOUBLE (=1) through TYPE_SINT64 (=18) from descriptor.proto.
_FIELDDESCRIPTORPROTO_TYPE = _descriptor.EnumDescriptor(
  name='Type',
  full_name='google.protobuf.FieldDescriptorProto.Type',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TYPE_DOUBLE', index=0, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_FLOAT', index=1, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_INT64', index=2, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_UINT64', index=3, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_INT32', index=4, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_FIXED64', index=5, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_FIXED32', index=6, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_BOOL', index=7, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_STRING', index=8, number=9,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_GROUP', index=9, number=10,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_MESSAGE', index=10, number=11,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_BYTES', index=11, number=12,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_UINT32', index=12, number=13,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_ENUM', index=13, number=14,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SFIXED32', index=14, number=15,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SFIXED64', index=15, number=16,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SINT32', index=16, number=17,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TYPE_SINT64', index=17, number=18,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  # Byte offsets of this enum's definition inside serialized_pb.
  serialized_start=1378,
  serialized_end=1688,
)
_sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_TYPE)
# Generated EnumDescriptor for FieldDescriptorProto.Label
# (LABEL_OPTIONAL=1, LABEL_REQUIRED=2, LABEL_REPEATED=3).
_FIELDDESCRIPTORPROTO_LABEL = _descriptor.EnumDescriptor(
  name='Label',
  full_name='google.protobuf.FieldDescriptorProto.Label',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='LABEL_OPTIONAL', index=0, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LABEL_REQUIRED', index=1, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LABEL_REPEATED', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  # Byte offsets of this enum's definition inside serialized_pb.
  serialized_start=1690,
  serialized_end=1757,
)
_sym_db.RegisterEnumDescriptor(_FIELDDESCRIPTORPROTO_LABEL)
# Generated EnumDescriptor for FileOptions.OptimizeMode
# (SPEED=1, CODE_SIZE=2, LITE_RUNTIME=3).
_FILEOPTIONS_OPTIMIZEMODE = _descriptor.EnumDescriptor(
  name='OptimizeMode',
  full_name='google.protobuf.FileOptions.OptimizeMode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SPEED', index=0, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CODE_SIZE', index=1, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LITE_RUNTIME', index=2, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  # Byte offsets of this enum's definition inside serialized_pb.
  serialized_start=2813,
  serialized_end=2871,
)
_sym_db.RegisterEnumDescriptor(_FILEOPTIONS_OPTIMIZEMODE)
# Generated EnumDescriptor for FieldOptions.CType
# (STRING=0, CORD=1, STRING_PIECE=2).
_FIELDOPTIONS_CTYPE = _descriptor.EnumDescriptor(
  name='CType',
  full_name='google.protobuf.FieldOptions.CType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='STRING', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='CORD', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STRING_PIECE', index=2, number=2,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  # Byte offsets of this enum's definition inside serialized_pb.
  serialized_start=3359,
  serialized_end=3406,
)
_sym_db.RegisterEnumDescriptor(_FIELDOPTIONS_CTYPE)
# Generated Descriptor for google.protobuf.FileDescriptorSet:
# a single repeated message field `file` (FileDescriptorProto).
_FILEDESCRIPTORSET = _descriptor.Descriptor(
  name='FileDescriptorSet',
  full_name='google.protobuf.FileDescriptorSet',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=53,
  serialized_end=124,
)
# Generated Descriptor for google.protobuf.FileDescriptorProto: one .proto
# file's name, package, dependencies, declared types, options and source info.
_FILEDESCRIPTORPROTO = _descriptor.Descriptor(
  name='FileDescriptorProto',
  full_name='google.protobuf.FileDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='public_dependency', full_name='google.protobuf.FileDescriptorProto.public_dependency', index=3,
      number=10, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='weak_dependency', full_name='google.protobuf.FileDescriptorProto.weak_dependency', index=4,
      number=11, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=5,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=6,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='service', full_name='google.protobuf.FileDescriptorProto.service', index=7,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=8,
      number=7, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.FileDescriptorProto.options', index=9,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='source_code_info', full_name='google.protobuf.FileDescriptorProto.source_code_info', index=10,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=127,
  serialized_end=586,
)
# Generated Descriptor for the nested message
# google.protobuf.DescriptorProto.ExtensionRange: int32 fields start/end.
_DESCRIPTORPROTO_EXTENSIONRANGE = _descriptor.Descriptor(
  name='ExtensionRange',
  full_name='google.protobuf.DescriptorProto.ExtensionRange',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1029,
  serialized_end=1073,
)
# Generated Descriptor for google.protobuf.DescriptorProto: one message
# type's fields, nested types, enums, extension ranges, oneofs and options.
_DESCRIPTORPROTO = _descriptor.Descriptor(
  name='DescriptorProto',
  full_name='google.protobuf.DescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.DescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='field', full_name='google.protobuf.DescriptorProto.field', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='oneof_decl', full_name='google.protobuf.DescriptorProto.oneof_decl', index=6,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.DescriptorProto.options', index=7,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=589,
  serialized_end=1073,
)
# Generated Descriptor for google.protobuf.FieldDescriptorProto: one field
# declaration (name, number, label, type, extendee, default, oneof index).
_FIELDDESCRIPTORPROTO = _descriptor.Descriptor(
  name='FieldDescriptorProto',
  full_name='google.protobuf.FieldDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='oneof_index', full_name='google.protobuf.FieldDescriptorProto.oneof_index', index=7,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=8,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _FIELDDESCRIPTORPROTO_TYPE,
    _FIELDDESCRIPTORPROTO_LABEL,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1076,
  serialized_end=1757,
)
# Generated Descriptor for google.protobuf.OneofDescriptorProto:
# a oneof declaration carrying only a name.
_ONEOFDESCRIPTORPROTO = _descriptor.Descriptor(
  name='OneofDescriptorProto',
  full_name='google.protobuf.OneofDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.OneofDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1759,
  serialized_end=1795,
)
# Generated Descriptor for google.protobuf.EnumDescriptorProto:
# an enum type's name, values and options.
_ENUMDESCRIPTORPROTO = _descriptor.Descriptor(
  name='EnumDescriptorProto',
  full_name='google.protobuf.EnumDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1798,
  serialized_end=1938,
)
# Generated Descriptor for google.protobuf.EnumValueDescriptorProto:
# one enum value's name, number and options.
_ENUMVALUEDESCRIPTORPROTO = _descriptor.Descriptor(
  name='EnumValueDescriptorProto',
  full_name='google.protobuf.EnumValueDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1940,
  serialized_end=2048,
)
# Generated Descriptor for google.protobuf.ServiceDescriptorProto:
# a service's name, methods and options.
_SERVICEDESCRIPTORPROTO = _descriptor.Descriptor(
  name='ServiceDescriptorProto',
  full_name='google.protobuf.ServiceDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2051,
  serialized_end=2195,
)
# Generated Descriptor for google.protobuf.MethodDescriptorProto:
# one RPC method's name, input/output type names and options.
_METHODDESCRIPTORPROTO = _descriptor.Descriptor(
  name='MethodDescriptorProto',
  full_name='google.protobuf.MethodDescriptorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2197,
  serialized_end=2324,
)
# Generated Descriptor for google.protobuf.FileOptions: per-file code-gen
# options (java/go packages, generic services, optimize mode, etc.).
# Extendable in the custom-option range 1000..536870911.
_FILEOPTIONS = _descriptor.Descriptor(
  name='FileOptions',
  full_name='google.protobuf.FileOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2,
      number=10, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='java_generate_equals_and_hash', full_name='google.protobuf.FileOptions.java_generate_equals_and_hash', index=3,
      number=20, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='java_string_check_utf8', full_name='google.protobuf.FileOptions.java_string_check_utf8', index=4,
      number=27, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=5,
      number=9, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=1,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='go_package', full_name='google.protobuf.FileOptions.go_package', index=6,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=7,
      number=16, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=8,
      number=17, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=9,
      number=18, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.FileOptions.deprecated', index=10,
      number=23, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=11,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _FILEOPTIONS_OPTIMIZEMODE,
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=2327,
  serialized_end=2882,
)
# ---------------------------------------------------------------------------
# Message descriptors for descriptor.proto.
# This section is protoc-generated serialisation metadata: each Descriptor
# below mirrors one message declared in google/protobuf/descriptor.proto
# (the *Options messages, UninterpretedOption and SourceCodeInfo).
# NOTE: generated code — do not edit by hand; regenerate with protoc.
# serialized_start/serialized_end are byte offsets into the serialized
# FileDescriptorProto held by DESCRIPTOR (defined earlier in this file).
# ---------------------------------------------------------------------------

# Options attached to a message definition (option message_set_wire_format, ...).
_MESSAGEOPTIONS = _descriptor.Descriptor(
  name='MessageOptions',
  full_name='google.protobuf.MessageOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.MessageOptions.deprecated', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=3,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=2885,
  serialized_end=3096,
)

# Options attached to a field definition (ctype, packed, lazy, ...).
_FIELDOPTIONS = _descriptor.Descriptor(
  name='FieldOptions',
  full_name='google.protobuf.FieldOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='packed', full_name='google.protobuf.FieldOptions.packed', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='lazy', full_name='google.protobuf.FieldOptions.lazy', index=2,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=3,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='experimental_map_key', full_name='google.protobuf.FieldOptions.experimental_map_key', index=4,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='weak', full_name='google.protobuf.FieldOptions.weak', index=5,
      number=10, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=6,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _FIELDOPTIONS_CTYPE,
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=3099,
  serialized_end=3417,
)

# Options attached to an enum definition (allow_alias, deprecated, ...).
_ENUMOPTIONS = _descriptor.Descriptor(
  name='EnumOptions',
  full_name='google.protobuf.EnumOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='allow_alias', full_name='google.protobuf.EnumOptions.allow_alias', index=0,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.EnumOptions.deprecated', index=1,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=2,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=3420,
  serialized_end=3561,
)

# Options attached to an individual enum value.
_ENUMVALUEOPTIONS = _descriptor.Descriptor(
  name='EnumValueOptions',
  full_name='google.protobuf.EnumValueOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.EnumValueOptions.deprecated', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=1,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=3563,
  serialized_end=3688,
)

# Options attached to a service definition.
_SERVICEOPTIONS = _descriptor.Descriptor(
  name='ServiceOptions',
  full_name='google.protobuf.ServiceOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.ServiceOptions.deprecated', index=0,
      number=33, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=1,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=3690,
  serialized_end=3813,
)

# Options attached to a service method definition.
_METHODOPTIONS = _descriptor.Descriptor(
  name='MethodOptions',
  full_name='google.protobuf.MethodOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='deprecated', full_name='google.protobuf.MethodOptions.deprecated', index=0,
      number=33, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=1,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  oneofs=[
  ],
  serialized_start=3815,
  serialized_end=3937,
)

# One dotted component of an option name; nested inside UninterpretedOption.
_UNINTERPRETEDOPTION_NAMEPART = _descriptor.Descriptor(
  name='NamePart',
  full_name='google.protobuf.UninterpretedOption.NamePart',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1,
      number=2, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4175,
  serialized_end=4226,
)

# An option the parser could not resolve; holds the raw parsed value.
_UNINTERPRETEDOPTION = _descriptor.Descriptor(
  name='UninterpretedOption',
  full_name='google.protobuf.UninterpretedOption',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.UninterpretedOption.name', index=0,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4,
      number=6, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5,
      number=7, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='aggregate_value', full_name='google.protobuf.UninterpretedOption.aggregate_value', index=6,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3940,
  serialized_end=4226,
)

# One source-location record; nested inside SourceCodeInfo.
_SOURCECODEINFO_LOCATION = _descriptor.Descriptor(
  name='Location',
  full_name='google.protobuf.SourceCodeInfo.Location',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='path', full_name='google.protobuf.SourceCodeInfo.Location.path', index=0,
      number=1, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='span', full_name='google.protobuf.SourceCodeInfo.Location.span', index=1,
      number=2, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='leading_comments', full_name='google.protobuf.SourceCodeInfo.Location.leading_comments', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='trailing_comments', full_name='google.protobuf.SourceCodeInfo.Location.trailing_comments', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4307,
  serialized_end=4406,
)

# Source-code location info attached to a FileDescriptorProto.
_SOURCECODEINFO = _descriptor.Descriptor(
  name='SourceCodeInfo',
  full_name='google.protobuf.SourceCodeInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='location', full_name='google.protobuf.SourceCodeInfo.location', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_SOURCECODEINFO_LOCATION, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4229,
  serialized_end=4406,
)
# ---------------------------------------------------------------------------
# Cross-reference wiring (protoc-generated — do not edit by hand).
# The Descriptor objects above are built with message_type/enum_type set to
# None because of ordering constraints; these statements patch in the real
# references and register every message type with the file DESCRIPTOR.
# ---------------------------------------------------------------------------
# Link message- and enum-typed fields to their target descriptors.
_FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS
_FILEDESCRIPTORPROTO.fields_by_name['source_code_info'].message_type = _SOURCECODEINFO
_DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE
_DESCRIPTORPROTO.fields_by_name['oneof_decl'].message_type = _ONEOFDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS
_FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL
_FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE
_FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS
_FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO
_FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO
_ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO
_ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS
_ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS
_SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO
_SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS
_METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS
_FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE
_FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS
_MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE
_FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS
_ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION
_UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART
_SOURCECODEINFO_LOCATION.containing_type = _SOURCECODEINFO
_SOURCECODEINFO.fields_by_name['location'].message_type = _SOURCECODEINFO_LOCATION
# Register every top-level message descriptor with the file descriptor.
DESCRIPTOR.message_types_by_name['FileDescriptorSet'] = _FILEDESCRIPTORSET
DESCRIPTOR.message_types_by_name['FileDescriptorProto'] = _FILEDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['DescriptorProto'] = _DESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['FieldDescriptorProto'] = _FIELDDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['OneofDescriptorProto'] = _ONEOFDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['EnumDescriptorProto'] = _ENUMDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['EnumValueDescriptorProto'] = _ENUMVALUEDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['ServiceDescriptorProto'] = _SERVICEDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['MethodDescriptorProto'] = _METHODDESCRIPTORPROTO
DESCRIPTOR.message_types_by_name['FileOptions'] = _FILEOPTIONS
DESCRIPTOR.message_types_by_name['MessageOptions'] = _MESSAGEOPTIONS
DESCRIPTOR.message_types_by_name['FieldOptions'] = _FIELDOPTIONS
DESCRIPTOR.message_types_by_name['EnumOptions'] = _ENUMOPTIONS
DESCRIPTOR.message_types_by_name['EnumValueOptions'] = _ENUMVALUEOPTIONS
DESCRIPTOR.message_types_by_name['ServiceOptions'] = _SERVICEOPTIONS
DESCRIPTOR.message_types_by_name['MethodOptions'] = _METHODOPTIONS
DESCRIPTOR.message_types_by_name['UninterpretedOption'] = _UNINTERPRETEDOPTION
DESCRIPTOR.message_types_by_name['SourceCodeInfo'] = _SOURCECODEINFO
# ---------------------------------------------------------------------------
# Concrete message classes (protoc-generated — do not edit by hand).
# Each class is synthesised by the reflection metaclass from its Descriptor,
# then registered with the default symbol database so it can be looked up by
# full name at runtime.  Nested messages (DescriptorProto.ExtensionRange,
# UninterpretedOption.NamePart, SourceCodeInfo.Location) are created inside
# their parent's class dict.
# ---------------------------------------------------------------------------
FileDescriptorSet = _reflection.GeneratedProtocolMessageType('FileDescriptorSet', (_message.Message,), dict(
  DESCRIPTOR = _FILEDESCRIPTORSET,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorSet)
  ))
_sym_db.RegisterMessage(FileDescriptorSet)
FileDescriptorProto = _reflection.GeneratedProtocolMessageType('FileDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _FILEDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorProto)
  ))
_sym_db.RegisterMessage(FileDescriptorProto)
DescriptorProto = _reflection.GeneratedProtocolMessageType('DescriptorProto', (_message.Message,), dict(
  ExtensionRange = _reflection.GeneratedProtocolMessageType('ExtensionRange', (_message.Message,), dict(
    DESCRIPTOR = _DESCRIPTORPROTO_EXTENSIONRANGE,
    __module__ = 'google.protobuf.descriptor_pb2'
    # @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ExtensionRange)
    ))
  ,
  DESCRIPTOR = _DESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto)
  ))
_sym_db.RegisterMessage(DescriptorProto)
_sym_db.RegisterMessage(DescriptorProto.ExtensionRange)
FieldDescriptorProto = _reflection.GeneratedProtocolMessageType('FieldDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _FIELDDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.FieldDescriptorProto)
  ))
_sym_db.RegisterMessage(FieldDescriptorProto)
OneofDescriptorProto = _reflection.GeneratedProtocolMessageType('OneofDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _ONEOFDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.OneofDescriptorProto)
  ))
_sym_db.RegisterMessage(OneofDescriptorProto)
EnumDescriptorProto = _reflection.GeneratedProtocolMessageType('EnumDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _ENUMDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumDescriptorProto)
  ))
_sym_db.RegisterMessage(EnumDescriptorProto)
EnumValueDescriptorProto = _reflection.GeneratedProtocolMessageType('EnumValueDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _ENUMVALUEDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumValueDescriptorProto)
  ))
_sym_db.RegisterMessage(EnumValueDescriptorProto)
ServiceDescriptorProto = _reflection.GeneratedProtocolMessageType('ServiceDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _SERVICEDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.ServiceDescriptorProto)
  ))
_sym_db.RegisterMessage(ServiceDescriptorProto)
MethodDescriptorProto = _reflection.GeneratedProtocolMessageType('MethodDescriptorProto', (_message.Message,), dict(
  DESCRIPTOR = _METHODDESCRIPTORPROTO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.MethodDescriptorProto)
  ))
_sym_db.RegisterMessage(MethodDescriptorProto)
FileOptions = _reflection.GeneratedProtocolMessageType('FileOptions', (_message.Message,), dict(
  DESCRIPTOR = _FILEOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.FileOptions)
  ))
_sym_db.RegisterMessage(FileOptions)
MessageOptions = _reflection.GeneratedProtocolMessageType('MessageOptions', (_message.Message,), dict(
  DESCRIPTOR = _MESSAGEOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.MessageOptions)
  ))
_sym_db.RegisterMessage(MessageOptions)
FieldOptions = _reflection.GeneratedProtocolMessageType('FieldOptions', (_message.Message,), dict(
  DESCRIPTOR = _FIELDOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.FieldOptions)
  ))
_sym_db.RegisterMessage(FieldOptions)
EnumOptions = _reflection.GeneratedProtocolMessageType('EnumOptions', (_message.Message,), dict(
  DESCRIPTOR = _ENUMOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumOptions)
  ))
_sym_db.RegisterMessage(EnumOptions)
EnumValueOptions = _reflection.GeneratedProtocolMessageType('EnumValueOptions', (_message.Message,), dict(
  DESCRIPTOR = _ENUMVALUEOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumValueOptions)
  ))
_sym_db.RegisterMessage(EnumValueOptions)
ServiceOptions = _reflection.GeneratedProtocolMessageType('ServiceOptions', (_message.Message,), dict(
  DESCRIPTOR = _SERVICEOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.ServiceOptions)
  ))
_sym_db.RegisterMessage(ServiceOptions)
MethodOptions = _reflection.GeneratedProtocolMessageType('MethodOptions', (_message.Message,), dict(
  DESCRIPTOR = _METHODOPTIONS,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.MethodOptions)
  ))
_sym_db.RegisterMessage(MethodOptions)
UninterpretedOption = _reflection.GeneratedProtocolMessageType('UninterpretedOption', (_message.Message,), dict(
  NamePart = _reflection.GeneratedProtocolMessageType('NamePart', (_message.Message,), dict(
    DESCRIPTOR = _UNINTERPRETEDOPTION_NAMEPART,
    __module__ = 'google.protobuf.descriptor_pb2'
    # @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption.NamePart)
    ))
  ,
  DESCRIPTOR = _UNINTERPRETEDOPTION,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption)
  ))
_sym_db.RegisterMessage(UninterpretedOption)
_sym_db.RegisterMessage(UninterpretedOption.NamePart)
SourceCodeInfo = _reflection.GeneratedProtocolMessageType('SourceCodeInfo', (_message.Message,), dict(
  Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), dict(
    DESCRIPTOR = _SOURCECODEINFO_LOCATION,
    __module__ = 'google.protobuf.descriptor_pb2'
    # @@protoc_insertion_point(class_scope:google.protobuf.SourceCodeInfo.Location)
    ))
  ,
  DESCRIPTOR = _SOURCECODEINFO,
  __module__ = 'google.protobuf.descriptor_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.SourceCodeInfo)
  ))
_sym_db.RegisterMessage(SourceCodeInfo)
_sym_db.RegisterMessage(SourceCodeInfo.Location)
# @@protoc_insertion_point(module_scope)
|
paulondc/gaffer | refs/heads/master | python/GafferSceneTest/PathMatcherTest.py | 1 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import random
import IECore
import Gaffer
import GafferScene
class PathMatcherTest( unittest.TestCase ) :
@staticmethod
def generatePaths( seed, depthRange, numChildrenRange ) :
nouns = [
"Ball", "Building", "Car", "Tree", "Rampart", "Head", "Arm",
"Window", "Door", "Trailer", "Light", "FlockOfBirds", "Herd", "Sheep",
"Cow", "Wing", "Engine", "Mast", "Rock", "Road", "Sign",
]
adjectives = [
"big", "red", "metallic", "left", "right", "top", "bottom", "wooden",
"front", "back", "lower", "upper", "magnificent", "hiRes", "loRes",
]
paths = []
def buildWalk( parent="", depth=1 ) :
if depth > random.randint( *depthRange ) :
return
for i in range( 0, random.randint( *numChildrenRange ) ) :
path = parent + "/" + random.choice( adjectives ) + random.choice( nouns ) + str( i )
paths.append( path )
buildWalk( path, depth + 1 )
random.seed( seed )
buildWalk()
return paths
def testMatch( self ) :
    """PathMatcher.match() classifies a query path against the registered
    paths as Exact / Ancestor / Descendant / No match."""

    Result = GafferScene.Filter.Result
    matcher = GafferScene.PathMatcher( [ "/a", "/red", "/b/c/d" ] )

    expectations = (
        ( "/a", Result.ExactMatch ),
        ( "/red", Result.ExactMatch ),
        ( "/re", Result.NoMatch ),
        ( "/redThing", Result.NoMatch ),
        ( "/b/c/d", Result.ExactMatch ),
        ( "/c", Result.NoMatch ),
        ( "/a/b", Result.AncestorMatch ),
        ( "/blue", Result.NoMatch ),
        ( "/b/c", Result.DescendantMatch ),
    )

    for queryPath, expected in expectations :
        self.assertEqual( matcher.match( queryPath ), expected )
def testLookupScaling( self ) :
    """Performance smoke test for the PathMatcher algorithm.

    Builds matchers for two contrasting hierarchies — a deep one with few
    children per branch point, and a shallow one with many — and asserts
    that every generated path reports an exact match.  Uncomment the print
    statements to see build/lookup timings.
    """

    exactMatch = GafferScene.Filter.Result.ExactMatch

    # Deep hierarchy : many levels, 2-6 children per branch point.
    deepPaths = self.generatePaths( seed = 10, depthRange = ( 3, 14 ), numChildrenRange = ( 2, 6 ) )
    timer = IECore.Timer()
    deepMatcher = GafferScene.PathMatcher( deepPaths )
    #print "BUILD DEEP", timer.stop()
    timer = IECore.Timer()
    for deepPath in deepPaths :
        self.assertTrue( deepMatcher.match( deepPath ) & exactMatch )
    #print "LOOKUP DEEP", timer.stop()

    # Shallow hierarchy : two levels, 500-1000 children per branch point.
    shallowPaths = self.generatePaths( seed = 10, depthRange = ( 2, 2 ), numChildrenRange = ( 500, 1000 ) )
    timer = IECore.Timer()
    shallowMatcher = GafferScene.PathMatcher( shallowPaths )
    #print "BUILD SHALLOW", timer.stop()
    timer = IECore.Timer()
    for shallowPath in shallowPaths :
        self.assertTrue( shallowMatcher.match( shallowPath ) & exactMatch )
    #print "LOOKUP SHALLOW", timer.stop()
def testDefaultConstructor( self ) :
	"""A default constructed matcher must match nothing, not even the root."""

	emptyMatcher = GafferScene.PathMatcher()
	self.assertEqual( emptyMatcher.match( "/" ), GafferScene.Filter.Result.NoMatch )
def testWildcards( self ) :
	"""Checks '*' wildcard behaviour within individual path elements."""

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue(
		IECore.StringVectorData( [
			"/a",
			"/red*",
			"/green*Bloke*",
			"/somewhere/over/the/*",
			"/somewhere/over/the/*/skies/are/blue",
		] )
	)

	Result = pathFilter.Result

	def assertFilterMatch( path, expected ) :
		context = Gaffer.Context()
		context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
		with context :
			self.assertEqual( pathFilter["match"].getValue(), int( expected ) )

	for path, expected in [
		( "/a", Result.ExactMatch ),
		( "/redBoots", Result.ExactMatch ),
		( "/red", Result.ExactMatch ),
		( "/redWellies", Result.ExactMatch ),
		( "/redWellies/in/puddles", Result.AncestorMatch ),
		( "/greenFatBloke", Result.ExactMatch ),
		( "/greenBloke", Result.ExactMatch ),
		( "/greenBlokes", Result.ExactMatch ),
		( "/somewhere/over/the/rainbow", Result.ExactMatch | Result.DescendantMatch ),
		( "/somewhere/over/the", Result.DescendantMatch ),
		( "/somewhere/over", Result.DescendantMatch ),
		( "/somewhere", Result.DescendantMatch ),
		( "/somewhere/over/the/rainbow/skies/are/blue", Result.ExactMatch | Result.AncestorMatch ),
		( "/somewhere/over/the/rainbow/skies/are", Result.DescendantMatch | Result.AncestorMatch ),
		( "/somewhere/over/the/astonExpressway/skies/are", Result.DescendantMatch | Result.AncestorMatch ),
		( "/somewhere/over/the/astonExpressway/skies/are/blue", Result.ExactMatch | Result.AncestorMatch ),
		( "/somewhere/over/the/astonExpressway/skies/are/grey", Result.AncestorMatch ),
	] :
		assertFilterMatch( path, expected )
def testWildcardsWithSiblings( self ) :
	"""Matching one wildcard pattern must not hide a sibling pattern."""

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue(
		IECore.StringVectorData( [
			"/a/*/b",
			"/a/a*/c",
		] )
	)

	def assertFilterMatch( path, expected ) :
		context = Gaffer.Context()
		context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
		with context :
			self.assertEqual( pathFilter["match"].getValue(), int( expected ) )

	assertFilterMatch( "/a/aThing/c", pathFilter.Result.ExactMatch )
	assertFilterMatch( "/a/aThing/b", pathFilter.Result.ExactMatch )
def testRepeatedWildcards( self ) :
	"""Consecutive '*' characters within one element must still match."""

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue( IECore.StringVectorData( [ "/a/**s" ] ) )

	context = Gaffer.Context()
	context["scene:path"] = IECore.InternedStringVectorData( [ "a", "s" ] )
	with context :
		self.assertEqual( pathFilter["match"].getValue(), int( GafferScene.Filter.Result.ExactMatch ) )
def testEllipsis( self ) :
	"""Checks that '...' matches any number of intermediate elements."""

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue(
		IECore.StringVectorData( [
			"/a/.../b*",
			"/a/c",
		] )
	)

	Result = pathFilter.Result

	for path, expected in [
		( "/a/ball", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/red/ball", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/red/car", Result.DescendantMatch ),
		( "/a/big/red/ball", Result.ExactMatch | Result.DescendantMatch | Result.AncestorMatch ),
		( "/a/lovely/shiny/bicyle", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/c", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/d", Result.DescendantMatch ),
		( "/a/anything", Result.DescendantMatch ),
		( "/a/anything/really", Result.DescendantMatch ),
		( "/a/anything/at/all", Result.DescendantMatch ),
		( "/b/anything/at/all", Result.NoMatch ),
	] :
		context = Gaffer.Context()
		context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
		with context :
			self.assertEqual( pathFilter["match"].getValue(), int( expected ) )
def testEllipsisWithMultipleBranches( self ) :
	"""Two '...' patterns rooted at the same location must both apply."""

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue(
		IECore.StringVectorData( [
			"/a/.../b*",
			"/a/.../c*",
		] )
	)

	Result = pathFilter.Result

	for path, expected in [
		( "/a/ball", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/red/ball", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/red/car", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/big/red/ball", Result.ExactMatch | Result.DescendantMatch | Result.AncestorMatch ),
		( "/a/lovely/shiny/bicyle", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/c", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/d", Result.DescendantMatch ),
		( "/a/anything", Result.DescendantMatch ),
		( "/a/anything/really", Result.DescendantMatch ),
		( "/a/anything/at/all", Result.DescendantMatch ),
		( "/b/anything/at/all", Result.NoMatch ),
	] :
		context = Gaffer.Context()
		context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
		with context :
			self.assertEqual( pathFilter["match"].getValue(), int( expected ) )
def testEllipsisAsTerminator( self ) :
	"""A trailing '...' matches the location itself and everything below it."""

	pathFilter = GafferScene.PathFilter()
	pathFilter["paths"].setValue( IECore.StringVectorData( [ "/a/..." ] ) )

	Result = pathFilter.Result
	allThree = Result.ExactMatch | Result.DescendantMatch | Result.AncestorMatch

	for path, expected in [
		( "/a", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/ball", allThree ),
		( "/a/red/car", allThree ),
		( "/a/red/car/rolls", allThree ),
		( "/a/terminating/ellipsis/matches/everything/below/it", allThree ),
	] :
		context = Gaffer.Context()
		context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
		with context :
			self.assertEqual( pathFilter["match"].getValue(), int( expected ) )
def testCopyConstructorIsDeep( self ) :
	"""Copies must not share state with the matcher they were copied from."""

	original = GafferScene.PathMatcher( [ "/a" ] )
	self.assertEqual( original.match( "/a" ), GafferScene.Filter.Result.ExactMatch )

	copied = GafferScene.PathMatcher( original )
	self.assertEqual( copied.match( "/a" ), GafferScene.Filter.Result.ExactMatch )

	# Clearing the original must leave the copy untouched.
	original.clear()
	self.assertEqual( original.match( "/a" ), GafferScene.Filter.Result.NoMatch )
	self.assertEqual( copied.match( "/a" ), GafferScene.Filter.Result.ExactMatch )
def testAddAndRemovePaths( self ) :
	"""Incremental addPath()/removePath() edits must update match results."""

	Result = GafferScene.Filter.Result

	matcher = GafferScene.PathMatcher()
	matcher.addPath( "/a" )
	matcher.addPath( "/a/b" )
	self.assertEqual( matcher.match( "/a" ), Result.ExactMatch | Result.DescendantMatch )
	self.assertEqual( matcher.match( "/a/b" ), Result.ExactMatch | Result.AncestorMatch )

	matcher.removePath( "/a" )
	self.assertEqual( matcher.match( "/a" ), Result.DescendantMatch )
	self.assertEqual( matcher.match( "/a/b" ), Result.ExactMatch )

	matcher.removePath( "/a/b" )
	self.assertEqual( matcher.match( "/a" ), Result.NoMatch )
	self.assertEqual( matcher.match( "/a/b" ), Result.NoMatch )
def testRemovePathRemovesIntermediatePaths( self ) :
	"""Removing a leaf must also remove its implicit intermediate locations."""

	Result = GafferScene.Filter.Result

	matcher = GafferScene.PathMatcher()
	matcher.addPath( "/a/b/c" )
	for path, expected in [
		( "/a", Result.DescendantMatch ),
		( "/a/b", Result.DescendantMatch ),
		( "/a/b/c", Result.ExactMatch ),
	] :
		self.assertEqual( matcher.match( path ), expected )

	matcher.removePath( "/a/b/c" )
	for path in ( "/a", "/a/b", "/a/b/c" ) :
		self.assertEqual( matcher.match( path ), Result.NoMatch )
def testRemoveEllipsis( self ) :
	"""Paths containing '...' must be removable just like literal ones."""

	Result = GafferScene.Filter.Result

	matcher = GafferScene.PathMatcher()
	matcher.addPath( "/a/.../b" )
	for path, expected in [
		( "/a", Result.DescendantMatch ),
		( "/a/c", Result.DescendantMatch ),
		( "/a/c/b", Result.ExactMatch | Result.DescendantMatch ),
	] :
		self.assertEqual( matcher.match( path ), expected )

	matcher.removePath( "/a/.../b" )
	for path in ( "/a", "/a/c", "/a/c/b" ) :
		self.assertEqual( matcher.match( path ), Result.NoMatch )
def testAddPathReturnValue( self ) :
	"""addPath() returns True only when it actually modified the matcher."""

	# Expectations are order-dependent because the matcher is stateful.
	matcher = GafferScene.PathMatcher()
	for path, expected in [
		( "/", True ),
		( "/a/b", True ),
		( "/a/b", False ),
		( "/a", True ),
		( "/", False ),
	] :
		self.assertEqual( matcher.addPath( path ), expected )

	# Same again, including wildcard and ellipsis paths.
	matcher = GafferScene.PathMatcher()
	for path, expected in [
		( "/a/b/c", True ),
		( "/a/b/c", False ),
		( "/", True ),
		( "/*", True ),
		( "/*", False ),
		( "/...", True ),
		( "/...", False ),
		( "/a/b/c/d", True ),
		( "/a/b/c/d", False ),
	] :
		self.assertEqual( matcher.addPath( path ), expected )

	# Removal makes the path addable again.
	matcher.removePath( "/a/b/c/d" )
	self.assertEqual( matcher.addPath( "/a/b/c/d" ), True )
	self.assertEqual( matcher.addPath( "/a/b/c/d" ), False )
def testRemovePathReturnValue( self ) :
	"""removePath() returns True only when something was actually removed."""

	matcher = GafferScene.PathMatcher()
	self.assertEqual( matcher.removePath( "/" ), False )

	matcher.addPath( "/" )
	self.assertEqual( matcher.removePath( "/" ), True )
	self.assertEqual( matcher.removePath( "/" ), False )

	self.assertEqual( matcher.removePath( "/a/b/c" ), False )
	matcher.addPath( "/a/b/c" )
	self.assertEqual( matcher.removePath( "/a/b/c" ), True )
	self.assertEqual( matcher.removePath( "/a/b/c" ), False )
def testEquality( self ) :
	"""Two matchers compare equal exactly when they hold the same paths."""

	m1 = GafferScene.PathMatcher()
	m2 = GafferScene.PathMatcher()
	self.assertEqual( m1, m2 )

	# Add each path to one matcher (breaking equality) and then to the
	# other (restoring it), alternating which matcher goes first.
	for path, first, second in [
		( "/a", m1, m2 ),
		( "/a/b", m2, m1 ),
		( "/a/b/.../c", m1, m2 ),
		( "/c*", m2, m1 ),
	] :
		first.addPath( path )
		self.assertNotEqual( m1, m2 )
		second.addPath( path )
		self.assertEqual( m1, m2 )
def testPaths( self ) :
	"""paths() must report the currently held paths, including wildcard
	and ellipsis patterns, tracking addPath()/removePath()/clear()."""

	m = GafferScene.PathMatcher()
	self.assertEqual( m.paths(), [] )

	m.addPath( "/a/b" )
	self.assertEqual( m.paths(), [ "/a/b" ] )

	m.addPath( "/a/.../b" )
	self.assertEqual( m.paths(), [ "/a/b", "/a/.../b" ] )

	m.removePath( "/a/.../b" )
	self.assertEqual( m.paths(), [ "/a/b" ] )

	m.addPath( "/a/b/c*d*" )
	self.assertEqual( m.paths(), [ "/a/b", "/a/b/c*d*" ] )

	m.clear()
	self.assertEqual( m.paths(), [] )

	# NOTE : removed an unused PathFilter that had been left here by a
	# copy/paste from testWildcards - it was never referenced.
def testMultipleMatchTypes( self ) :
	"""A location may simultaneously be an exact, ancestor and descendant match."""

	matcher = GafferScene.PathMatcher( [ "/a", "/a/b", "/a/b/c" ] )

	Result = GafferScene.Filter.Result
	for path, expected in [
		( "/a", Result.ExactMatch | Result.DescendantMatch ),
		( "/a/b", Result.ExactMatch | Result.AncestorMatch | Result.DescendantMatch ),
		( "/a/b/c", Result.ExactMatch | Result.AncestorMatch ),
		( "/a/b/d", Result.AncestorMatch ),
	] :
		self.assertEqual( matcher.match( path ), int( expected ) )
def testAncestorMatch( self ) :
	"""Everything below a matched location is an ancestor match."""

	matcher = GafferScene.PathMatcher( [ "/a" ] )

	Result = GafferScene.Filter.Result
	for path, expected in [
		( "/a/b", Result.AncestorMatch ),
		( "/a/b/c", Result.AncestorMatch ),
		( "/a/b/d", Result.AncestorMatch ),
		( "/b/d", Result.NoMatch ),
	] :
		self.assertEqual( matcher.match( path ), int( expected ) )
def testWildCardAncestorMatch( self ) :
	"""Ancestor matching must work below wildcard patterns too."""

	matcher = GafferScene.PathMatcher( [ "/a*" ] )

	Result = GafferScene.Filter.Result
	for path, expected in [
		( "/armadillo/brunches", Result.AncestorMatch ),
		( "/a/b/c", Result.AncestorMatch ),
		( "/a/b/d", Result.AncestorMatch ),
		( "/b/d", Result.NoMatch ),
		( "/armadillo", Result.ExactMatch ),
	] :
		self.assertEqual( matcher.match( path ), int( expected ) )
def testRootAncestorMatch( self ) :
	"""Matching the root makes every other location an ancestor match."""

	matcher = GafferScene.PathMatcher( [ "/" ] )

	Result = GafferScene.Filter.Result
	for path in (
		"/armadillo/brunches",
		"/a/b/c",
		"/a/b/d",
		"/b/d",
		"/armadillo",
	) :
		self.assertEqual( matcher.match( path ), int( Result.AncestorMatch ) )
# Allow the test file to be run directly from the command line.
if __name__ == "__main__":
	unittest.main()
|
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | refs/heads/master | unittests/Applications/CaseBuilder.py | 3 | import unittest
from PyFoam.Applications.CaseBuilder import CaseBuilder
theSuite=unittest.TestSuite()
|
ArneBab/pypyjs | refs/heads/master | website/demo/home/rfk/repos/pypy/lib-python/2.7/test/test_functools.py | 4 | import functools
import sys
import unittest
from test import test_support
from weakref import proxy
import pickle
@staticmethod
def PythonPartial(func, *args, **keywords):
    """Pure Python approximation of functools.partial().

    Returns a closure that prepends *args* and merges *keywords* (with
    call-time keywords taking precedence), mimicking partial's attributes
    (func, args, keywords) for the signature checks in the tests.
    """
    def newfunc(*fargs, **fkeywords):
        # Call-time keywords override the frozen ones.
        combined = dict(keywords)
        combined.update(fkeywords)
        return func(*(args + fargs), **combined)
    newfunc.func = func
    newfunc.args = args
    newfunc.keywords = keywords
    return newfunc
def capture(*args, **kw):
    """Return every positional and keyword argument, verbatim."""
    captured = (args, kw)
    return captured
def signature(part):
    """Return the signature of a partial object.

    The signature is the (func, args, keywords, __dict__) tuple that
    fully determines the partial's behaviour.
    """
    func = part.func
    return func, part.args, part.keywords, part.__dict__
class TestPartial(unittest.TestCase):
    """Exercises functools.partial.

    Subclasses reuse this whole suite against other implementations by
    overriding the ``thetype`` attribute.
    """

    # The partial factory under test; overridden by subclasses.
    thetype = functools.partial

    def test_basic_examples(self):
        p = self.thetype(capture, 1, 2, a=10, b=20)
        self.assertEqual(p(3, 4, b=30, c=40),
                         ((1, 2, 3, 4), dict(a=10, b=30, c=40)))
        # Python 2: map() returns a list, so it compares equal directly.
        p = self.thetype(map, lambda x: x*10)
        self.assertEqual(p([1,2,3,4]), [10, 20, 30, 40])

    def test_attributes(self):
        p = self.thetype(capture, 1, 2, a=10, b=20)
        # attributes should be readable
        self.assertEqual(p.func, capture)
        self.assertEqual(p.args, (1, 2))
        self.assertEqual(p.keywords, dict(a=10, b=20))
        # attributes should not be writable
        # (only enforced for the real type, and only on CPython)
        if not isinstance(self.thetype, type):
            return
        if not test_support.check_impl_detail():
            return
        self.assertRaises(TypeError, setattr, p, 'func', map)
        self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
        self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))

        p = self.thetype(hex)
        try:
            del p.__dict__
        except TypeError:
            pass
        else:
            self.fail('partial object allowed __dict__ to be deleted')

    def test_argument_checking(self):
        self.assertRaises(TypeError, self.thetype)  # need at least a func arg
        try:
            self.thetype(2)()
        except TypeError:
            pass
        else:
            self.fail('First arg not checked for callability')

    def test_protection_of_callers_dict_argument(self):
        # a caller's dictionary should not be altered by partial
        def func(a=10, b=20):
            return a
        d = {'a':3}
        p = self.thetype(func, a=5)
        self.assertEqual(p(**d), 3)
        self.assertEqual(d, {'a':3})
        p(b=7)
        self.assertEqual(d, {'a':3})

    def test_arg_combinations(self):
        # exercise special code paths for zero args in either partial
        # object or the caller
        p = self.thetype(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(1,2), ((1,2), {}))
        p = self.thetype(capture, 1, 2)
        self.assertEqual(p(), ((1,2), {}))
        self.assertEqual(p(3,4), ((1,2,3,4), {}))

    def test_kw_combinations(self):
        # exercise special code paths for no keyword args in
        # either the partial object or the caller
        p = self.thetype(capture)
        self.assertEqual(p(), ((), {}))
        self.assertEqual(p(a=1), ((), {'a':1}))
        p = self.thetype(capture, a=1)
        self.assertEqual(p(), ((), {'a':1}))
        self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
        # keyword args in the call override those in the partial object
        self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))

    def test_positional(self):
        # make sure positional arguments are captured correctly
        for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
            p = self.thetype(capture, *args)
            expected = args + ('x',)
            got, empty = p('x')
            self.assertTrue(expected == got and empty == {})

    def test_keyword(self):
        # make sure keyword arguments are captured correctly
        for a in ['a', 0, None, 3.5]:
            p = self.thetype(capture, a=a)
            expected = {'a':a,'x':None}
            empty, got = p(x=None)
            self.assertTrue(expected == got and empty == ())

    def test_no_side_effects(self):
        # make sure there are no side effects that affect subsequent calls
        p = self.thetype(capture, 0, a=1)
        args1, kw1 = p(1, b=2)
        self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
        args2, kw2 = p()
        self.assertTrue(args2 == (0,) and kw2 == {'a':1})

    def test_error_propagation(self):
        # exceptions raised by the wrapped callable must propagate,
        # however the arguments are split between freeze time and call time
        def f(x, y):
            x // y
        self.assertRaises(ZeroDivisionError, self.thetype(f, 1, 0))
        self.assertRaises(ZeroDivisionError, self.thetype(f, 1), 0)
        self.assertRaises(ZeroDivisionError, self.thetype(f), 1, 0)
        self.assertRaises(ZeroDivisionError, self.thetype(f, y=0), 1)

    def test_weakref(self):
        # partial objects must be weakly referenceable, and must die
        # once the last strong reference is dropped
        f = self.thetype(int, base=16)
        p = proxy(f)
        self.assertEqual(f.func, p.func)
        f = None
        test_support.gc_collect()
        self.assertRaises(ReferenceError, getattr, p, 'func')

    def test_with_bound_and_unbound_methods(self):
        data = map(str, range(10))
        join = self.thetype(str.join, '')
        self.assertEqual(join(data), '0123456789')
        join = self.thetype(''.join)
        self.assertEqual(join(data), '0123456789')

    def test_pickle(self):
        # round-tripping through pickle must preserve func/args/keywords
        # and any extra __dict__ contents
        f = self.thetype(signature, 'asdf', bar=True)
        f.add_something_to__dict__ = True
        f_copy = pickle.loads(pickle.dumps(f))
        self.assertEqual(signature(f), signature(f_copy))
class PartialSubclass(functools.partial):
    """Trivial subclass, used to verify that partial subclasses cleanly."""
    pass
class TestPartialSubclass(TestPartial):
    """Re-runs the whole TestPartial suite against a partial subclass."""
    thetype = PartialSubclass
class TestPythonPartial(TestPartial):
    """Re-runs the TestPartial suite against the pure Python reference."""
    thetype = PythonPartial

    # the python version isn't picklable
    def test_pickle(self): pass
class TestUpdateWrapper(unittest.TestCase):
    """Tests for functools.update_wrapper()."""

    def check_wrapper(self, wrapper, wrapped,
                      assigned=functools.WRAPPER_ASSIGNMENTS,
                      updated=functools.WRAPPER_UPDATES):
        # Check attributes were assigned
        for name in assigned:
            self.assertTrue(getattr(wrapper, name) == getattr(wrapped, name), name)
        # Check attributes were updated
        for name in updated:
            wrapper_attr = getattr(wrapper, name)
            wrapped_attr = getattr(wrapped, name)
            for key in wrapped_attr:
                self.assertTrue(wrapped_attr[key] is wrapper_attr[key])

    def _default_update(self):
        # Build a wrapped/wrapper pair using update_wrapper's defaults.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f)
        return wrapper, f

    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.attr, 'This is also a test')

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, f = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        # Empty assigned/updated tuples: nothing is copied across.
        functools.update_wrapper(wrapper, f, (), ())
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        functools.update_wrapper(wrapper, f, assign, update)
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)

    def test_builtin_update(self):
        # Test for bug #1576241
        def wrapper():
            pass
        functools.update_wrapper(wrapper, max)
        self.assertEqual(wrapper.__name__, 'max')
        self.assertTrue(wrapper.__doc__.startswith('max('))
class TestWraps(TestUpdateWrapper):
    """Tests for the functools.wraps() decorator.

    Inherits TestUpdateWrapper so check_wrapper() is available, and
    overrides the cases to exercise the decorator form.
    """

    def _default_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f)
        def wrapper():
            pass
        self.check_wrapper(wrapper, f)
        return wrapper

    def test_default_update(self):
        wrapper = self._default_update()
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.attr, 'This is also a test')

    @unittest.skipIf(not sys.flags.optimize <= 1,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        # Empty assigned/updated tuples: nothing is copied across.
        @functools.wraps(f, (), ())
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def add_dict_attr(f):
            f.dict_attr = {}
            return f
        assign = ('attr',)
        update = ('dict_attr',)
        @functools.wraps(f, assign, update)
        @add_dict_attr
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce(unittest.TestCase):
    """Tests for functools.reduce() (Python 2: also exercises long ints)."""

    def test_reduce(self):
        class Squares:
            # Lazily materialised sequence of squares; checks that reduce()
            # accepts arbitrary objects implementing the sequence protocol.
            def __init__(self, max):
                self.max = max
                self.sofar = []

            def __len__(self): return len(self.sofar)

            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]

        reduce = functools.reduce
        self.assertEqual(reduce(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(reduce(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            reduce(lambda x, y: x*y, range(2,21), 1L),
            2432902008176640000L
        )
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10)), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(10), 0), 285)
        self.assertEqual(reduce(lambda x, y: x+y, Squares(0), 0), 0)
        self.assertRaises(TypeError, reduce)
        self.assertRaises(TypeError, reduce, 42, 42)
        self.assertRaises(TypeError, reduce, 42, 42, 42)
        self.assertEqual(reduce(42, "1"), "1") # func is never called with one item
        self.assertEqual(reduce(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, reduce, 42, (42, 42))
class TestCmpToKey(unittest.TestCase):
    """Tests for functools.cmp_to_key()."""

    def test_cmp_to_key(self):
        # A reversed comparison function must produce a descending sort.
        def mycmp(x, y):
            return y - x
        self.assertEqual(sorted(range(5), key=functools.cmp_to_key(mycmp)),
                         [4, 3, 2, 1, 0])

    def test_hash(self):
        # Key wrapper objects are deliberately unhashable.
        def mycmp(x, y):
            return y - x
        key = functools.cmp_to_key(mycmp)
        k = key(10)
        # Bug fix: pass the callable and its argument separately. The
        # original wrote assertRaises(TypeError, hash(k)), which calls
        # hash(k) eagerly - the exception either escaped assertRaises or
        # the test passed vacuously because the hash result is not callable.
        self.assertRaises(TypeError, hash, k)
class TestTotalOrdering(unittest.TestCase):
    """Tests for the functools.total_ordering class decorator.

    Each case supplies __eq__ plus exactly one rich comparison and checks
    that the decorator derives the remaining operators correctly.
    """

    def test_total_ordering_lt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_total_ordering_le(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_total_ordering_gt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_total_ordering_ge(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(str):
            pass
        self.assertTrue(A("a") < A("b"))
        self.assertTrue(A("b") > A("a"))
        self.assertTrue(A("a") <= A("b"))
        self.assertTrue(A("b") >= A("a"))
        self.assertTrue(A("b") <= A("b"))
        self.assertTrue(A("b") >= A("b"))

    def test_no_operations_defined(self):
        # decorating a class with no comparison methods must fail loudly
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass

    def test_bug_10042(self):
        # derived operators must propagate TypeError from the underlying
        # comparison rather than swallow it
        @functools.total_ordering
        class TestTO:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, TestTO):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                if isinstance(other, TestTO):
                    return self.value < other.value
                raise TypeError
        with self.assertRaises(TypeError):
            TestTO(8) <= ()
def test_main(verbose=None):
    """Run the full suite.

    When *verbose* is set and this is a refcount-hunting (debug) build,
    the suite is repeated several times to expose reference leaks.
    """
    test_classes = (
        TestPartial,
        TestPartialSubclass,
        TestPythonPartial,
        TestUpdateWrapper,
        TestTotalOrdering,
        TestWraps,
        TestReduce,
    )
    test_support.run_unittest(*test_classes)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
# Running the file directly executes the full suite with verbose output.
if __name__ == '__main__':
    test_main(verbose=True)
|
dalegregory/odoo | refs/heads/8.0 | addons/project/res_partner.py | 334 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
    """ Inherits partner and adds Tasks information in the partner form """
    # NOTE : this docstring was previously a dead bare-string statement
    # placed after _task_count, where it never became the class __doc__;
    # it has been moved to the proper position.

    _inherit = 'res.partner'

    def _task_count(self, cr, uid, ids, field_name, arg, context=None):
        """Return a dict mapping each partner id to the number of
        project.task records whose partner_id points at it."""
        Task = self.pool['project.task']
        return {
            partner_id: Task.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context)
            for partner_id in ids
        }

    _columns = {
        'task_ids': fields.one2many('project.task', 'partner_id', 'Tasks'),
        'task_count': fields.function(_task_count, string='# Tasks', type='integer'),
    }

    def copy(self, cr, uid, record_id, default=None, context=None):
        """Duplicate a partner without duplicating its tasks - the tasks
        belong to the original partner only."""
        if default is None:
            default = {}
        default['task_ids'] = []
        return super(res_partner, self).copy(
            cr, uid, record_id, default=default, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
markoshorro/gem5 | refs/heads/master | src/arch/mips/MipsTLB.py | 69 | # -*- mode:python -*-
# Copyright (c) 2007 MIPS Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jaidev Patwardhan
# Korey Sewell
from m5.SimObject import SimObject
from m5.params import *
from BaseTLB import BaseTLB
class MipsTLB(BaseTLB):
    # SimObject type name, and the C++ class/header this Python
    # configuration object is bound to.
    type = 'MipsTLB'
    cxx_class = 'MipsISA::TLB'
    cxx_header = 'arch/mips/tlb.hh'
    # Number of entries in the TLB.
    size = Param.Int(64, "TLB size")
|
HyperBaton/ansible | refs/heads/devel | lib/ansible/modules/network/panos/_panos_check.py | 41 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_check
short_description: check if PAN-OS device is ready for configuration
description:
- Check if PAN-OS device is ready for being configured (no pending jobs).
- The check could be done once or multiple times until the device is ready.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
options:
timeout:
description:
- timeout of API calls
required: false
default: 0
interval:
description:
- time waited between checks
required: false
default: 0
extends_documentation_fragment: panos
'''
EXAMPLES = '''
# single check on 192.168.1.1 with credentials admin/admin
- name: check if ready
panos_check:
ip_address: "192.168.1.1"
password: "admin"
# check for 10 times, every 30 seconds, if device 192.168.1.1
# is ready, using credentials admin/admin
- name: wait for reboot
panos_check:
ip_address: "192.168.1.1"
password: "admin"
register: result
until: result is not failed
retries: 10
delay: 30
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
import time
# Defer a missing pan-python dependency until module execution, so that
# Ansible can report a clean error message instead of an import traceback.
try:
    import pan.xapi
    HAS_LIB = True
except ImportError:
    HAS_LIB = False
def check_jobs(jobs, module):
    """Return True if *jobs* is non-empty and every job has finished.

    A job counts as finished when it contains a <status> element whose
    text is 'FIN'. The *module* argument is unused but kept for
    interface compatibility with the caller.
    """
    if not jobs:
        # No jobs reported at all: device state unknown, treat as not ready.
        return False
    for job in jobs:
        status = job.find('.//status')
        if status is None or status.text != 'FIN':
            return False
    return True
def main():
    """Poll the firewall's job queue until every job reports FIN, or the
    timeout expires (in which case the module fails)."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        timeout=dict(default=0, type='int'),
        interval=dict(default=0, type='int')
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    timeout = module.params['timeout']
    interval = module.params['interval']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password,
        timeout=60
    )

    # Absolute deadline after which we give up polling.
    checkpnt = time.time() + timeout
    while True:
        try:
            xapi.op(cmd="show jobs all", cmd_xml=True)
        except Exception:
            # Transient API errors (e.g. device still rebooting) are
            # expected while the device is not ready; just retry.
            pass
        else:
            jobs = xapi.element_root.findall('.//job')
            if check_jobs(jobs, module):
                module.exit_json(changed=True, msg="okey dokey")

        if time.time() > checkpnt:
            break

        time.sleep(interval)

    module.fail_json(msg="Timeout")
if __name__ == '__main__':
main()
|
patrick1981/LibraryTools | refs/heads/master | Sushi_Project/resourceList.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# SUSHI/COUNTER harvesting endpoints, keyed first by VA site name and then
# by vendor/platform name.  Every vendor entry carries the four credentials
# a COUNTER SUSHI client needs:
#   host         -- base URL of the vendor's SUSHI service
#   api_key      -- vendor API key ('' when the vendor does not issue one)
#   requestor_id -- SUSHI requestor identifier
#   customer_id  -- SUSHI customer identifier
#
# FIX: disabled vendors used to be "commented out" with triple-quoted
# strings placed inside the dict literal.  Python concatenates adjacent
# string literals, so each such string silently fused with the NEXT dict
# key, corrupting keys such as 'jsor', 'ovid', 'bmj', 'wiley' and
# 'proquest'.  Those pseudo-comments are now real '#' comments, which
# restores the intended keys.  The disabled credentials are preserved in
# the comments for reference.
resources = {
    'VISN': {
        'ajr': {
            'host': 'https://www.ajronline.org/',
            'api_key': '',
            'requestor_id': 'LS730876',
            'customer_id': '728125',
        },
        'highwirePress': {
            'host': 'https://hwdpapi.highwire.org/sushi/',
            'api_key': 'patrick.duff2@va.gov|Pd05091981',
            'requestor_id': 'patrick.duff2@va.gov',
            'customer_id': 'patrick.duff2@va.gov',
        },
        # Disabled vendors (kept for reference):
        #   bmj:            https://sushi5.scholarlyiq.com/counter/r5/
        #                   requestor 30deea75-f63e-42b6-9847-3a56d70a7a69, customer 947732
        #   oxfordjournals: https://sushi5.scholarlyiq.com/counter/r5/
        #                   requestor 7068e173-0984-42b4-8e2b-c449811c4228, customer 4026508
        #   cambridge:      https://event-aggregation-api-prod-platform.prod.aop.cambridge.org/
        #                   requestor/customer 8a94032b5413f999015413ffd8cd0b9f  (all VISN journals)
        'jsor': {
            'host': 'https://www.jstor.org/sushi/',
            'requestor_id': 'visn1vane',
            'api_key': '',
            'customer_id': 'newengland.va.gov',
        },
        # only HQ report; customer EAL00000093833 would cover all sites in one
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'patrick.duff2@va.gov',
            'customer_id': 'EAL00000141442',
        },
        # all of VA sites in one report
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '2753760',
        },
        'guilford': {
            'host': 'https://guilfordjournals.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': 'BAAXV',
        },
        'mary-ann': {
            'host': 'https://www.liebertpub.com/',
            'api_key': '',
            'requestor_id': 'VISN1Library',
            'customer_id': '1047416',
        },
        # Disabled: oxfordbooks  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor 9ee9b648-5de3-457e-b9d3-8bd216359f65, customer 255523
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': 'b1ea3e3c-0782-3c9a-5414-b334044c1618',
            'customer_id': '107723',
        },
        'jsad': {
            'host': 'https://www.jsad.com/',
            'api_key': '',
            'requestor_id': 'VISN1Vane',
            'customer_id': 'CU-0927883',
        },
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'requestor_id': '',
            'api_key': 'BMCMXYUOZM5QH0K7',
            'customer_id': '29039',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'requestor_id': '98303faf-1ff8-4427-bed1-afc9b2dbf0f7',
            'api_key': '',
            'customer_id': 's2341910',
        },
        'mcgraw': {
            'host': 'http://sitemaster.mhmedical.com/sushi/',
            'requestor_id': '4027650',
            'api_key': '341b96b3-2eda-4ac5-972d-39ab412f97e6',
            'customer_id': '103484',
        },
        'nrc': {
            'host': 'https://www.nrcresearchpress.com/',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'api_key': '',
            'customer_id': '71536-19914-csp',
        },
        'sage': {
            'host': 'https://journals.sagepub.com/',
            'requestor_id': 'usveterans',
            'api_key': '',
            'customer_id': '1000547198',
        },
        'rsna': {
            'host': 'https://pubs.rsna.org/',
            'requestor_id': 'usveterans',
            'api_key': '',
            'customer_id': '1000547198',
        },
        'japma': {
            'host': 'https://www.japmaonline.org/',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'api_key': '',
            'customer_id': 'PMAS132041',
        },
        'project_hope': {
            'host': 'https://www.healthaffairs.org/',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'api_key': '',
            'customer_id': 'HR240167',
        },
        'jospt': {
            'host': 'https://www.jospt.org/',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'api_key': '',
            'customer_id': '321963',
        },
        'annual reviews': {
            'host': 'https://www.annualreviews.org/',
            'requestor_id': 'VISN1_VANE',
            'api_key': '',
            'customer_id': '391320',
        },
        'operative_dentistry': {
            'host': 'https://www.joionline.org/',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'api_key': '',
            'customer_id': 'ODNTFE205365',
        },
        'jama': {
            'host': 'http://sitemaster.mhmedical.com/sushi/',
            'requestor_id': '1817798',
            'api_key': '2ccd8140-5235-40c9-a2c8-d8dde4cd7702',
            'customer_id': '43428',
        },
        'annual': {
            'host': 'https://journals.sagepub.com/',
            'requestor_id': 'VISN1_VANE',
            'api_key': '',
            'customer_id': '391320',
        },
        'jns': {
            'host': 'https://ams.thejns.org/rest/COUNTER/v5/',
            'requestor_id': '',
            'api_key': 'l7URGmhxgfiANJma/DmVDQ==',
            'customer_id': '1429',
        },
        'aps': {
            'host': 'https://www.physiology.org/',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'api_key': '',
            'customer_id': '00105156',
        },
    },
    'Bedford': {
        'mcgraw': {
            'host': 'http://sitemaster.mhmedical.com/sushi/',
            'requestor_id': '4027650',
            'api_key': '341b96b3-2eda-4ac5-972d-39ab412f97e6',
            'customer_id': '103484',
        },
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '733272',
        },
        # Disabled: oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor d5efc2bf-acbe-4103-98bc-cc6ba8574b4b, customer 4358218
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': 'b1ea3e3c-0782-3c9a-5414-d93454bc0df8',
            'customer_id': '32841',
        },
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '1674185',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141414',
            'customer_id': 'EAL00000141414',
        },
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': '7NW3XBF8XW8IP6W9',
            'requestor_id': '',
            'customer_id': '30090',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': 'cfd78867-f467-47cc-a2c5-6f2ff080fa3c',
            'customer_id': 's7241825',
        },
        'sage': {
            'host': 'https://journals.sagepub.com/',
            'api_key': '',
            'requestor_id': 'usveterans',
            'customer_id': '1000458808',
        },
    },
    'Boston': {
        'ingenta': {
            'host': 'https://www.ingentaconnect.com/admin/statistics/sushiservice/',
            'api_key': '',
            'requestor_id': '6ad50b7124df4bfb',
            'customer_id': 'cid-53007186',
        },
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '25122',
        },
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': 'OANFLOPRFBFNT9P1',
            'requestor_id': '',
            'customer_id': '30093',
        },
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '1557663',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': 'a2191248-2dc8-4e32-af99-14effabd3c51',
            'customer_id': 's4232390',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-677354e4386a',
            'customer_id': '97015',
        },
        # Disabled: oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor cd21232c-78df-47b0-b907-ff4d33dc7458, customer 4358231
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141416',
            'customer_id': 'EAL00000141416',
        },
    },
    'Central Western Mass': {
        # Disabled: oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor e52ee127-d431-410c-b615-4c81bfd2d174, customer 4359000
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '1948374',
        },
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': '7ASP5ZCX7N0SMVI2',
            'requestor_id': '',
            'customer_id': '30098',
        },
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '2134831',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': '2ab49f7f-7994-4714-a256-b561af7baab8',
            'customer_id': 's1204207',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-697380b14bf2',
            'customer_id': '8695',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141419',
            'customer_id': 'EAL00000141419',
        },
        'sage': {
            'host': 'https://journals.sagepub.com/',
            'api_key': '',
            'requestor_id': 'usveterans',
            'customer_id': '1000641635',
        },
    },
    'Manchester': {
        'ingenta': {
            'host': 'https://www.ingentaconnect.com/admin/statistics/sushiservice/',
            'api_key': '',
            'requestor_id': '6f907c7e0ce19be5',
            'customer_id': 'id22102147',
        },
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '1626293',
        },
        # Disabled: oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor bb944ece-1d08-4568-bc35-55a6dd86c94a, customer 4359065
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'requestor_id': '',
            'api_key': 'SAZAKSN6DMZ52A64',
            'customer_id': '30091',
        },
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '2142347',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-6c7318fc5eb3',
            'customer_id': '98878',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': 'd4536bbc-ebbc-46b3-b564-ce19faf57fbc',
            'customer_id': 's2340606',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141415',
            'customer_id': 'EAL00000141415',
        }
    },
    'Providence': {
        # Disabled: Oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor 0535ecef-a3d0-4198-81d7-06f47873a48f, customer 4358456
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '4938',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': 'eee9f35d-83a8-499e-b071-53b8971044cd',
            'customer_id': 's2340195',
        },
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': '1Y0XIOAN1GQZZA00',
            'requestor_id': '',
            'customer_id': '30095',
        },
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '1573929',
        },
        'sage': {
            'host': 'https://journals.sagepub.com/',
            'api_key': '',
            'requestor_id': 'usveterans',
            'customer_id': '1000685004',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-6e73c4a67b70',
            'customer_id': '5266',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141443',
            'customer_id': 'EAL00000141443',
        },
    },
    'Togus': {
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '9827',
        },
        # Disabled: Oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor 894ade0a-5cff-49d1-85b1-ae6b0635054c, customer 4358989
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': '733PHWJ5X834BBT3',
            'requestor_id': '',
            'customer_id': '46329',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': 'd675c177-1f06-459e-a041-d7e31286ec55',
            'customer_id': 's7477452',
        },
        'TandF': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '3270637',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-6e739c135a70',
            'customer_id': '395',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141441',
            'customer_id': 'EAL00000141441',
        },
    },
    'Conneticut': {
        # Disabled: oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor 4fac9e6b-bd9d-4211-914f-ba4325dfdfa9, customer 4358328
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '1626287',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'EAL00000141418EAL00000141419',
            'customer_id': 'EAL00000141418EAL00000141419',
        },
        'proquest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': 'QGLB5SGLEE4KARHG',
            'requestor_id': '',
            'customer_id': '30097',
        },
        'tandf': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '1575836',
        },
        'sage': {
            'host': 'https://journals.sagepub.com/',
            'api_key': '',
            'requestor_id': 'usveterans',
            'customer_id': '1000458778',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-7573d408cd8c',
            'customer_id': '91281',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': 'd675c177-1f06-459e-a041-d7e31286ec55',
            'customer_id': 's2344260',
        },
    },
    'White River Junction': {
        # Disabled: oxford  https://sushi5.scholarlyiq.com/counter/r5/
        #           requestor 25e5233b-74a0-4c5e-acea-b43f132f430c, customer 4358387
        'bmj': {
            'host': 'https://sushi5.scholarlyiq.com/counter/r5/',
            'api_key': '',
            'requestor_id': '30deea75-f63e-42b6-9847-3a56d70a7a69',
            'customer_id': '1516270',
        },
        'ProQuest': {
            'host': 'https://sushi.proquest.com/counter/r5/',
            'api_key': 'QK1W9G7KVJPBK5A2',
            'requestor_id': '',
            'customer_id': '30099',
        },
        'TandF': {
            'host': 'https://tandfonline.com/',
            'api_key': '',
            'requestor_id': 'vhavisn1librarye-journalteam@va.gov',
            'customer_id': '1944602',
        },
        'ovid': {
            'host': 'https://stats.ovid.com/C5/sushi/',
            'api_key': '',
            'requestor_id': '9295320c-2719-228d-5414-7873a04eb3cd',
            'customer_id': '3239',
        },
        'ebsco': {
            'host': 'https://sushi.ebscohost.com/R5/',
            'api_key': '',
            'requestor_id': '4f096ff0-d261-43d9-8cfd-e9d75e63d1dc',
            'customer_id': 's2342326',
        },
        'wiley': {
            'host': 'https://onlinelibrary.wiley.com/',
            'api_key': '',
            'requestor_id': 'patrick.duff2@va.gov',
            'customer_id': 'EAL00000141440',
        }
    }
}
|
MiLk/youtube-dl | refs/heads/master | youtube_dl/extractor/cspan.py | 12 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unescapeHTML,
find_xpath_attr,
)
class CSpanIE(InfoExtractor):
    # youtube-dl extractor for C-SPAN video pages.  Resolves a page URL to
    # one or more media files served via C-SPAN's AJAX player endpoints.
    _VALID_URL = r'http://(?:www\.)?c-span\.org/video/\?(?P<id>[0-9a-f]+)'
    IE_DESC = 'C-SPAN'
    _TESTS = [{
        'url': 'http://www.c-span.org/video/?313572-1/HolderonV',
        'md5': '8e44ce11f0f725527daccc453f553eb0',
        'info_dict': {
            'id': '315139',
            'ext': 'mp4',
            'title': 'Attorney General Eric Holder on Voting Rights Act Decision',
            'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
        },
        'skip': 'Regularly fails on travis, for unknown reasons',
    }, {
        'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models',
        # For whatever reason, the served video alternates between
        # two different ones
        #'md5': 'dbb0f047376d457f2ab8b3929cbb2d0c',
        'info_dict': {
            'id': '340723',
            'ext': 'mp4',
            'title': 'International Health Care Models',
            'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
        }
    }]

    def _real_extract(self, url):
        # The page id from the URL is only used for logging; the real
        # program id is scraped from the page markup below.
        mobj = re.match(self._VALID_URL, url)
        page_id = mobj.group('id')
        webpage = self._download_webpage(url, page_id)
        # Numeric program id embedded in the page (progid=...).
        video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id')
        description = self._html_search_regex(
            [
                # The full description
                r'<div class=\'expandable\'>(.*?)<a href=\'#\'',
                # If the description is small enough the other div is not
                # present, otherwise this is a stripped version
                r'<p class=\'initial\'>(.*?)</p>'
            ],
            webpage, 'description', flags=re.DOTALL)
        # JSON metadata from the Android/HTML5 player endpoint; holds the
        # list of media files for this program.
        info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id
        data = self._download_json(info_url, video_id)
        # XML metadata (Flash player config) supplies title and thumbnail.
        doc = self._download_xml(
            'http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
            video_id)
        title = find_xpath_attr(doc, './/string', 'name', 'title').text
        thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text
        files = data['video']['files']
        # One playlist entry per media file; multi-part programs get a
        # "part N" suffix on the title.
        entries = [{
            'id': '%s_%d' % (video_id, partnum + 1),
            'title': (
                title if len(files) == 1 else
                '%s part %d' % (title, partnum + 1)),
            'url': unescapeHTML(f['path']['#text']),
            'description': description,
            'thumbnail': thumbnail,
            'duration': int_or_none(f.get('length', {}).get('#text')),
        } for partnum, f in enumerate(files)]
        return {
            '_type': 'playlist',
            'entries': entries,
            'title': title,
            'id': video_id,
        }
|
ikaee/bfr-attendant | refs/heads/master | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/crf/python/kernel_tests/crf_test.py | 57 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CRF."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.crf.python.ops import crf
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CrfTest(test.TestCase):
  """Tests for the linear-chain CRF ops in tf.contrib.crf.

  Each test uses a small fixed example (4 time steps, 3 tags, effective
  sequence length 3) and, where a reference value cannot be written down
  directly, checks the dynamic-programming implementation against a
  brute-force enumeration over all tag sequences.
  """

  def testCrfSequenceScore(self):
    # Unary emission scores: one row per time step, one column per tag.
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    tag_indices = np.array([1, 2, 1, 0], dtype=np.int32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    with self.test_session() as sess:
      sequence_score = crf.crf_sequence_score(
          inputs=array_ops.expand_dims(inputs, 0),
          tag_indices=array_ops.expand_dims(tag_indices, 0),
          sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
          transition_params=constant_op.constant(transition_params))
      sequence_score = array_ops.squeeze(sequence_score, [0])
      tf_sequence_score = sess.run(sequence_score)
      # Reference: sum of unary scores plus sum of pairwise transition
      # scores, restricted to the first `sequence_lengths` steps.
      expected_unary_score = sum(inputs[i][tag_indices[i]]
                                 for i in range(sequence_lengths))
      expected_binary_score = sum(
          transition_params[tag_indices[i], tag_indices[i + 1]]
          for i in range(sequence_lengths - 1))
      expected_sequence_score = expected_unary_score + expected_binary_score
      self.assertAllClose(tf_sequence_score, expected_sequence_score)

  def testCrfUnaryScore(self):
    # Unary part only: emissions gathered along the given tag path.
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    tag_indices = np.array([1, 2, 1, 0], dtype=np.int32)
    sequence_lengths = np.array(3, dtype=np.int32)
    with self.test_session() as sess:
      unary_score = crf.crf_unary_score(
          tag_indices=array_ops.expand_dims(tag_indices, 0),
          sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
          inputs=array_ops.expand_dims(inputs, 0))
      unary_score = array_ops.squeeze(unary_score, [0])
      tf_unary_score = sess.run(unary_score)
      expected_unary_score = sum(inputs[i][tag_indices[i]]
                                 for i in range(sequence_lengths))
      self.assertAllClose(tf_unary_score, expected_unary_score)

  def testCrfBinaryScore(self):
    # Binary part only: transition scores along consecutive tag pairs.
    tag_indices = np.array([1, 2, 1, 0], dtype=np.int32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    with self.test_session() as sess:
      binary_score = crf.crf_binary_score(
          tag_indices=array_ops.expand_dims(tag_indices, 0),
          sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
          transition_params=constant_op.constant(transition_params))
      binary_score = array_ops.squeeze(binary_score, [0])
      tf_binary_score = sess.run(binary_score)
      expected_binary_score = sum(
          transition_params[tag_indices[i], tag_indices[i + 1]]
          for i in range(sequence_lengths - 1))
      self.assertAllClose(tf_binary_score, expected_binary_score)

  def testCrfLogNorm(self):
    # Partition function: forward-algorithm result must match logsumexp
    # over the scores of every possible tag sequence.
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    sequence_lengths = np.array(3, dtype=np.int32)
    with self.test_session() as sess:
      all_sequence_scores = []
      # Compare the dynamic program with brute force computation.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        # Pad beyond the effective length with tag 0; those positions are
        # masked out by sequence_lengths.
        tag_indices.extend([0] * (num_words - sequence_lengths))
        all_sequence_scores.append(
            crf.crf_sequence_score(
                inputs=array_ops.expand_dims(inputs, 0),
                tag_indices=array_ops.expand_dims(tag_indices, 0),
                sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
                transition_params=constant_op.constant(transition_params)))
      brute_force_log_norm = math_ops.reduce_logsumexp(all_sequence_scores)
      log_norm = crf.crf_log_norm(
          inputs=array_ops.expand_dims(inputs, 0),
          sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
          transition_params=constant_op.constant(transition_params))
      log_norm = array_ops.squeeze(log_norm, [0])
      tf_brute_force_log_norm, tf_log_norm = sess.run(
          [brute_force_log_norm, log_norm])
      self.assertAllClose(tf_log_norm, tf_brute_force_log_norm)

  def testCrfLogLikelihood(self):
    # The log-likelihoods of all possible tag sequences must form a valid
    # probability distribution, i.e. logsumexp over them equals 0.
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    with self.test_session() as sess:
      all_sequence_log_likelihoods = []
      # Make sure all probabilities sum to 1.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        sequence_log_likelihood, _ = crf.crf_log_likelihood(
            inputs=array_ops.expand_dims(inputs, 0),
            tag_indices=array_ops.expand_dims(tag_indices, 0),
            sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
            transition_params=constant_op.constant(transition_params))
        all_sequence_log_likelihoods.append(sequence_log_likelihood)
      total_log_likelihood = math_ops.reduce_logsumexp(
          all_sequence_log_likelihoods)
      tf_total_log_likelihood = sess.run(total_log_likelihood)
      self.assertAllClose(tf_total_log_likelihood, 0.0)

  def testLengthsToMasks(self):
    # Internal helper: each row of the mask is 1 up to the row's length
    # and 0 afterwards.
    with self.test_session() as sess:
      sequence_lengths = [4, 1, 8, 2]
      max_sequence_length = max(sequence_lengths)
      mask = crf._lengths_to_masks(sequence_lengths, max_sequence_length)
      tf_mask = sess.run(mask)
      self.assertEqual(len(tf_mask), len(sequence_lengths))
      for m, l in zip(tf_mask, sequence_lengths):
        self.assertAllEqual(m[:l], [1] * l)
        self.assertAllEqual(m[l:], [0] * (len(m) - l))

  def testViterbiDecode(self):
    # Viterbi must return the argmax sequence and its score, matching a
    # brute-force search over all tag sequences.
    inputs = np.array(
        [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
    transition_params = np.array(
        [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
    sequence_lengths = np.array(3, dtype=np.int32)
    num_words = inputs.shape[0]
    num_tags = inputs.shape[1]
    with self.test_session() as sess:
      all_sequence_scores = []
      all_sequences = []
      # Compare the dynamic program with brute force computation.
      for tag_indices in itertools.product(
          range(num_tags), repeat=sequence_lengths):
        tag_indices = list(tag_indices)
        tag_indices.extend([0] * (num_words - sequence_lengths))
        all_sequences.append(tag_indices)
        sequence_score = crf.crf_sequence_score(
            inputs=array_ops.expand_dims(inputs, 0),
            tag_indices=array_ops.expand_dims(tag_indices, 0),
            sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
            transition_params=constant_op.constant(transition_params))
        sequence_score = array_ops.squeeze(sequence_score, [0])
        all_sequence_scores.append(sequence_score)
      tf_all_sequence_scores = sess.run(all_sequence_scores)
      expected_max_sequence_index = np.argmax(tf_all_sequence_scores)
      expected_max_sequence = all_sequences[expected_max_sequence_index]
      expected_max_score = tf_all_sequence_scores[expected_max_sequence_index]
      # viterbi_decode is a pure-numpy function, so no session needed.
      actual_max_sequence, actual_max_score = crf.viterbi_decode(
          inputs[:sequence_lengths], transition_params)
      self.assertAllClose(actual_max_score, expected_max_score)
      self.assertEqual(actual_max_sequence,
                       expected_max_sequence[:sequence_lengths])
if __name__ == "__main__":
test.main()
|
arcticshores/kivy | refs/heads/master | examples/widgets/asyncimage.py | 38 | '''
Asynchronous image loading
==========================
Test of the widget AsyncImage.
We are just putting it in a CenteredAsyncImage for beeing able to center the
image on screen without doing upscale like the original AsyncImage.
'''
from kivy.app import App
from kivy.uix.image import AsyncImage
from kivy.lang import Builder
Builder.load_string('''
<CenteredAsyncImage>:
size: self.texture_size
size_hint: None, None
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
''')
class CenteredAsyncImage(AsyncImage):
    # Behaviour comes entirely from the kv rule loaded above: the widget
    # keeps its natural texture size and is centred in its parent instead
    # of being upscaled like a plain AsyncImage.
    pass
class TestAsyncApp(App):
    def build(self):
        # Root widget: the image is downloaded asynchronously over HTTP
        # and shown centred on screen once it arrives.
        return CenteredAsyncImage(
            source='http://kivy.org/funny-pictures-cat-is-expecting-you.jpg')
if __name__ == '__main__':
TestAsyncApp().run()
|
sabi0/intellij-community | refs/heads/master | python/testData/codeInsight/classMRO/MetaClassDeclaredThroughAncestor.py | 20 | class Meta(type):
pass
class Base(metaclass=Meta):
pass
class MyClass(Base):
pass |
djkskqyr3/CSipSimple | refs/heads/master | jni/pjsip/sources/pjsip-apps/build/get-footprint.py | 59 | # $Id: get-footprint.py 1352 2007-06-08 01:41:25Z bennylp $
#
# This file is used to generate PJSIP/PJMEDIA footprint report.
# To use this file, just run it in pjsip-apps/build directory, to
# produce footprint.txt and footprint.htm report files.
#
import os
import sys
import string
import time
compile_flags1 = [
# Base
['BASE', 'Empty application size'],
['', 'Subtotal: Empty application size'],
['HAS_PJLIB', 'Minimum PJLIB only'],
# Subtotal
['', 'Subtotal'],
# PJLIB-UTIL
['HAS_PJLIB_STUN', 'STUN client'],
['HAS_PJLIB_GETOPT', 'getopt() functionality'],
# Subtotal
['', 'TOTAL']
]
compile_flags = [
# Base
['BASE', 'Empty application size'],
['', 'Subtotal: empty application size on this platform'],
['HAS_PJLIB', 'PJLIB (pool, data structures, hash tables, ioqueue, socket, timer heap, etc.). ' +
'For targets that statically link application with LIBC, the size includes ' +
'various LIBC functions that are used by PJLIB.'],
['', 'Subtotal: Application linked with PJLIB'],
# PJLIB-UTIL
['HAS_PJLIB_STUN', 'PJLIB-UTIL STUN client'],
['HAS_PJLIB_GETOPT', 'PJLIB-UTIL getopt() functionality'],
['HAS_PJLIB_SCANNER', 'PJLIB-UTIL text scanner (needed by SIP parser)'],
['HAS_PJLIB_XML', 'PJLIB-UTIL tiny XML (parsing and API) (needs text scanner)'],
['HAS_PJLIB_DNS', 'PJLIB-UTIL DNS packet and parsing'],
['HAS_PJLIB_RESOLVER', 'PJLIB-UTIL Asynchronous DNS resolver/caching engine'],
['HAS_PJLIB_CRC32', 'PJLIB-UTIL CRC32 algorithm'],
['HAS_PJLIB_HMAC_MD5', 'PJLIB-UTIL HMAC-MD5 algorithm'],
['HAS_PJLIB_HMAC_SHA1', 'PJLIB-UTIL HMAC-SHA1 algorithm'],
# PJSIP
['HAS_PJSIP_CORE_MSG_ELEM', 'PJSIP Core - Messaging Elements and Parsing (message, headers, SIP URI, TEL URI/RFC 3966, etc.)'],
['HAS_PJSIP_CORE', 'PJSIP Core - Endpoint (transport management, module management, event distribution, etc.)'],
['HAS_PJSIP_CORE_MSG_UTIL', 'PJSIP Core - Stateless operations, SIP SRV, server resolution and fail-over'],
['HAS_PJSIP_UDP_TRANSPORT', 'PJSIP UDP transport'],
['', 'Subtotal: A minimalistic SIP application (parsing, UDP transport+STUN, no transaction)'],
['HAS_PJSIP_TCP_TRANSPORT', 'PJSIP TCP transport'],
['HAS_PJSIP_TLS_TRANSPORT', 'PJSIP TLS transport'],
['HAS_PJSIP_INFO', 'PJSIP INFO support (RFC 2976) (no special treatment, thus the zero size)'],
['HAS_PJSIP_TRANSACTION', 'PJSIP transaction and stateful API'],
['HAS_PJSIP_AUTH_CLIENT', 'PJSIP digest authentication client'],
['HAS_PJSIP_UA_LAYER', 'PJSIP User agent layer and base dialog and usage management (draft-ietf-sipping-dialogusage-01)'],
['HAS_PJMEDIA_SDP', 'PJMEDIA SDP Parsing and API (RFC 2327), needed by SDP negotiator'],
['HAS_PJMEDIA_SDP_NEGOTIATOR','PJMEDIA SDP negotiator (RFC 3264), needed by INVITE session'],
['HAS_PJSIP_INV_SESSION', 'PJSIP INVITE session API'],
['HAS_PJSIP_REGC', 'PJSIP client registration API'],
['', 'Subtotal: Minimal SIP application with registration (including digest authentication)'],
['HAS_PJSIP_EVENT_FRAMEWORK','PJSIP Event/SUBSCRIBE framework, RFC 3265 (needed by call transfer, and presence)'],
['HAS_PJSIP_CALL_TRANSFER', 'PJSIP Call Transfer/REFER support (RFC 3515)'],
['', 'Subtotal: Minimal SIP application with call transfer'],
['HAS_PJSIP_PRESENCE', 'PJSIP Presence subscription, including PIDF/X-PIDF support (RFC 3856, RFC 3863, etc) (needs XML)'],
['HAS_PJSIP_MESSAGE', 'PJSIP Instant Messaging/MESSAGE support (RFC 3428) (no special treatment, thus the zero size)'],
['HAS_PJSIP_IS_COMPOSING', 'PJSIP Message Composition indication (RFC 3994)'],
# Subtotal
['', 'Subtotal: Complete PJSIP package (call, registration, presence, IM) +STUN +GETOPT (+PJLIB), no media'],
# PJNATH
['HAS_PJNATH_STUN', 'PJNATH STUN'],
['HAS_PJNATH_ICE', 'PJNATH ICE'],
# PJMEDIA
['HAS_PJMEDIA_EC', 'PJMEDIA accoustic echo cancellation'],
['HAS_PJMEDIA_SND_DEV', 'PJMEDIA sound device backend (platform specific)'],
['HAS_PJMEDIA_SILENCE_DET', 'PJMEDIA Adaptive silence detector'],
['HAS_PJMEDIA', 'PJMEDIA endpoint'],
['HAS_PJMEDIA_PLC', 'PJMEDIA Packet Lost Concealment implementation (needed by G.711, GSM, and sound device port)'],
['HAS_PJMEDIA_SND_PORT', 'PJMEDIA sound device media port'],
['HAS_PJMEDIA_RESAMPLE', 'PJMEDIA resampling algorithm (large filter disabled)'],
['HAS_PJMEDIA_G711_CODEC', 'PJMEDIA G.711 codec (PCMA/PCMU, including PLC) (may have already been linked by other module)'],
['HAS_PJMEDIA_CONFERENCE', 'PJMEDIA conference bridge (needs resampling and silence detector)'],
['HAS_PJMEDIA_MASTER_PORT', 'PJMEDIA master port'],
['HAS_PJMEDIA_RTP', 'PJMEDIA stand-alone RTP'],
['HAS_PJMEDIA_RTCP', 'PJMEDIA stand-alone RTCP and media quality calculation'],
['HAS_PJMEDIA_JBUF', 'PJMEDIA stand-alone adaptive jitter buffer'],
['HAS_PJMEDIA_STREAM', 'PJMEDIA stream for remote media communication (needs RTP, RTCP, and jitter buffer)'],
['HAS_PJMEDIA_TONEGEN', 'PJMEDIA tone generator'],
['HAS_PJMEDIA_UDP_TRANSPORT','PJMEDIA UDP media transport'],
['HAS_PJMEDIA_FILE_PLAYER', 'PJMEDIA WAV file player'],
['HAS_PJMEDIA_FILE_CAPTURE', 'PJMEDIA WAV file writer'],
['HAS_PJMEDIA_MEM_PLAYER', 'PJMEDIA fixed buffer player'],
['HAS_PJMEDIA_MEM_CAPTURE', 'PJMEDIA fixed buffer writer'],
['HAS_PJMEDIA_ICE', 'PJMEDIA ICE transport'],
# Subtotal
['', 'Subtotal: Complete SIP and all PJMEDIA features (G.711 codec only)'],
# Codecs
['HAS_PJMEDIA_GSM_CODEC', 'PJMEDIA GSM codec (including PLC)'],
['HAS_PJMEDIA_SPEEX_CODEC', 'PJMEDIA Speex codec (narrowband, wideband, ultra-wideband)'],
['HAS_PJMEDIA_ILBC_CODEC', 'PJMEDIA iLBC codec'],
# Total
['', 'TOTAL: complete libraries (+all codecs)'],
]
# Executable size report, tuple of:
# <all flags>, <flags added>, <text size>, <data>, <bss>, <description>
exe_size = []
#
# Write the report to text file
#
def print_text_report(filename):
    """Write the footprint report accumulated in ``exe_size`` to a text file.

    Feature rows (flags != '') print the .text/.data/.bss size *delta*
    contributed by that feature (current row minus previous row); subtotal
    rows (flags == '') print absolute sizes.
    """
    output = open(filename, 'w')
    output.write('PJSIP and PJMEDIA footprint report\n')
    output.write('Auto-generated by pjsip-apps/build/get-footprint.py\n')
    output.write('\n')
    # Write Revision info.
    f = os.popen('svn info | grep Revision')
    output.write(f.readline())
    output.write('Date: ')
    output.write(time.asctime())
    output.write('\n')
    output.write('\n')
    # Write individual module size
    output.write('Footprint (in bytes):\n')
    output.write(' .text .data .bss Module Description\n')
    output.write('==========================================================\n')
    for i in range(1, len(exe_size)):
        e = exe_size[i]
        prev = exe_size[i - 1]
        if e[1] != '':
            # Feature row: deltas against the previous measurement.
            # (was Py2-only: `<>`, backquote-repr, string.atoi/string.rjust)
            output.write(' ')
            output.write(str(int(e[2]) - int(prev[2])).rjust(8))
            output.write(str(int(e[3]) - int(prev[3])).rjust(8))
            output.write(str(int(e[4]) - int(prev[4])).rjust(8))
            output.write(' ' + e[5] + '\n')
        else:
            # Subtotal row: absolute sizes.
            output.write(' ------------------------\n')
            output.write(' ')
            output.write(e[2].rjust(8))
            output.write(e[3].rjust(8))
            output.write(e[4].rjust(8))
            output.write(' ' + e[5] + '\n')
    output.write('\n')
    # Done
    output.close()
#
# Write the report to HTML file
#
def print_html_report():
    """Write the footprint report accumulated in ``exe_size`` as HTML.

    The output file is named ``footprint-<machine>-<os>.htm``.  Feature rows
    show .text/.data/.bss deltas against the previous row; subtotal rows
    (flags == '') show absolute sizes plus sizes minus the first ("empty
    application") baseline entry.
    """
    # Get Revision info.
    f = os.popen('svn info | grep Revision')
    revision = f.readline().split()[1]
    # Get Machine, OS, and CC name
    f = os.popen('make -f Footprint.mak print_name')
    names = f.readline().split()
    m = names[0]
    o = names[1]
    cc = names[2]
    cc_ver = names[3]
    # Open HTML file
    filename = 'footprint-' + m + '-' + o + '.htm'
    output = open(filename, 'w')
    title = 'PJSIP and PJMEDIA footprint report for ' + m + '-' + o + ' target'
    output.write('<HTML><HEAD>\n')
    output.write(' <TITLE>' + title + '</TITLE>\n')
    output.write(' <LINK href="/style/style.css" type="text/css" rel="stylesheet">\n')
    output.write('</HEAD>\n')
    output.write('<BODY bgcolor="white">\n')
    output.write('<!--#include virtual="/header.html" -->')
    output.write(' <H1>' + title + '</H1>\n')
    output.write('Auto-generated by pjsip-apps/build/get-footprint.py script\n')
    output.write('<p>Date: ' + time.asctime() + '<BR>\n')
    output.write('Revision: r' + revision + '</p>\n\n')
    output.write('<HR>\n')
    output.write('\n')
    # Info
    output.write('<H2>Build Configuration</H2>\n')
    # build.mak
    output.write('\n<H3>build.mak</H3>\n')
    output.write('<tt>\n')
    f = open('../../build.mak', 'r')
    for l in f.readlines():
        output.write(l + '<BR>\n')
    output.write('</tt>\n')
    output.write('<p>Using ' + cc + ' version ' + cc_ver + '</p>\n')
    # user.mak
    output.write('\n<H3>user.mak</H3>\n')
    output.write('<tt>\n')
    f = open('../../user.mak', 'r')
    for l in f.readlines():
        output.write(l + '<BR>\n')
    output.write('</tt>\n')
    # config_site.h
    output.write('\n<H3><pj/config.site.h></H3>\n')
    output.write('<tt>\n')
    f = os.popen('cpp -dM -I../../pjlib/include ../../pjlib/include/pj/config_site.h | grep PJ')
    for l in f.readlines():
        output.write(l + '<BR>\n')
    output.write('</tt>\n')
    # Write individual module size
    output.write('<H2>Footprint Report</H2>\n')
    output.write('<p>The table below shows the footprint of individual feature, in bytes.</p>')
    output.write('<TABLE border="1" cellpadding="2" cellspacing="0">\n' +
                 '<TR bgcolor="#e8e8ff">\n' +
                 ' <TD align="center"><strong>.text</strong></TD>\n' +
                 ' <TD align="center"><strong>.data</strong></TD>\n' +
                 ' <TD align="center"><strong>.bss</strong></TD>\n' +
                 ' <TD align="center"><strong>Features/Module Description</strong></TD>\n' +
                 '</TR>\n')
    for i in range(1, len(exe_size)):
        e = exe_size[i]
        prev = exe_size[i - 1]
        output.write('<TR>\n')
        if e[1] != '':
            # Feature row: size deltas against the previous build.
            output.write(' <TD align="right">' + str(int(e[2]) - int(prev[2])) + '</TD>\n')
            output.write(' <TD align="right">' + str(int(e[3]) - int(prev[3])) + '</TD>\n')
            output.write(' <TD align="right">' + str(int(e[4]) - int(prev[4])) + '</TD>\n')
            output.write(' <TD>' + e[5] + '</TD>\n')
        else:
            # Subtotal row: absolute sizes plus sizes relative to the
            # empty-application baseline (first recorded entry).
            empty_size = exe_size[1]
            output.write('<TR bgcolor="#e8e8ff">\n')
            output.write(' <TD align="right"> </TD>\n')
            output.write(' <TD align="right"> </TD>\n')
            output.write(' <TD align="right"> </TD>\n')
            output.write(' <TD><strong>' + e[5] + ': .text=' + e[2] + ', .data=' + e[3] + ', .bss=' + e[4])
            output.write('\n </strong> <BR>(Size minus empty application size: ' +
                         '.text=' + str(int(e[2]) - int(empty_size[2])) +
                         ', .data=' + str(int(e[3]) - int(empty_size[3])) +
                         # BUG FIX: this third label used to read ', .data='
                         # again, although it prints the .bss delta.
                         ', .bss=' + str(int(e[4]) - int(empty_size[4])) +
                         ')\n')
            output.write(' </TD>\n')
        output.write('</TR>\n')
    output.write('</TABLE>\n')
    output.write('<!--#include virtual="/footer.html" -->')
    output.write('</BODY>\n')
    output.write('</HTML>\n')
    # Done
    output.close()
#
# Get the size of individual feature
#
def get_size(all_flags, flags, desc):
    """Build footprint.exe with the given FCFLAGS and record its section
    sizes in the global ``exe_size`` list.

    Appends the tuple
    (<all flags>, <flags added>, <text>, <data>, <bss>, <description>).
    Exits the process when the build fails.
    """
    exe_file = 'footprint.exe'  # renamed: `file` shadowed the builtin
    # Clean first so the build truly reflects all_flags.
    os.system("make -f Footprint.mak FCFLAGS='" + all_flags + "' clean")
    # Make the executable
    cmd = "make -f Footprint.mak FCFLAGS='" + all_flags + "' all"
    rc = os.system(cmd)
    if rc != 0:
        sys.exit(1)
    # Run 'size' against the executable and skip the header line.
    f = os.popen('size ' + exe_file)
    f.readline()
    # Get the sizes
    size = f.readline()
    f.close()
    # Split into tokens
    tokens = size.split()
    # Build the size tuple and add to exe_size
    exe_size.append((all_flags, flags, tokens[0], tokens[1], tokens[2], desc))
    # Clean again so the next measurement starts from scratch.
    os.system("make -f Footprint.mak FCFLAGS='" + all_flags + "' clean")
# Main
#
# Seed the report with a zero-size baseline, then build once per feature,
# accumulating -D flags so each build adds exactly one feature on top of
# the previous one.
exe_size.append(('', '', '0', '0', '0', ''))
all_flags = ''
for elem in compile_flags:
    if elem[0] != '':
        flags = '-D' + elem[0]
        all_flags += flags + ' '
        get_size(all_flags, elem[0], elem[1])
    else:
        # Subtotal marker: repeat the previous row's sizes with empty flags.
        e = exe_size[-1]
        exe_size.append((all_flags, '', e[2], e[3], e[4], elem[1]))

#print_text_report('footprint.txt')
print_html_report()
|
xHeliotrope/injustice_dropper | refs/heads/master | env/lib/python3.4/site-packages/pip/_vendor/cachecontrol/wrapper.py | 953 | from .adapter import CacheControlAdapter
from .cache import DictCache
def CacheControl(sess,
                 cache=None,
                 cache_etags=True,
                 serializer=None,
                 heuristic=None):
    """Attach a caching adapter to `sess` for http/https and return it.

    A DictCache is used when no cache backend is supplied.
    """
    cache = cache or DictCache()
    adapter = CacheControlAdapter(cache,
                                  cache_etags=cache_etags,
                                  serializer=serializer,
                                  heuristic=heuristic)
    for prefix in ('http://', 'https://'):
        sess.mount(prefix, adapter)
    return sess
|
karllessard/tensorflow | refs/heads/master | tensorflow/python/ops/list_ops.py | 1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import handle_data_util
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
# list_ops -> control_flow_ops -> tensor_array_ops -> list_ops
control_flow_ops = LazyLoader(
"control_flow_ops", globals(),
"tensorflow.python.ops.control_flow_ops")
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def empty_tensor_list(element_shape,
                      element_dtype,
                      max_num_elements=None,
                      name=None):
  """Create an empty TensorList; -1 encodes an unbounded element count."""
  return gen_list_ops.empty_tensor_list(
      element_shape=_build_element_shape(element_shape),
      element_dtype=element_dtype,
      max_num_elements=-1 if max_num_elements is None else max_num_elements,
      name=name)
def _set_handle_data(list_handle, element_shape, element_dtype):
  """Sets type information on `list_handle` for consistency with graphs."""
  # TODO(b/169968286): It would be better if we had a consistent story for
  # creating handle data from eager operations (shared with VarHandleOp).
  if isinstance(list_handle, ops.EagerTensor):
    if tensor_util.is_tensor(element_shape):
      # A tensor-valued shape is not statically known; record unknown rank.
      element_shape = tensor_shape.TensorShape(None)
    elif not isinstance(element_shape, tensor_shape.TensorShape):
      element_shape = tensor_shape.TensorShape(element_shape)
    handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()
    handle_data.is_set = True
    handle_data.shape_and_type.append(
        cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
            shape=element_shape.as_proto(),
            dtype=element_dtype.as_datatype_enum,
            specialized_type=types_pb2.ST_TENSOR_LIST))
    # Private field write; mirrors what graph-mode shape inference records.
    list_handle._handle_data = handle_data  # pylint: disable=protected-access
def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):
  """Return a TensorList pre-sized to `num_elements` slots."""
  handle = gen_list_ops.tensor_list_reserve(
      element_shape=_build_element_shape(element_shape),
      num_elements=num_elements,
      element_dtype=element_dtype,
      name=name)
  # TODO(b/169968286): gen_ops needs to ensure the metadata is properly
  # populated for eager operations.
  _set_handle_data(handle, element_shape, element_dtype)
  return handle
def tensor_list_from_tensor(tensor, element_shape, name=None):
  """Build a TensorList whose elements are the rows of `tensor`."""
  tensor = ops.convert_to_tensor(tensor)
  handle = gen_list_ops.tensor_list_from_tensor(
      tensor=tensor,
      element_shape=_build_element_shape(element_shape),
      name=name)
  _set_handle_data(handle, tensor.shape, tensor.dtype)
  return handle
def tensor_list_get_item(input_handle, index, element_dtype, element_shape=None,
                         name=None):
  """Read the element at `index` out of a TensorList."""
  return gen_list_ops.tensor_list_get_item(
      input_handle=input_handle,
      index=index,
      element_shape=_build_element_shape(element_shape),
      element_dtype=element_dtype,
      name=name)
def tensor_list_pop_back(input_handle, element_dtype, name=None):
  """Pop the last element; -1 leaves the element shape unconstrained."""
  return gen_list_ops.tensor_list_pop_back(
      input_handle=input_handle,
      element_shape=-1,
      element_dtype=element_dtype,
      name=name)
def tensor_list_gather(input_handle,
                       indices,
                       element_dtype,
                       element_shape=None,
                       name=None):
  """Gather the elements at `indices` from a TensorList into one tensor."""
  return gen_list_ops.tensor_list_gather(
      input_handle=input_handle,
      indices=indices,
      element_shape=_build_element_shape(element_shape),
      element_dtype=element_dtype,
      name=name)
def tensor_list_scatter(tensor,
                        indices,
                        element_shape=None,
                        input_handle=None,
                        name=None):
  """Returns a TensorList created or updated by scattering `tensor`."""
  tensor = ops.convert_to_tensor(tensor)
  if input_handle is None:
    # No target list: build a fresh one from scratch.
    new_handle = gen_list_ops.tensor_list_scatter_v2(
        tensor=tensor,
        indices=indices,
        element_shape=_build_element_shape(element_shape),
        num_elements=-1,
        name=name)
    _set_handle_data(new_handle, element_shape, tensor.dtype)
    return new_handle
  # Scatter into the existing list, carrying its handle data over.
  new_handle = gen_list_ops.tensor_list_scatter_into_existing_list(
      input_handle=input_handle, tensor=tensor, indices=indices, name=name)
  handle_data_util.copy_handle_data(input_handle, new_handle)
  return new_handle
def tensor_list_stack(input_handle,
                      element_dtype,
                      num_elements=-1,
                      element_shape=None,
                      name=None):
  """Stack all elements of a TensorList into a single tensor."""
  return gen_list_ops.tensor_list_stack(
      input_handle=input_handle,
      element_shape=_build_element_shape(element_shape),
      element_dtype=element_dtype,
      num_elements=num_elements,
      name=name)
def tensor_list_concat(input_handle, element_dtype, element_shape=None,
                       name=None):
  """Concatenate all elements of a TensorList along the first dimension."""
  # TensorListConcatV2 also returns per-element lengths, which are only
  # needed for gradient computation; discard them here.
  concatenated, _ = gen_list_ops.tensor_list_concat_v2(
      input_handle=input_handle,
      element_dtype=element_dtype,
      element_shape=_build_element_shape(element_shape),
      leading_dims=ops.convert_to_tensor([], dtype=dtypes.int64),
      name=name)
  return concatenated
def tensor_list_split(tensor, element_shape, lengths, name=None):
  """Split `tensor` into a TensorList of pieces sized by `lengths`."""
  return gen_list_ops.tensor_list_split(
      tensor=tensor,
      element_shape=_build_element_shape(element_shape),
      lengths=lengths,
      name=name)
def tensor_list_set_item(input_handle,
                         index,
                         item,
                         resize_if_index_out_of_bounds=False,
                         name=None):
  """Sets `item` at `index` in input list."""
  if resize_if_index_out_of_bounds:
    input_list_size = gen_list_ops.tensor_list_length(input_handle)
    # TODO(srbs): This could cause some slowdown. Consider fusing resize
    # functionality in the SetItem op.
    # Grow the list to index+1 elements only when the index is past the end.
    input_handle = control_flow_ops.cond(
        index >= input_list_size,
        lambda: gen_list_ops.tensor_list_resize(  # pylint: disable=g-long-lambda
            input_handle, index + 1),
        lambda: input_handle)
  output_handle = gen_list_ops.tensor_list_set_item(
      input_handle=input_handle, index=index, item=item, name=name)
  # Preserve the list's element shape/dtype metadata on the new handle.
  handle_data_util.copy_handle_data(input_handle, output_handle)
  return output_handle
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
  """Gradient for TensorListPushBack: pop the pushed element's grad back off.

  Returns (grad w.r.t. the input list, grad w.r.t. the pushed element).
  """
  return gen_list_ops.tensor_list_pop_back(
      dresult,
      element_shape=array_ops.shape(op.inputs[1]),
      element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
  """Gradient for TensorListPopBack: push the popped element's grad back on."""
  if dlist is None:
    # No incoming list gradient: start from an empty list of matching shape.
    dlist = empty_tensor_list(
        element_dtype=delement.dtype,
        element_shape=gen_list_ops.tensor_list_element_shape(
            op.outputs[0], shape_type=dtypes.int32))
  if delement is None:
    delement = array_ops.zeros_like(op.outputs[1])
  # Second return is the (None) gradient for the element_shape input.
  return gen_list_ops.tensor_list_push_back(dlist, delement), None
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
  """Gradient for TensorListStack: unstack the grad tensor back into a list."""
  return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:]), None
@ops.RegisterGradient("TensorListConcat")
@ops.RegisterGradient("TensorListConcatV2")
def _TensorListConcatGrad(op, dtensor, unused_dlengths):
  """Gradient function for TensorListConcat."""
  # Split the concatenated gradient back into per-element pieces using the
  # lengths the forward op recorded in its second output.
  dlist = tensor_list_split(
      dtensor,
      element_shape=gen_list_ops.tensor_list_element_shape(
          op.inputs[0], shape_type=dtypes.int32),
      lengths=op.outputs[1])
  if op.type == "TensorListConcatV2":
    # V2 has two extra inputs (element_shape, leading_dims) with no gradient.
    return dlist, None, None
  else:
    return dlist
@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
  """Gradient for TensorListSplit: concatenate the list grad back together."""
  tensor, _, lengths = op.inputs
  # Element shape is the input's shape with the leading dim replaced by -1.
  element_shape = array_ops.slice(array_ops.shape(tensor), [1], [-1])
  element_shape = array_ops.concat([[-1], element_shape], axis=0)
  return gen_list_ops.tensor_list_concat_v2(
      dlist,
      element_shape=element_shape,
      leading_dims=lengths,
      element_dtype=op.inputs[0].dtype)[0], None, None
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
  """Gradient for TensorListFromTensor."""
  t = op.inputs[0]
  # Use the statically known leading dimension when available so the stacked
  # gradient has a fully defined first dim.
  if t.shape.dims and t.shape.dims[0].value is not None:
    num_elements = t.shape.dims[0].value
  else:
    num_elements = None
  if dlist is None:
    # No incoming gradient: substitute an empty list of matching shape.
    dlist = empty_tensor_list(
        element_dtype=t.dtype,
        element_shape=gen_list_ops.tensor_list_element_shape(
            op.outputs[0], shape_type=dtypes.int32))
  tensor_grad = gen_list_ops.tensor_list_stack(
      dlist,
      element_shape=array_ops.slice(array_ops.shape(t), [1], [-1]),
      element_dtype=t.dtype,
      num_elements=num_elements)
  # The element_shape input gets no gradient.
  shape_grad = None
  return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
  """Gradient for TensorListGetItem."""
  # Build a list of the same length as the input, zero everywhere except the
  # read index, which receives the item's gradient.
  list_size = gen_list_ops.tensor_list_length(op.inputs[0])
  list_grad = gen_list_ops.tensor_list_set_item(
      gen_list_ops.tensor_list_reserve(
          gen_list_ops.tensor_list_element_shape(op.inputs[0],
                                                 shape_type=dtypes.int32),
          list_size, element_dtype=ditem.dtype),
      index=op.inputs[1],
      item=ditem)
  # Neither the index nor the element_shape input is differentiable.
  index_grad = None
  element_shape_grad = None
  return list_grad, index_grad, element_shape_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
  """Gradient function for TensorListSetItem."""
  _, index, item = op.inputs
  # The written slot's gradient flows to `item`, not the input list, so zero
  # it out in the list gradient.
  list_grad = gen_list_ops.tensor_list_set_item(
      dlist, index=index, item=array_ops.zeros_like(item))
  index_grad = None
  # The item's gradient is whatever the output list's gradient holds at
  # the written index.
  element_grad = tensor_list_get_item(
      dlist,
      index,
      element_shape=array_ops.shape(item),
      element_dtype=item.dtype)
  return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListResize")
def _TensorListResizeGrad(op, dlist):
  """Gradient for TensorListResize: resize the grad list back to the input size."""
  input_list, _ = op.inputs
  input_list_size = gen_list_ops.tensor_list_length(input_list)
  return gen_list_ops.tensor_list_resize(dlist, input_list_size), None
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
  """Gradient function for TensorListGather."""
  input_list, indices, _ = op.inputs
  element_shape = gen_list_ops.tensor_list_element_shape(
      input_list, shape_type=dtypes.int32)
  num_elements = gen_list_ops.tensor_list_length(input_list)
  # Scatter the gathered gradients back to their original positions in a
  # list sized like the input.
  dlist = tensor_list_reserve(element_shape, num_elements, dtensor.dtype)
  dlist = tensor_list_scatter(
      tensor=dtensor, indices=indices, input_handle=dlist)
  return dlist, None, None
@ops.RegisterGradient("TensorListScatter")
@ops.RegisterGradient("TensorListScatterV2")
def _TensorListScatterGrad(op, dlist):
  """Gradient function for TensorListScatter."""
  tensor = op.inputs[0]
  indices = op.inputs[1]
  # Gather the scattered positions' gradients back into tensor form.
  dtensor = gen_list_ops.tensor_list_gather(
      dlist,
      indices,
      element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]),
      element_dtype=tensor.dtype)
  if op.type == "TensorListScatterV2":
    # V2 has an extra num_elements input with no gradient.
    return dtensor, None, None, None
  else:
    return dtensor, None, None
@ops.RegisterGradient("TensorListScatterIntoExistingList")
def _TensorListScatterIntoExistingListGrad(op, dlist):
  """Gradient function for TensorListScatterIntoExistingList.

  Returns (grad for the input list, grad for the scattered tensor, None for
  indices).  The input-list gradient is `dlist` with zeros written at the
  scattered positions, since those slots' gradients belong to `tensor`.
  """
  _, tensor, indices = op.inputs
  dtensor = gen_list_ops.tensor_list_gather(
      dlist,
      indices,
      element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]),
      element_dtype=tensor.dtype)
  zeros = array_ops.zeros_like(tensor)
  # FIX: the original passed `indices` a second time as the positional
  # `element_shape` argument of tensor_list_scatter.  That argument is
  # ignored when input_handle is given, so dropping it is behavior-identical
  # but removes the misleading call.
  dlist = tensor_list_scatter(zeros, indices, input_handle=dlist)
  return dlist, dtensor, None
def _build_element_shape(shape):
  """Converts shape to a format understood by list_ops for element_shape.

  If `shape` is already a `Tensor` it is returned as-is. We do not perform a
  type check here.

  If shape is None or a TensorShape with unknown rank, -1 is returned.

  If shape is a scalar, an int32 tensor with empty list is returned. Note we
  do directly return an empty list since ops.convert_to_tensor would convert
  it to a float32 which is not a valid type for element_shape.

  If shape is a sequence of dims, None's in the list are replaced with -1. We
  do not check the dtype of the other dims.

  Args:
    shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
      be a None, scalar or Tensor).

  Returns:
    A None-free shape that can be converted to a tensor.
  """
  if isinstance(shape, ops.Tensor):
    return shape
  if isinstance(shape, tensor_shape.TensorShape):
    # `TensorShape.as_list` requires rank to be known.
    shape = shape.as_list() if shape else None
  if shape is None:
    # Rank unknown.
    return -1
  if not shape:
    # Scalar element: keep the empty list, converted as int32.
    return ops.convert_to_tensor(shape, dtype=dtypes.int32)

  def _as_dim(dim):
    """Map one dimension entry to its -1-for-unknown representation."""
    if dim is None:
      return -1
    if isinstance(dim, tensor_shape.Dimension):
      return -1 if dim.value is None else dim.value
    # Scalars and Tensors pass through unchanged.
    return dim

  return [_as_dim(d) for d in shape]
|
hperreault/ProjectEuler | refs/heads/master | Problems.py | 1 | import os
import re
# A class representing the list of all the problems
# Used for unit testing the problems and their answers
class Problems:
    """Discovers Project Euler problem modules in the working directory.

    Problem files are expected to be named ``NNN_ClassName.py`` (three
    digits, an underscore, then the class name); each module defines a
    class of that name.  Used for unit testing the problems and their
    answers.
    """

    @staticmethod
    def get_problems():
        """Import every problem module found and return one instance of each."""
        files = [f for f in os.listdir('.') if os.path.isfile(f)]
        problems = []
        for f in files:
            # FIX: the original pattern "[\d\d\d_]" was a character class
            # matching ANY file starting with one digit or underscore;
            # anchor to the intended three-digit prefix instead.
            if re.match(r"\d{3}_", f):
                name_with_number = os.path.splitext(f)[0]
                class_name = name_with_number[4:]
                module = __import__(name_with_number, globals(), locals(),
                                    [class_name])
                # getattr instead of eval: same effect, no code execution
                # from a string built out of file names.
                problems.append(getattr(module, class_name)())
        return problems
Problems().get_problems()
|
liorvh/infernal-twin | refs/heads/master | build/pip/pip/_vendor/cachecontrol/caches/file_cache.py | 762 | import hashlib
import os
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
from ..cache import BaseCache
from ..controller import CacheController
def _secure_open_write(filename, fmode):
    """Open `filename` for writing as a brand-new file with mode `fmode`.

    The file is always created by us (O_CREAT|O_EXCL) after removing any
    pre-existing one, and symlinks are not followed where the platform
    supports O_NOFOLLOW.  Returns a binary file object.
    """
    # Write-only; creation must be exclusive so we know the mode we pass is
    # the mode the file actually gets.
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL

    # Refuse to follow a symlink planted at our path, when supported.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # Windows needs the file opened in binary mode.
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Clear the way for O_EXCL; a missing file is the normal case.
    try:
        os.remove(filename)
    except (IOError, OSError):
        pass

    # If something re-creates the path between the remove and this open,
    # O_EXCL makes the open fail instead of silently reusing the file.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # Wrapping the fd failed; don't leak it.
        os.close(fd)
        raise
class FileCache(BaseCache):
    """BaseCache implementation storing each entry as a file on disk.

    Keys are hashed with SHA-224; entries are sharded into five single-character
    sub-directories derived from the digest, and writes are serialized through
    a lockfile.
    """

    def __init__(self, directory, forever=False, filemode=0o0600,
                 dirmode=0o0700, use_dir_lock=None, lock_class=None):
        # `use_dir_lock` and `lock_class` both select the locking strategy;
        # supplying both would be ambiguous.
        if use_dir_lock is not None and lock_class is not None:
            raise ValueError("Cannot use use_dir_lock and lock_class together")

        if use_dir_lock:
            lock_class = MkdirLockFile

        if lock_class is None:
            lock_class = LockFile

        self.directory = directory
        self.forever = forever      # when True, delete() never removes files
        self.filemode = filemode    # mode bits for created cache files
        self.dirmode = dirmode      # mode bits for created directories
        self.lock_class = lock_class

    @staticmethod
    def encode(x):
        """Hash a key into the stable hex digest used as its file name."""
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        """Map a key to its on-disk path (5 shard dirs + full digest)."""
        # NOTE: This method should not change as some may depend on it.
        # See: https://github.com/ionrock/cachecontrol/issues/63
        hashed = self.encode(name)
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)

    def get(self, key):
        """Return the cached bytes for `key`, or None when absent."""
        name = self._fn(key)
        if not os.path.exists(name):
            return None

        with open(name, 'rb') as fh:
            return fh.read()

    def set(self, key, value):
        """Write `value` for `key`, creating shard directories as needed."""
        name = self._fn(key)

        # Make sure the directory exists
        try:
            os.makedirs(os.path.dirname(name), self.dirmode)
        except (IOError, OSError):
            pass

        # Hold the lock while writing so concurrent writers don't interleave.
        with self.lock_class(name) as lock:
            # Write our actual file
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        """Remove the entry for `key` unless the cache is marked `forever`."""
        name = self._fn(key)
        if not self.forever:
            os.remove(name)
def url_to_file_path(url, filecache):
    """Return the file cache path based on the URL.

    This does not ensure the file exists!
    """
    cache_key = CacheController.cache_url(url)
    return filecache._fn(cache_key)
|
GaloisInc/hacrypto | refs/heads/master | src/C/john-1.8.0-jumbo-1/run/7z2john.py | 2 | #!/usr/bin/python -u
#
# Copyright (c) 2013 by Dhiru Kholia, <dhiru (at) openwall.com>
#
# Python Bindings for LZMA
#
# Copyright (c) 2004-2010 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
"""Read from and write to 7zip format archives.
"""
from binascii import unhexlify
from datetime import datetime
try:
import pylzma
# To install pylzma on Ubuntu:
# apt-get install python-pip python-dev
# pip install pylzma # may do as non-root user in group staff
except ImportError:
pass
from struct import pack, unpack
from zlib import crc32
import zlib
import bz2
import binascii
import StringIO
import sys
import os
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
from functools import reduce
except ImportError:
# reduce is available in functools starting with Python 2.6
pass
try:
from pytz import UTC
except ImportError:
# pytz is optional, define own "UTC" timestamp
# reference implementation from Python documentation
from datetime import timedelta, tzinfo
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
try:
unicode
except NameError:
# Python 3.x
def unicode(s, encoding):
return s
else:
def bytes(s, encoding):
return s
READ_BLOCKSIZE = 16384
MAGIC_7Z = unhexlify('377abcaf271c') # '7z\xbc\xaf\x27\x1c'
PROPERTY_END = unhexlify('00') # '\x00'
PROPERTY_HEADER = unhexlify('01') # '\x01'
PROPERTY_ARCHIVE_PROPERTIES = unhexlify('02') # '\x02'
PROPERTY_ADDITIONAL_STREAMS_INFO = unhexlify('03') # '\x03'
PROPERTY_MAIN_STREAMS_INFO = unhexlify('04') # '\x04'
PROPERTY_FILES_INFO = unhexlify('05') # '\x05'
PROPERTY_PACK_INFO = unhexlify('06') # '\x06'
PROPERTY_UNPACK_INFO = unhexlify('07') # '\x07'
PROPERTY_SUBSTREAMS_INFO = unhexlify('08') # '\x08'
PROPERTY_SIZE = unhexlify('09') # '\x09'
PROPERTY_CRC = unhexlify('0a') # '\x0a'
PROPERTY_FOLDER = unhexlify('0b') # '\x0b'
PROPERTY_CODERS_UNPACK_SIZE = unhexlify('0c') # '\x0c'
PROPERTY_NUM_UNPACK_STREAM = unhexlify('0d') # '\x0d'
PROPERTY_EMPTY_STREAM = unhexlify('0e') # '\x0e'
PROPERTY_EMPTY_FILE = unhexlify('0f') # '\x0f'
PROPERTY_ANTI = unhexlify('10') # '\x10'
PROPERTY_NAME = unhexlify('11') # '\x11'
PROPERTY_CREATION_TIME = unhexlify('12') # '\x12'
PROPERTY_LAST_ACCESS_TIME = unhexlify('13') # '\x13'
PROPERTY_LAST_WRITE_TIME = unhexlify('14') # '\x14'
PROPERTY_ATTRIBUTES = unhexlify('15') # '\x15'
PROPERTY_COMMENT = unhexlify('16') # '\x16'
PROPERTY_ENCODED_HEADER = unhexlify('17') # '\x17'
COMPRESSION_METHOD_COPY = unhexlify('00') # '\x00'
COMPRESSION_METHOD_LZMA = unhexlify('03') # '\x03'
COMPRESSION_METHOD_CRYPTO = unhexlify('06') # '\x06'
COMPRESSION_METHOD_MISC = unhexlify('04') # '\x04'
COMPRESSION_METHOD_MISC_ZIP = unhexlify('0401') # '\x04\x01'
COMPRESSION_METHOD_MISC_BZIP = unhexlify('0402') # '\x04\x02'
COMPRESSION_METHOD_7Z_AES256_SHA256 = unhexlify('06f10701') # '\x06\xf1\x07\x01'
# Seconds between 1601/01/01 and 1970/01/01 (UTC); used to shift a 7z
# FILETIME onto the Unix epoch.
TIMESTAMP_ADJUST = -11644473600


def toTimestamp(filetime):
    """Convert 7z FILETIME to Python timestamp."""
    # A FILETIME counts 100-nanosecond intervals since 1601/01/01 (UTC),
    # so divide down to seconds before shifting epochs.
    seconds_since_1601 = filetime / 10000000.0
    return seconds_since_1601 + TIMESTAMP_ADJUST
class ArchiveError(Exception):
    """Base class for all errors raised by this 7z reader."""
    pass


class FormatError(ArchiveError):
    """Archive bytes do not match the expected 7z structure."""
    pass


class EncryptedArchiveError(ArchiveError):
    """Error subtype for encrypted archives."""
    pass


class UnsupportedCompressionMethodError(ArchiveError):
    """Error subtype for codecs this reader cannot handle."""
    pass


class DecryptionError(ArchiveError):
    """Base class for password/decryption failures."""
    pass


class NoPasswordGivenError(DecryptionError):
    """Decryption was required but no password was supplied."""
    pass


class WrongPasswordError(DecryptionError):
    """The supplied password did not decrypt the archive."""
    pass
class ArchiveTimestamp(long):
    """Windows FILETIME timestamp."""
    # NOTE: subclasses Python 2's `long`; this module targets Python 2.

    def __repr__(self):
        return '%s(%d)' % (type(self).__name__, self)

    def as_datetime(self):
        """Convert FILETIME to Python datetime object."""
        # toTimestamp() yields seconds since the Unix epoch; interpret as UTC.
        return datetime.fromtimestamp(toTimestamp(self), UTC)
class Base(object):
    """ base class with support for various basic read/write functions """

    def _readReal64Bit(self, file):
        """Read a fixed 8-byte little-endian integer; return (value, raw bytes)."""
        res = file.read(8)
        a, b = unpack('<LL', res)
        return b << 32 | a, res

    def _read64Bit(self, file):
        """Read a 7z variable-length integer.

        The count of leading 1-bits in the first byte gives the number of
        little-endian data bytes that follow; the remaining low bits of the
        first byte form the value's high part.
        """
        b = ord(file.read(1))
        mask = 0x80
        for i in range(8):
            if b & mask == 0:
                # i data bytes follow (most significant last on disk).
                bytes = list(unpack('%dB' % i, file.read(i)))
                bytes.reverse()
                value = (bytes and reduce(lambda x, y: x << 8 | y, bytes)) or 0
                highpart = b & (mask - 1)
                return value + (highpart << (i * 8))
            mask >>= 1

    def _readBoolean(self, file, count, checkall=0):
        """Read `count` bits as a list of booleans (MSB first per byte).

        With `checkall`, a leading non-zero byte means "all defined" and no
        bit vector follows.
        """
        if checkall:
            alldefined = file.read(1)
            if alldefined != unhexlify('00'):
                return [True] * count

        result = []
        b = 0
        mask = 0
        for i in range(count):
            if mask == 0:
                # Fetch the next byte of the bit vector.
                b = ord(file.read(1))
                mask = 0x80
            result.append(b & mask != 0)
            mask >>= 1

        return result

    def checkcrc(self, crc, data):
        """Return True when `crc` equals the unsigned CRC32 of `data`."""
        check = crc32(data) & 0xffffffff
        return crc == check
class PackInfo(Base):
    """ informations about packed streams """

    def __init__(self, file):
        # Offset of the packed streams and how many of them there are.
        self.packpos = self._read64Bit(file)
        self.numstreams = self._read64Bit(file)
        id = file.read(1)
        if id == PROPERTY_SIZE:
            self.packsizes = [self._read64Bit(file) for x in range(self.numstreams)]
            id = file.read(1)

        if id == PROPERTY_CRC:
            # NOTE(review): CRCs are read here with the variable-length
            # integer reader; elsewhere (Digests) CRCs are fixed 4-byte
            # values — verify against the 7z spec.
            self.crcs = [self._read64Bit(file) for x in range(self.numstreams)]
            id = file.read(1)

        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class Folder(Base):
    """ a "Folder" represents a stream of compressed data """

    def __init__(self, file):
        num_coders = self._read64Bit(file)
        self.numcoders = num_coders
        self.coders = []
        self.digestdefined = False
        totalin = 0
        self.totalout = 0
        for i in range(num_coders):
            # Each coder may have several "alternative" entries; the high bit
            # of the flags byte says whether more follow.
            while True:
                b = ord(file.read(1))
                methodsize = b & 0xf          # length of the method id
                issimple = b & 0x10 == 0      # simple coders have 1 in / 1 out
                noattributes = b & 0x20 == 0  # bit set -> properties follow
                last_alternative = b & 0x80 == 0
                c = {}
                c['method'] = file.read(methodsize)
                if not issimple:
                    c['numinstreams'] = self._read64Bit(file)
                    c['numoutstreams'] = self._read64Bit(file)
                else:
                    c['numinstreams'] = 1
                    c['numoutstreams'] = 1
                totalin += c['numinstreams']
                self.totalout += c['numoutstreams']
                if not noattributes:
                    c['properties'] = file.read(self._read64Bit(file))
                self.coders.append(c)
                if last_alternative:
                    break

        # Bind pairs connect one coder's output to another coder's input.
        numbindpairs = self.totalout - 1
        self.bindpairs = []
        for i in range(numbindpairs):
            self.bindpairs.append((self._read64Bit(file), self._read64Bit(file), ))

        # Streams not consumed by a bind pair are fed from packed data.
        numpackedstreams = totalin - numbindpairs
        self.numpackedstreams = numpackedstreams
        self.packed_indexes = []
        if numpackedstreams == 1:
            for i in range(totalin):
                if self.findInBindPair(i) < 0:
                    self.packed_indexes.append(i)
        elif numpackedstreams > 1:
            for i in range(numpackedstreams):
                self.packed_indexes.append(self._read64Bit(file))

    def getUnpackSize(self):
        """Return the unpacked size of the folder's final output stream.

        That is, the unpack size of the highest-indexed output that is not
        consumed by any bind pair.  `unpacksizes` is attached later by
        UnpackInfo; raises TypeError if no terminal output exists.
        """
        if not self.unpacksizes:
            return 0

        r = list(range(len(self.unpacksizes)))
        r.reverse()
        for i in r:
            if self.findOutBindPair(i):
                return self.unpacksizes[i]

        raise TypeError('not found')

    def findInBindPair(self, index):
        """Index of the bind pair whose input is `index`, or -1."""
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if a == index:
                return idx
        return -1

    def findOutBindPair(self, index):
        """Index of the bind pair whose output is `index`, or -1.

        NOTE(review): a match at bind-pair index 0 returns 0, which is falsy;
        callers treating the result as a boolean (see getUnpackSize) should
        confirm that is intended.
        """
        for idx in range(len(self.bindpairs)):
            a, b = self.bindpairs[idx]
            if b == index:
                return idx
        return -1
class Digests(Base):
    """ holds a list of checksums """

    def __init__(self, file, count):
        # Bit vector of which CRCs are present (a leading "all defined" byte
        # may replace it — see Base._readBoolean with checkall=1), followed
        # by `count` fixed 4-byte little-endian CRC32 values.
        self.defined = self._readBoolean(file, count, checkall=1)
        self.crcs = [unpack('<L', file.read(4))[0] for x in range(count)]

# Same wire format is reused for the unpack-stream digests.
UnpackDigests = Digests
class UnpackInfo(Base):
    """ combines multiple folders """

    def __init__(self, file):
        id = file.read(1)
        if id != PROPERTY_FOLDER:
            raise FormatError('folder id expected but %s found' % repr(id))
        self.numfolders = self._read64Bit(file)
        self.folders = []
        # 0x00: folders are stored inline; 0x01: they live in a separate
        # data stream referenced by index.
        external = file.read(1)
        if external == unhexlify('00'):
            self.folders = [Folder(file) for x in range(self.numfolders)]
        elif external == unhexlify('01'):
            self.datastreamidx = self._read64Bit(file)
        else:
            raise FormatError('0x00 or 0x01 expected but %s found' % repr(external))

        id = file.read(1)
        if id != PROPERTY_CODERS_UNPACK_SIZE:
            raise FormatError('coders unpack size id expected but %s found' % repr(id))
        # One unpack size per coder output, attached onto each folder.
        for folder in self.folders:
            folder.unpacksizes = [self._read64Bit(file) for x in range(folder.totalout)]

        id = file.read(1)
        if id == PROPERTY_CRC:
            # Optional per-folder CRCs.
            digests = UnpackDigests(file, self.numfolders)
            for idx in range(self.numfolders):
                folder = self.folders[idx]
                folder.digestdefined = digests.defined[idx]
                folder.crc = digests.crcs[idx]

            id = file.read(1)

        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class SubstreamsInfo(Base):
    """ defines the substreams of a folder """

    def __init__(self, file, numfolders, folders):
        """Parse per-folder substream counts, sizes and CRC digests.

        file       -- header stream positioned at this section's payload
        numfolders -- number of folders parsed by UnpackInfo
        folders    -- the Folder instances the substreams belong to
        """
        self.digests = []
        self.digestsdefined = []
        id = file.read(1)
        if id == PROPERTY_NUM_UNPACK_STREAM:
            self.numunpackstreams = [self._read64Bit(file) for x in range(numfolders)]
            id = file.read(1)
        else:
            # Section absent: every folder holds exactly one substream.
            self.numunpackstreams = [1] * numfolders

        if id == PROPERTY_SIZE:
            self.unpacksizes = []
            for i in range(len(self.numunpackstreams)):
                # Explicit sizes are stored for all but the last substream of
                # a folder; the last one is the folder total minus the rest.
                # BUG FIX: the running total must restart for every folder --
                # it previously accumulated across folders, so the computed
                # size of the last substream of every folder after the first
                # was wrong.
                consumed = 0
                for j in range(1, self.numunpackstreams[i]):
                    size = self._read64Bit(file)
                    self.unpacksizes.append(size)
                    consumed += size
                self.unpacksizes.append(folders[i].getUnpackSize() - consumed)
            id = file.read(1)

        numdigests = 0
        numdigeststotal = 0
        for i in range(numfolders):
            numsubstreams = self.numunpackstreams[i]
            # Folders with a single substream reuse the folder CRC, so only
            # the remaining substreams carry digests in this section.
            if numsubstreams != 1 or not folders[i].digestdefined:
                numdigests += numsubstreams
            numdigeststotal += numsubstreams

        if id == PROPERTY_CRC:
            digests = Digests(file, numdigests)
            didx = 0
            for i in range(numfolders):
                folder = folders[i]
                numsubstreams = self.numunpackstreams[i]
                if numsubstreams == 1 and folder.digestdefined:
                    self.digestsdefined.append(True)
                    self.digests.append(folder.crc)
                else:
                    for j in range(numsubstreams):
                        self.digestsdefined.append(digests.defined[didx])
                        self.digests.append(digests.crcs[didx])
                        didx += 1
            id = file.read(1)

        if id != PROPERTY_END:
            raise FormatError('end id expected but %r found' % id)

        if not self.digestsdefined:
            self.digestsdefined = [False] * numdigeststotal
            self.digests = [0] * numdigeststotal
class StreamsInfo(Base):
    """ informations about compressed streams """
    # Parses the optional PackInfo / UnpackInfo / SubstreamsInfo sections in
    # the fixed order used by the 7z header, then expects PROPERTY_END.
    def __init__(self, file):
        id = file.read(1)
        if id == PROPERTY_PACK_INFO:
            self.packinfo = PackInfo(file)
            id = file.read(1)
        if id == PROPERTY_UNPACK_INFO:
            self.unpackinfo = UnpackInfo(file)
            id = file.read(1)
        if id == PROPERTY_SUBSTREAMS_INFO:
            # NOTE(review): assumes PROPERTY_UNPACK_INFO was present whenever
            # this section appears, otherwise self.unpackinfo is unset.
            self.substreamsinfo = SubstreamsInfo(file, self.unpackinfo.numfolders, self.unpackinfo.folders)
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % repr(id))
class FilesInfo(Base):
    """ holds file properties """
    # Parses the per-file metadata section: empty-stream flags, names,
    # timestamps and attributes, stored as (type, size, payload) records.

    def _readTimes(self, file, files, name):
        # Reads one optional timestamp per file into files[i][name].
        defined = self._readBoolean(file, len(files), checkall=1)
        # NOTE: the "external" flag is currently ignored, should be 0x00
        external = file.read(1)
        for i in range(len(files)):
            if defined[i]:
                files[i][name] = ArchiveTimestamp(self._readReal64Bit(file)[0])
            else:
                files[i][name] = None

    def __init__(self, file):
        self.numfiles = self._read64Bit(file)
        self.files = [{'emptystream': False} for x in range(self.numfiles)]
        numemptystreams = 0
        # Property records follow until PROPERTY_END.
        while True:
            typ = self._read64Bit(file)
            if typ > 255:
                raise FormatError('invalid type, must be below 256, is %d' % typ)
            typ = pack('B', typ)
            if typ == PROPERTY_END:
                break
            size = self._read64Bit(file)
            buffer = BytesIO(file.read(size))
            if typ == PROPERTY_EMPTY_STREAM:
                isempty = self._readBoolean(buffer, self.numfiles)
                list(map(lambda x, y: x.update({'emptystream': y}), self.files, isempty))
                for x in isempty:
                    if x: numemptystreams += 1
                emptyfiles = [False] * numemptystreams
                antifiles = [False] * numemptystreams
            elif typ == PROPERTY_EMPTY_FILE:
                # NOTE(review): assumes PROPERTY_EMPTY_STREAM appeared first,
                # otherwise numemptystreams is still 0 -- verify against spec.
                emptyfiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_ANTI:
                antifiles = self._readBoolean(buffer, numemptystreams)
            elif typ == PROPERTY_NAME:
                external = buffer.read(1)
                if external != unhexlify('00'):
                    self.dataindex = self._read64Bit(buffer)
                    # XXX: evaluate external
                    raise NotImplementedError
                # File names are NUL-terminated UTF-16-LE strings.
                for f in self.files:
                    name = ''
                    while True:
                        ch = buffer.read(2)
                        if ch == unhexlify('0000'):
                            f['filename'] = name
                            break
                        name += ch.decode('utf-16')
            elif typ == PROPERTY_CREATION_TIME:
                self._readTimes(buffer, self.files, 'creationtime')
            elif typ == PROPERTY_LAST_ACCESS_TIME:
                self._readTimes(buffer, self.files, 'lastaccesstime')
            elif typ == PROPERTY_LAST_WRITE_TIME:
                self._readTimes(buffer, self.files, 'lastwritetime')
            elif typ == PROPERTY_ATTRIBUTES:
                defined = self._readBoolean(buffer, self.numfiles, checkall=1)
                for i in range(self.numfiles):
                    f = self.files[i]
                    if defined[i]:
                        f['attributes'] = unpack('<L', buffer.read(4))[0]
                    else:
                        f['attributes'] = None
            else:
                raise FormatError('invalid type %r' % (typ))
class Header(Base):
    """ the archive header """
    # Top-level header: optional archive properties, additional streams,
    # main streams and file info sections, in that fixed order.
    def __init__(self, file):
        id = file.read(1)
        if id == PROPERTY_ARCHIVE_PROPERTIES:
            self.properties = ArchiveProperties(file)
            id = file.read(1)
        if id == PROPERTY_ADDITIONAL_STREAMS_INFO:
            self.additional_streams = StreamsInfo(file)
            id = file.read(1)
        if id == PROPERTY_MAIN_STREAMS_INFO:
            self.main_streams = StreamsInfo(file)
            id = file.read(1)
        if id == PROPERTY_FILES_INFO:
            self.files = FilesInfo(file)
            id = file.read(1)
        if id != PROPERTY_END:
            raise FormatError('end id expected but %s found' % (repr(id)))
class ArchiveFile(Base):
    """ wrapper around a file in the archive """

    def __init__(self, info, start, src_start, size, folder, archive, maxsize=None):
        """Bind one archive member to its folder, offsets and metadata.

        info      -- dict of per-file properties; each key becomes an
                     attribute of this object (filename, attributes, ...)
        start     -- offset of this file inside the decompressed folder data
        src_start -- offset of the compressed data inside the archive file
        size      -- uncompressed size of this member
        folder    -- Folder instance describing the coder chain
        archive   -- owning Archive7z instance
        maxsize   -- decompressed folder size; only valid for solid archives
        """
        self.digest = None
        self._archive = archive
        self._file = archive._file
        self._start = start
        self._src_start = src_start
        self._folder = folder
        self.size = size
        # maxsize is only valid for solid archives
        self._maxsize = maxsize
        for k, v in info.items():
            setattr(self, k, v)
        self.reset()
        # Maps a coder method id (or any prefix of it) to a decoder method
        # name; read() resolves the name with getattr().
        self._decoders = {
            COMPRESSION_METHOD_COPY: '_read_copy',
            COMPRESSION_METHOD_LZMA: '_read_lzma',
            COMPRESSION_METHOD_MISC_ZIP: '_read_zip',
            COMPRESSION_METHOD_MISC_BZIP: '_read_bzip',
            COMPRESSION_METHOD_7Z_AES256_SHA256: '_read_7z_aes256_sha256',
        }

    def _is_encrypted(self):
        """Return True when any coder in this member's chain is AES-256."""
        return COMPRESSION_METHOD_7Z_AES256_SHA256 in [x['method'] for x in self._folder.coders]

    def reset(self):
        """Rewind the logical read position to the start of the member."""
        self.pos = 0

    def read(self):
        """Decode and return this member's data by running the coder chain."""
        if not self._folder.coders:
            raise TypeError("file has no coder informations")
        data = None
        for coder in self._folder.coders:
            method = coder['method']
            decoder = None
            # Method ids are hierarchical; retry with ever-shorter prefixes
            # until a registered decoder matches.
            while method and decoder is None:
                decoder = self._decoders.get(method, None)
                method = method[:-1]

            if decoder is None:
                raise UnsupportedCompressionMethodError(repr(coder['method']))

            data = getattr(self, decoder)(coder, data)
        return data

    def _read_copy(self, coder, input):
        """Stored (uncompressed) member: slice it straight out of the file."""
        if not input:
            self._file.seek(self._src_start)
            input = self._file.read(self.uncompressed)
        return input[self._start:self._start+self.size]

    def _read_from_decompressor(self, coder, decompressor, input, checkremaining=False, with_cache=False):
        """Feed compressed bytes through *decompressor* and slice out this file.

        checkremaining -- pass an explicit output limit to decompress()
        with_cache     -- for solid archives, remember the decompressor state
                          so the next member does not redo the folder prefix
        """
        data = ''
        properties = coder.get('properties', None)
        if properties:
            decompressor.decompress(properties)
        total = self.compressed
        if not input and total is None:
            # Solid archive: stream block-wise until this member's end.
            remaining = self._start+self.size
            out = BytesIO()
            cache = getattr(self._folder, '_decompress_cache', None)
            if cache is not None:
                data, pos, decompressor = cache
                out.write(data)
                remaining -= len(data)
                self._file.seek(pos)
            else:
                self._file.seek(self._src_start)
            checkremaining = checkremaining and not self._folder.solid
            while remaining > 0:
                data = self._file.read(READ_BLOCKSIZE)
                if checkremaining or (with_cache and len(data) < READ_BLOCKSIZE):
                    tmp = decompressor.decompress(data, remaining)
                else:
                    tmp = decompressor.decompress(data)
                assert len(tmp) > 0
                out.write(tmp)
                remaining -= len(tmp)

            data = out.getvalue()
            if with_cache and self._folder.solid:
                # don't decompress start of solid archive for next file
                # TODO: limit size of cached data
                self._folder._decompress_cache = (data, self._file.tell(), decompressor)
        else:
            if not input:
                self._file.seek(self._src_start)
                input = self._file.read(total)
            if checkremaining:
                data = decompressor.decompress(input, self._start+self.size)
            else:
                data = decompressor.decompress(input)
        return data[self._start:self._start+self.size]

    def _read_lzma(self, coder, input):
        """LZMA-compressed member (pylzma decompressor)."""
        dec = pylzma.decompressobj(maxlength=self._start+self.size)
        try:
            return self._read_from_decompressor(coder, dec, input, checkremaining=True, with_cache=True)
        except ValueError:
            # pylzma raises ValueError on garbage input, which for an
            # encrypted chain usually means the key was wrong.
            if self._is_encrypted():
                raise WrongPasswordError('invalid password')

            raise

    def _read_zip(self, coder, input):
        """Deflate-compressed member (raw stream, hence wbits=-15)."""
        dec = zlib.decompressobj(-15)
        return self._read_from_decompressor(coder, dec, input, checkremaining=True)

    def _read_bzip(self, coder, input):
        """Bzip2-compressed member."""
        dec = bz2.BZ2Decompressor()
        return self._read_from_decompressor(coder, dec, input)

    def _read_7z_aes256_sha256(self, coder, input):
        """Decrypt an AES-256-SHA-256 coder block with the archive password.

        BUG FIX: this method was named read_7z_aes256_sha256 (no leading
        underscore) while the _decoders table in __init__ dispatches on
        '_read_7z_aes256_sha256', so getattr() in read() raised
        AttributeError for every encrypted member.
        """
        if not self._archive.password:
            raise NoPasswordGivenError()
        # TODO: this needs some sanity checks
        firstbyte = ord(coder['properties'][0])
        numcyclespower = firstbyte & 0x3f
        if firstbyte & 0xc0 != 0:
            saltsize = (firstbyte >> 7) & 1
            ivsize = (firstbyte >> 6) & 1

            secondbyte = ord(coder['properties'][1])
            saltsize += (secondbyte >> 4)
            ivsize += (secondbyte & 0x0f)

            assert len(coder['properties']) == 2+saltsize+ivsize
            salt = coder['properties'][2:2+saltsize]
            iv = coder['properties'][2+saltsize:2+saltsize+ivsize]
            assert len(salt) == saltsize
            assert len(iv) == ivsize
            assert numcyclespower <= 24
            if ivsize < 16:
                # AES-CBC needs a full 16-byte IV; zero-pad the short one.
                iv += '\x00'*(16-ivsize)
        else:
            salt = iv = ''

        password = self._archive.password.encode('utf-16-le')
        key = pylzma.calculate_key(password, numcyclespower, salt=salt)
        cipher = pylzma.AESDecrypt(key, iv=iv)
        if not input:
            self._file.seek(self._src_start)
            uncompressed_size = self.uncompressed
            if uncompressed_size & 0x0f:
                # we need a multiple of 16 bytes
                uncompressed_size += 16 - (uncompressed_size & 0x0f)
            input = self._file.read(uncompressed_size)
        result = cipher.decrypt(input)
        return result

    # Backward-compatible alias for any external caller of the old name.
    read_7z_aes256_sha256 = _read_7z_aes256_sha256

    def checkcrc(self):
        """Verify the member data against its stored CRC32 (True if no CRC)."""
        if self.digest is None:
            return True
        self.reset()
        data = self.read()
        return super(ArchiveFile, self).checkcrc(self.digest, data)
# XXX global state: AES coder property fields shared with Archive7z below.
iv = None
ivSize = None
Salt = None
NumCyclesPower = None
SaltSize = None

def SetDecoderProperties2(data):
    """Decode the 7z AES-256-SHA-256 coder properties blob.

    Fills the module-level NumCyclesPower/SaltSize/Salt/ivSize/iv globals.
    Returns "S_OK" when the blob carries no salt/IV, "OK" after a full
    decode, and None when the KDF cycle count is out of range.
    """
    global iv, ivSize, Salt, NumCyclesPower, SaltSize
    blob = bytearray(data)
    cursor = 0
    first_byte = blob[cursor]
    cursor += 1
    NumCyclesPower = first_byte & 0x3F
    if NumCyclesPower > 24:
        # Bad NumCyclesPower value.
        return None
    if (first_byte & 0xC0) == 0:
        # XXX no salt/IV bits set.
        return "S_OK"
    SaltSize = (first_byte >> 7) & 1
    ivSize = (first_byte >> 6) & 1
    second_byte = blob[cursor]
    cursor += 1
    SaltSize += second_byte >> 4
    ivSize += second_byte & 0x0F
    # Extract the salt, then the IV that follows it.
    Salt = str(blob[cursor:cursor + SaltSize])
    cursor += SaltSize
    iv = str(blob[cursor:cursor + ivSize])
    if len(iv) < 16:
        iv = iv + "\x00" * (16 - len(iv))
    return "OK"
class Archive7z(Base):
    """ the archive itself """
    # NOTE(review): this class mixes Python 2 syntax (the bare ``print``
    # statement in __init__) with the Python 3 call bytes('', 'ascii');
    # it appears to target Python 2 and assume a bytes/str compat shim
    # defined earlier in the file -- verify before running.
    def __init__(self, file, password=None):
        self._file = file
        self.password = password
        # Signature, version and the CRC-protected locator of the
        # "next header" at the end of the archive.
        self.header = file.read(len(MAGIC_7Z))
        if self.header != MAGIC_7Z:
            raise FormatError('not a 7z file')
        self.version = unpack('BB', file.read(2))

        self.startheadercrc = unpack('<L', file.read(4))[0]
        self.nextheaderofs, data = self._readReal64Bit(file)
        crc = crc32(data)
        self.nextheadersize, data = self._readReal64Bit(file)
        crc = crc32(data, crc)
        data = file.read(4)
        self.nextheadercrc = unpack('<L', data)[0]
        crc = crc32(data, crc) & 0xffffffff
        if crc != self.startheadercrc:
            raise FormatError('invalid header data')
        self.afterheader = file.tell()

        file.seek(self.nextheaderofs, 1)
        buffer = BytesIO(file.read(self.nextheadersize))
        if not self.checkcrc(self.nextheadercrc, buffer.getvalue()):
            raise FormatError('invalid header data')

        while True:
            id = buffer.read(1)
            if not id or id == PROPERTY_HEADER:
                break

            if id != PROPERTY_ENCODED_HEADER:
                raise TypeError('Unknown field: %r' % (id))

            # Encrypted/compressed header: this is the case the script
            # cares about -- it emits a john "$7z$" hash line and stops.
            # ReadAndDecodePackedStreams (7zIn.cpp)
            streams = StreamsInfo(buffer)
            file.seek(self.afterheader + 0)
            data = bytes('', 'ascii')
            for folder in streams.unpackinfo.folders:
                file.seek(streams.packinfo.packpos, 1)
                props = folder.coders[0]['properties']
                # decode properties
                if SetDecoderProperties2(props):
                    # derive keys
                    # password = "password".encode('utf-16-le')
                    # print NumCyclesPower, Salt, password
                    # key = pylzma.calculate_key(password, NumCyclesPower, salt=Salt)
                    # cipher = pylzma.AESDecrypt(key, iv=str(iv))
                    global Salt
                    if len(Salt) == 0:
                        Salt = "\x11\x22" # fake salt
                    for idx in range(len(streams.packinfo.packsizes)):
                        tmp = file.read(streams.packinfo.packsizes[idx])
                        fname = os.path.basename(self._file.name)
                        # One crackable hash line per packed stream.
                        print "%s:$7z$0$%s$%s$%s$%s$%s$%s$%s$%s$%s" % (fname,
                            NumCyclesPower, SaltSize, binascii.hexlify(Salt),
                            ivSize, binascii.hexlify(iv), folder.crc, len(tmp),
                            folder.unpacksizes[idx], binascii.hexlify(tmp))
                        # print binascii.hexlify(tmp)
                        # result = cipher.decrypt(tmp)
                        # print folder.unpacksizes
                        # print folder.coders
                        # XXX we don't now how to handle unpacksizes of size > 1
                        # XXX we need to locate correct data and pass it to correct decompressor
                        # XXX correct decompressor can be located from folder.coders
                        # data = result # for checksum check
                        size = folder.unpacksizes[idx] # for checksum check
                        if len(folder.unpacksizes) > 1:
                            sys.stderr.write("%s : multiple unpacksizes found, not supported fully yet!\n" % fname)
                    # print binascii.hexlify(result)
                    # flds = Folder(BytesIO(result))
                    # print flds.coders
                    # print flds.packed_indexes, flds.totalout
                    # XXX return can't be right
                    return
            # else:
            #     for idx in range(len(streams.packinfo.packsizes)):
            #         tmp = file.read(streams.packinfo.packsizes[idx])
            #         data += pylzma.decompress(props+tmp, maxlength=folder.unpacksizes[idx])
            #
            #     if folder.digestdefined:
            #         if not self.checkcrc(folder.crc, data[0:size]):
            #             raise FormatError('invalid block data')
            #     # XXX return can't be right
            #     return

            # XXX this part is not done yet
            sys.stderr.write("%s : 7-Zip files without header encryption are *not* supported yet!\n" % (file.name))
            return

        # Plain (unencrypted) header path, inherited from py7zlib.
        buffer = BytesIO(file.read())
        id = buffer.read(1)
        self.files = []
        if not id:
            # empty archive
            self.solid = False
            self.numfiles = 0
            self.filenames = []
            return

        xx = FilesInfo(buffer)
        self.header = Header(buffer)
        files = self.header.files
        folders = self.header.main_streams.unpackinfo.folders
        packinfo = self.header.main_streams.packinfo
        subinfo = self.header.main_streams.substreamsinfo
        packsizes = packinfo.packsizes
        self.solid = packinfo.numstreams == 1
        if hasattr(subinfo, 'unpacksizes'):
            unpacksizes = subinfo.unpacksizes
        else:
            unpacksizes = [x.unpacksizes[0] for x in folders]

        # Walk the file records, pairing each non-empty stream with its
        # folder and offsets inside the packed data.
        fidx = 0
        obidx = 0
        src_pos = self.afterheader
        pos = 0
        folder_start = 0
        folder_pos = src_pos
        maxsize = (self.solid and packinfo.packsizes[0]) or None
        for idx in range(files.numfiles):
            info = files.files[idx]
            if info['emptystream']:
                continue

            folder = folders[fidx]
            folder.solid = subinfo.numunpackstreams[fidx] > 1
            maxsize = (folder.solid and packinfo.packsizes[fidx]) or None
            if folder.solid:
                # file is part of solid archive
                info['compressed'] = None
            elif obidx < len(packsizes):
                # file is compressed
                info['compressed'] = packsizes[obidx]
            else:
                # file is not compressed
                info['compressed'] = unpacksizes[obidx]
            info['uncompressed'] = unpacksizes[obidx]
            file = ArchiveFile(info, pos, src_pos, unpacksizes[obidx], folder, self, maxsize=maxsize)
            if subinfo.digestsdefined[obidx]:
                file.digest = subinfo.digests[obidx]
            self.files.append(file)
            if folder.solid:
                pos += unpacksizes[obidx]
            else:
                src_pos += info['compressed']
            obidx += 1
            if idx >= subinfo.numunpackstreams[fidx]+folder_start:
                folder_pos += packinfo.packsizes[fidx]
                src_pos = folder_pos
                folder_start = idx
                fidx += 1

        self.numfiles = len(self.files)
        # NOTE(review): Python 2 map() returns a list here; under Python 3
        # this would be a one-shot iterator.
        self.filenames = map(lambda x: x.filename, self.files)

    # interface like TarFile
    def getmember(self, name):
        # Linear scan; returns None when the name is absent.
        # XXX: store files in dictionary
        for f in self.files:
            if f.filename == name:
                return f

        return None

    def getmembers(self):
        return self.files

    def getnames(self):
        return self.filenames

    def list(self, verbose=True):
        # Print a summary (and per-file details when verbose).
        print ('total %d files in %sarchive' % (self.numfiles, (self.solid and 'solid ') or ''))
        if not verbose:
            print ('\n'.join(self.filenames))
            return

        for f in self.files:
            extra = (f.compressed and '%10d ' % (f.compressed)) or ' '
            print ('%10d%s%.8x %s' % (f.size, extra, f.digest, f.filename))
if __name__ == '__main__':
    # Require at least one archive argument; exit non-zero so scripted
    # callers notice the usage error instead of a silent no-op loop.
    if len(sys.argv) < 2:
        sys.stdout.write("Usage: %s < encrypted 7-Zip files >\n" % sys.argv[0])
        sys.exit(1)
    for filename in sys.argv[1:]:
        # Archive7z does all of its hash extraction inside the constructor,
        # so the file handle can be closed as soon as parsing finishes
        # (previously it was leaked).
        fp = open(filename, 'rb')
        try:
            Archive7z(fp)
        finally:
            fp.close()
|
maxamillion/ansible-modules-extras | refs/heads/devel | cloud/amazon/sns_topic.py | 15 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
description:
- The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
version_added: 2.0
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
options:
name:
description:
- The name or ARN of the SNS topic to converge
required: True
state:
description:
- Whether to create or destroy an SNS topic
required: False
default: present
choices: ["absent", "present"]
display_name:
description:
- Display name of the topic
required: False
default: None
policy:
description:
- Policy to apply to the SNS topic
required: False
default: None
delivery_policy:
description:
- Delivery policy to apply to the SNS topic
required: False
default: None
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
required: False
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
required: False
default: True
extends_documentation_fragment: aws
requirements: [ "boto" ]
"""
EXAMPLES = """
- name: Create alarm SNS topic
sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "my_email_address@example.com"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
RETURN = '''
sns_arn:
    description: The ARN of the topic you are modifying
    type: string
    sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"

sns_topic:
    description: Dict of sns topic details
    type: dict
    sample:
        name: sns-topic-name
        state: present
        display_name: default
        policy: {}
        delivery_policy: {}
        subscriptions_new: []
        subscriptions_existing: []
        subscriptions_deleted: []
        subscriptions_added: []
        subscriptions_purge: false
        check_mode: false
        topic_created: false
        topic_deleted: false
        attributes_set: []
'''
import time
import json
import re
try:
import boto.sns
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
class SnsTopicManager(object):
    """ Handles SNS Topic creation and destruction """

    def __init__(self,
                 module,
                 name,
                 state,
                 display_name,
                 policy,
                 delivery_policy,
                 subscriptions,
                 purge_subscriptions,
                 check_mode,
                 region,
                 **aws_connect_params):
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_boto_connection()
        self.changed = False
        self.module = module
        self.name = name
        self.state = state
        self.display_name = display_name
        self.policy = policy
        self.delivery_policy = delivery_policy
        self.subscriptions = subscriptions
        self.subscriptions_existing = []
        self.subscriptions_deleted = []
        self.subscriptions_added = []
        self.purge_subscriptions = purge_subscriptions
        self.check_mode = check_mode
        self.topic_created = False
        self.topic_deleted = False
        self.arn_topic = None
        self.attributes_set = []

    def _get_boto_connection(self):
        """Open an SNS connection in the configured region or fail the module."""
        try:
            return connect_to_aws(boto.sns, self.region,
                                  **self.aws_connect_params)
        except BotoServerError as err:
            self.module.fail_json(msg=err.message)

    def _get_all_topics(self):
        """Return the ARNs of every topic in the account (paginated)."""
        next_token = None
        topics = []
        while True:
            try:
                response = self.connection.get_all_topics(next_token)
            except BotoServerError as err:
                self.module.fail_json(msg=err.message)
            topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
            next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken']
            if not next_token:
                break
        return [t['TopicArn'] for t in topics]

    def _arn_topic_lookup(self):
        """Return the ARN whose topic name matches self.name, else None."""
        # topic names cannot have colons, so this captures the full topic name
        all_topics = self._get_all_topics()
        lookup_topic = ':%s' % self.name
        for topic in all_topics:
            if topic.endswith(lookup_topic):
                return topic

    def _create_topic(self):
        """Create the topic and poll until its ARN becomes visible."""
        self.changed = True
        self.topic_created = True
        if not self.check_mode:
            self.connection.create_topic(self.name)
            self.arn_topic = self._arn_topic_lookup()
            while not self.arn_topic:
                time.sleep(3)
                self.arn_topic = self._arn_topic_lookup()

    def _set_topic_attrs(self):
        """Converge display name, policy and delivery policy attributes."""
        topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
            ['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
            ['Attributes']

        if self.display_name and self.display_name != topic_attributes['DisplayName']:
            self.changed = True
            self.attributes_set.append('display_name')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
                                                     self.display_name)

        if self.policy and self.policy != json.loads(topic_attributes['Policy']):
            self.changed = True
            self.attributes_set.append('policy')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'Policy',
                                                     json.dumps(self.policy))

        if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
                self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
            self.changed = True
            self.attributes_set.append('delivery_policy')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
                                                     json.dumps(self.delivery_policy))

    def _canonicalize_endpoint(self, protocol, endpoint):
        """Normalize an endpoint: SMS numbers are reduced to their digits."""
        if protocol == 'sms':
            return re.sub('[^0-9]*', '', endpoint)
        return endpoint

    def _get_topic_subs(self):
        """Collect every existing subscription of the topic (paginated)."""
        next_token = None
        while True:
            response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
            self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
                ['ListSubscriptionsByTopicResult']['Subscriptions'])
            next_token = response['ListSubscriptionsByTopicResponse'] \
                ['ListSubscriptionsByTopicResult']['NextToken']
            if not next_token:
                break

    def _set_topic_subs(self):
        """Reconcile existing subscriptions against the requested list."""
        subscriptions_existing_list = []
        desired_subscriptions = [(sub['protocol'],
                                  self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
                                 self.subscriptions]

        if self.subscriptions_existing:
            for sub in self.subscriptions_existing:
                sub_key = (sub['Protocol'], sub['Endpoint'])
                subscriptions_existing_list.append(sub_key)
                # PendingConfirmation subscriptions cannot be purged by AWS.
                if self.purge_subscriptions and sub_key not in desired_subscriptions and \
                        sub['SubscriptionArn'] != 'PendingConfirmation':
                    self.changed = True
                    self.subscriptions_deleted.append(sub_key)
                    if not self.check_mode:
                        self.connection.unsubscribe(sub['SubscriptionArn'])

        for (protocol, endpoint) in desired_subscriptions:
            if (protocol, endpoint) not in subscriptions_existing_list:
                self.changed = True
                # BUG FIX: previously appended the stale loop variable 'sub'
                # (a NameError when no subscriptions existed yet, and the
                # wrong value otherwise); record the subscription actually
                # being added.
                self.subscriptions_added.append((protocol, endpoint))
                if not self.check_mode:
                    self.connection.subscribe(self.arn_topic, protocol, endpoint)

    def _delete_subscriptions(self):
        """Remove all confirmed subscriptions prior to topic deletion."""
        # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
        #       https://forums.aws.amazon.com/thread.jspa?threadID=85993
        for sub in self.subscriptions_existing:
            if sub['SubscriptionArn'] != 'PendingConfirmation':
                self.subscriptions_deleted.append(sub['SubscriptionArn'])
                self.changed = True
                if not self.check_mode:
                    self.connection.unsubscribe(sub['SubscriptionArn'])

    def _delete_topic(self):
        """Delete the topic itself."""
        self.topic_deleted = True
        self.changed = True
        if not self.check_mode:
            self.connection.delete_topic(self.arn_topic)

    def ensure_ok(self):
        """Converge to state=present: create if needed, then set attrs/subs."""
        self.arn_topic = self._arn_topic_lookup()
        if not self.arn_topic:
            self._create_topic()
        self._set_topic_attrs()
        self._get_topic_subs()
        self._set_topic_subs()

    def ensure_gone(self):
        """Converge to state=absent: drop subscriptions, then the topic."""
        self.arn_topic = self._arn_topic_lookup()
        if self.arn_topic:
            self._get_topic_subs()
            if self.subscriptions_existing:
                self._delete_subscriptions()
            self._delete_topic()

    def get_info(self):
        """Return the result dict exposed to the user as 'sns_topic'."""
        info = {
            'name': self.name,
            'state': self.state,
            'display_name': self.display_name,
            'policy': self.policy,
            'delivery_policy': self.delivery_policy,
            'subscriptions_new': self.subscriptions,
            'subscriptions_existing': self.subscriptions_existing,
            'subscriptions_deleted': self.subscriptions_deleted,
            'subscriptions_added': self.subscriptions_added,
            'subscriptions_purge': self.purge_subscriptions,
            'check_mode': self.check_mode,
            'topic_created': self.topic_created,
            'topic_deleted': self.topic_deleted,
            'attributes_set': self.attributes_set
        }
        return info
def main():
    """Module entry point: parse arguments and converge the SNS topic."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            display_name=dict(type='str', required=False),
            policy=dict(type='dict', required=False),
            delivery_policy=dict(type='dict', required=False),
            subscriptions=dict(default=[], type='list', required=False),
            purge_subscriptions=dict(type='bool', default=True),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    params = module.params
    state = params.get('state')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if not region:
        module.fail_json(msg="region must be specified")

    sns_topic = SnsTopicManager(module,
                                params.get('name'),
                                state,
                                params.get('display_name'),
                                params.get('policy'),
                                params.get('delivery_policy'),
                                params.get('subscriptions'),
                                params.get('purge_subscriptions'),
                                module.check_mode,
                                region,
                                **aws_connect_params)

    if state == 'present':
        sns_topic.ensure_ok()
    elif state == 'absent':
        sns_topic.ensure_gone()

    module.exit_json(changed=sns_topic.changed,
                     sns_arn=sns_topic.arn_topic,
                     sns_topic=sns_topic.get_info())


if __name__ == '__main__':
    main()
|
etzhou/edx-platform | refs/heads/master | common/djangoapps/django_locale/__init__.py | 81 | """
TODO: This module is imported from the stable Django 1.8 branch, as a
copy of https://github.com/django/django/blob/stable/1.8.x/django/middleware/locale.py.
Remove this file and re-import this middleware from Django once the
codebase is upgraded with a modern version of Django. [PLAT-671]
"""
|
GunoH/intellij-community | refs/heads/master | python/testData/intentions/PyAnnotateVariableTypeIntentionTest/typeCommentLocalSimpleAssignmentTargetWithExistingComment_after.py | 19 | def func():
var = 'foo' # type: [str] # comment
var |
qedi-r/home-assistant | refs/heads/dev | homeassistant/components/device_tracker/__init__.py | 2 | """Provide functionality to keep track of devices."""
import asyncio
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.components import group
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.const import ATTR_GPS_ACCURACY, STATE_HOME
from . import legacy, setup
from .config_entry import ( # noqa: F401 pylint: disable=unused-import
async_setup_entry,
async_unload_entry,
)
from .legacy import DeviceScanner # noqa: F401 pylint: disable=unused-import
from .const import (
ATTR_ATTRIBUTES,
ATTR_BATTERY,
ATTR_CONSIDER_HOME,
ATTR_DEV_ID,
ATTR_GPS,
ATTR_HOST_NAME,
ATTR_LOCATION_NAME,
ATTR_MAC,
ATTR_SOURCE_TYPE,
CONF_AWAY_HIDE,
CONF_CONSIDER_HOME,
CONF_NEW_DEVICE_DEFAULTS,
CONF_SCAN_INTERVAL,
CONF_TRACK_NEW,
DEFAULT_AWAY_HIDE,
DEFAULT_CONSIDER_HOME,
DEFAULT_TRACK_NEW,
DOMAIN,
PLATFORM_TYPE_LEGACY,
SOURCE_TYPE_BLUETOOTH_LE,
SOURCE_TYPE_BLUETOOTH,
SOURCE_TYPE_GPS,
SOURCE_TYPE_ROUTER,
)
ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format("all_devices")
SERVICE_SEE = "see"
SOURCE_TYPES = (
SOURCE_TYPE_GPS,
SOURCE_TYPE_ROUTER,
SOURCE_TYPE_BLUETOOTH,
SOURCE_TYPE_BLUETOOTH_LE,
)
NEW_DEVICE_DEFAULTS_SCHEMA = vol.Any(
None,
vol.Schema(
{
vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean,
vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
}
),
)
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SCAN_INTERVAL): cv.time_period,
vol.Optional(CONF_TRACK_NEW): cv.boolean,
vol.Optional(CONF_CONSIDER_HOME, default=DEFAULT_CONSIDER_HOME): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_NEW_DEVICE_DEFAULTS, default={}): NEW_DEVICE_DEFAULTS_SCHEMA,
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
SERVICE_SEE_PAYLOAD_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(ATTR_MAC, ATTR_DEV_ID),
{
ATTR_MAC: cv.string,
ATTR_DEV_ID: cv.string,
ATTR_HOST_NAME: cv.string,
ATTR_LOCATION_NAME: cv.string,
ATTR_GPS: cv.gps,
ATTR_GPS_ACCURACY: cv.positive_int,
ATTR_BATTERY: cv.positive_int,
ATTR_ATTRIBUTES: dict,
ATTR_SOURCE_TYPE: vol.In(SOURCE_TYPES),
ATTR_CONSIDER_HOME: cv.time_period,
# Temp workaround for iOS app introduced in 0.65
vol.Optional("battery_status"): str,
vol.Optional("hostname"): str,
},
)
)
@bind_hass
def is_on(hass: HomeAssistantType, entity_id: str = None):
    """Return whether the given tracked device (or any device) is home."""
    target = entity_id or ENTITY_ID_ALL_DEVICES
    return hass.states.is_state(target, STATE_HOME)
def see(
    hass: HomeAssistantType,
    mac: str = None,
    dev_id: str = None,
    host_name: str = None,
    location_name: str = None,
    gps: GPSType = None,
    gps_accuracy=None,
    battery: int = None,
    attributes: dict = None,
):
    """Call service to notify you see device."""
    payload = {}
    candidates = (
        (ATTR_MAC, mac),
        (ATTR_DEV_ID, dev_id),
        (ATTR_HOST_NAME, host_name),
        (ATTR_LOCATION_NAME, location_name),
        (ATTR_GPS, gps),
        (ATTR_GPS_ACCURACY, gps_accuracy),
        (ATTR_BATTERY, battery),
    )
    # Only forward the fields the caller actually supplied.
    for key, value in candidates:
        if value is not None:
            payload[key] = value
    if attributes:
        payload[ATTR_ATTRIBUTES] = attributes
    hass.services.call(DOMAIN, SERVICE_SEE, payload)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the device tracker."""
    tracker = await legacy.get_tracker(hass, config)

    # Instantiate every legacy (scanner-style) platform from YAML config and
    # set them up concurrently.
    legacy_platforms = await setup.async_extract_config(hass, config)

    setup_tasks = [
        legacy_platform.async_setup_legacy(hass, tracker)
        for legacy_platform in legacy_platforms
    ]

    if setup_tasks:
        await asyncio.wait(setup_tasks)

    tracker.async_setup_group()

    async def async_platform_discovered(p_type, info):
        """Load a platform."""
        # Discovery may also surface config-entry platforms; only legacy
        # platforms are set up through this path.
        platform = await setup.async_create_platform_type(hass, config, p_type, {})

        if platform is None or platform.type != PLATFORM_TYPE_LEGACY:
            return

        await platform.async_setup_legacy(hass, tracker, info)

    discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)

    # Clean up stale devices
    async_track_utc_time_change(
        hass, tracker.async_update_stale, second=range(0, 60, 5)
    )

    async def async_see_service(call):
        """Service to see a device."""
        # Temp workaround for iOS, introduced in 0.65
        data = dict(call.data)
        data.pop("hostname", None)
        data.pop("battery_status", None)
        await tracker.async_see(**data)

    hass.services.async_register(
        DOMAIN, SERVICE_SEE, async_see_service, SERVICE_SEE_PAYLOAD_SCHEMA
    )

    # restore previously tracked devices from the registry
    await tracker.async_setup_tracked_device()
    return True
|
robinro/ansible-modules-extras | refs/heads/devel | commands/expect.py | 18 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import datetime
try:
import pexpect
HAS_PEXPECT = True
except ImportError:
HAS_PEXPECT = False
DOCUMENTATION = '''
---
module: expect
version_added: 2.0
short_description: Executes a command and responds to prompts
description:
- The M(expect) module executes a command and responds to prompts
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work
options:
command:
description:
- the command module takes command to run.
required: true
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: false
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
required: false
chdir:
description:
- cd into this directory before running the command
required: false
responses:
description:
- Mapping of expected string/regex and string to respond with. If the
response is a list, successive matches return successive
responses. List functionality is new in 2.1.
required: true
timeout:
description:
- Amount of time in seconds to wait for the expected strings
default: 30
echo:
description:
- Whether or not to echo out your response strings
default: false
requirements:
- python >= 2.6
- pexpect >= 3.3
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you must specify a shell in the command such as
C(/bin/bash -c "/path/to/something | grep else")
- The question, or key, under I(responses) is a python regex match. Case
insensitive searches are indicated with a prefix of C(?i)
- By default, if a question is encountered multiple times, it's string
response will be repeated. If you need different responses for successive
question matches, instead of a string response, use a list of strings as
the response. The list functionality is new in 2.1
author: "Matt Martz (@sivel)"
'''
EXAMPLES = '''
# Case insensitve password string match
- expect:
command: passwd username
responses:
(?i)password: "MySekretPa$$word"
# Generic question with multiple different responses
- expect:
command: /path/to/custom/command
responses:
Question:
- response1
- response2
- response3
'''
def response_closure(module, question, responses):
    """Return a callback that replies with successive entries of *responses*.

    Each invocation of the returned function yields the next response for
    *question*; when the list is exhausted the module fails the task,
    including the last chunk of child output for debugging.

    :param module: AnsibleModule instance used to fail the task
    :param question: the prompt (regex) these responses belong to
    :param responses: list of replies, consumed in order
    """
    # Normalise every response to text ending in exactly one newline.
    # Decode bytes first so this works on both Python 2 and Python 3
    # (str objects have no .decode() on Python 3).
    resp_gen = (u'%s\n' % (r.decode() if isinstance(r, bytes) else r).rstrip('\n')
                for r in responses)

    def wrapped(info):
        try:
            # next() builtin instead of the Python-2-only .next() method.
            return next(resp_gen)
        except StopIteration:
            module.fail_json(msg="No remaining responses for '%s', "
                                 "output was '%s'" %
                                 (question,
                                  info['child_result_list'][-1]))
    return wrapped
def main():
    """Entry point: run a command under pexpect and answer its prompts."""
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=True),
            chdir=dict(),
            creates=dict(),
            removes=dict(),
            responses=dict(type='dict', required=True),
            timeout=dict(type='int', default=30),
            echo=dict(type='bool', default=False),
        )
    )

    if not HAS_PEXPECT:
        module.fail_json(msg='The pexpect python module is required')

    chdir = module.params['chdir']
    args = module.params['command']
    creates = module.params['creates']
    removes = module.params['removes']
    responses = module.params['responses']
    timeout = module.params['timeout']
    echo = module.params['echo']

    # Build the pexpect "events" mapping: prompt pattern -> reply string,
    # or a closure yielding successive replies when a list was supplied.
    events = dict()
    for key, value in responses.iteritems():
        if isinstance(value, list):
            response = response_closure(module, key, value)
        else:
            response = u'%s\n' % value.rstrip('\n').decode()
        events[key.decode()] = response

    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    if chdir:
        chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
        if os.path.exists(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % v,
                changed=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
        if not os.path.exists(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % v,
                changed=False,
                rc=0
            )

    startd = datetime.datetime.now()

    try:
        try:
            # Prefer pexpect.run from pexpect>=4
            out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True,
                                  events=events, cwd=chdir, echo=echo,
                                  encoding='utf-8')
        except TypeError:
            # Use pexpect.runu in pexpect>=3.3,<4
            out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True,
                                   events=events, cwd=chdir, echo=echo)
    except (TypeError, AttributeError):
        e = get_exception()
        # This should catch all insufficient versions of pexpect
        # We deem them insufficient for their lack of ability to specify
        # to not echo responses via the run/runu functions, which would
        # potentially leak sensitive information
        module.fail_json(msg='Insufficient version of pexpect installed '
                             '(%s), this module requires pexpect>=3.3. '
                             'Error was %s' % (pexpect.__version__, e))
    except pexpect.ExceptionPexpect:
        e = get_exception()
        module.fail_json(msg='%s' % e)

    endd = datetime.datetime.now()
    delta = endd - startd

    if out is None:
        out = ''

    ret = dict(
        cmd=args,
        stdout=out.rstrip('\r\n'),
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )

    # pexpect reports rc=None when the child hit the timeout.
    if rc is not None:
        module.exit_json(**ret)
    else:
        ret['msg'] = 'command exceeded timeout'
        module.fail_json(**ret)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
main()
|
jkeifer/pyHytemporal | refs/heads/master | old_TO_MIGRATE/test4.py | 1 | __author__ = 'phoetrymaster'
import os
os.system("gdal_translate -ot Float32 -of GTiff -scale ") |
xuemy/btbbs | refs/heads/master | btbbs/urls.py | 1 | """btbbs URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
import bbs.urls
# Route the site root to the bbs app; keep the admin under /admin/.
urlpatterns = [
    url(r'^', include(bbs.urls)),
    url(r'^admin/', admin.site.urls),
]
markflyhigh/incubator-beam | refs/heads/master | sdks/python/apache_beam/io/gcp/datastore/v1new/datastoreio_test.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for datastoreio."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import math
import unittest
from mock import MagicMock
from mock import call
from mock import patch
# Protect against environments where datastore library is not available.
try:
from apache_beam.io.gcp.datastore.v1 import util
from apache_beam.io.gcp.datastore.v1new import helper
from apache_beam.io.gcp.datastore.v1new import query_splitter
from apache_beam.io.gcp.datastore.v1new.datastoreio import DeleteFromDatastore
from apache_beam.io.gcp.datastore.v1new.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1new.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1new.types import Key
from google.cloud.datastore import client
from google.cloud.datastore import entity
from google.cloud.datastore import helpers
from google.cloud.datastore import key
# Keep this import last so it doesn't import conflicting pb2 modules.
from apache_beam.io.gcp.datastore.v1 import datastoreio_test # pylint: disable=ungrouped-imports
DatastoreioTestBase = datastoreio_test.DatastoreioTest
# TODO(BEAM-4543): Remove TypeError once googledatastore dependency is removed.
except (ImportError, TypeError):
client = None
DatastoreioTestBase = unittest.TestCase
class FakeMutation(object):
    """Stand-in for a Datastore mutation request.

    Exactly one of ``entity`` (an upsert) or ``key`` (a delete) is
    expected to be set.
    """

    def __init__(self, entity=None, key=None):
        """Remember the entity or key this fake mutation wraps.

        Args:
          entity: (``google.cloud.datastore.entity.Entity``) entity
            representing this upsert mutation.
          key: (``google.cloud.datastore.key.Key``) key representing
            this delete mutation.
        """
        self.entity = entity
        self.key = key

    def ByteSize(self):
        """Return the serialized (protobuf) size of the wrapped item."""
        if self.entity is None:
            return self.key.to_protobuf().ByteSize()
        return helpers.entity_to_protobuf(self.entity).ByteSize()
class FakeBatch(object):
    """Minimal fake of ``google.cloud.datastore.batch.Batch``."""

    def __init__(self, all_batch_items=None, commit_count=None):
        """Create the fake batch.

        Args:
          all_batch_items: (list) If set, every entity/key added to this
            batch is also appended here.
          commit_count: (list of int) If set, commit_count[0] is
            incremented on each ``commit``.
        """
        self._all_batch_items = all_batch_items
        self._commit_count = commit_count
        self.mutations = []

    def _track(self, item):
        # Mirror the item into the caller-supplied collection list.
        if self._all_batch_items is not None:
            self._all_batch_items.append(item)

    def put(self, _entity):
        assert isinstance(_entity, entity.Entity)
        self.mutations.append(FakeMutation(entity=_entity))
        self._track(_entity)

    def delete(self, _key):
        assert isinstance(_key, key.Key)
        self.mutations.append(FakeMutation(key=_key))
        self._track(_key)

    def begin(self):
        pass

    def commit(self):
        if self._commit_count:
            self._commit_count[0] += 1
@unittest.skipIf(client is None, 'Datastore dependencies are not installed')
class DatastoreioTest(DatastoreioTestBase):
    """
    NOTE: This test inherits test cases from DatastoreioTestBase.
    Please prefer to add new test cases to v1/datastoreio_test if possible.
    """

    def setUp(self):
        # Mocked client/query pair used by most tests; a real (network-less)
        # client is kept for cases that need actual entity round-tripping.
        self._WRITE_BATCH_INITIAL_SIZE = util.WRITE_BATCH_INITIAL_SIZE
        self._mock_client = MagicMock()
        self._mock_client.project = self._PROJECT
        self._mock_client.namespace = self._NAMESPACE
        self._mock_query = MagicMock()
        self._mock_query.limit = None
        self._mock_query.order = None
        self._real_client = client.Client(
            project=self._PROJECT, namespace=self._NAMESPACE,
            # Don't do any network requests.
            _http=MagicMock())

    def get_timestamp(self):
        # Fixed timestamp keeps assertions deterministic across runs.
        return datetime.datetime(2019, 3, 14, 15, 9, 26, 535897)

    def test_SplitQueryFn_with_num_splits(self):
        # When an explicit num_splits is requested, it is used as-is.
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            num_splits = 23
            expected_num_splits = 23

            def fake_get_splits(unused_client, query, num_splits):
                return [query] * num_splits

            with patch.object(query_splitter, 'get_splits',
                              side_effect=fake_get_splits):
                split_query_fn = ReadFromDatastore._SplitQueryFn(num_splits)
                split_queries = split_query_fn.process(self._mock_query)
                self.assertEqual(expected_num_splits, len(split_queries))

    def test_SplitQueryFn_without_num_splits(self):
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            # Force _SplitQueryFn to compute the number of query splits
            num_splits = 0
            expected_num_splits = 23
            entity_bytes = (expected_num_splits *
                            ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
            with patch.object(
                ReadFromDatastore._SplitQueryFn, 'get_estimated_size_bytes',
                return_value=entity_bytes):

                def fake_get_splits(unused_client, query, num_splits):
                    return [query] * num_splits

                with patch.object(query_splitter, 'get_splits',
                                  side_effect=fake_get_splits):
                    split_query_fn = ReadFromDatastore._SplitQueryFn(num_splits)
                    split_queries = split_query_fn.process(self._mock_query)
                    self.assertEqual(expected_num_splits, len(split_queries))

    def test_SplitQueryFn_with_query_limit(self):
        """A test that verifies no split is performed when the query has a limit."""
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            num_splits = 4
            expected_num_splits = 1
            self._mock_query.limit = 3
            split_query_fn = ReadFromDatastore._SplitQueryFn(num_splits)
            split_queries = split_query_fn.process(self._mock_query)
            self.assertEqual(expected_num_splits, len(split_queries))

    def test_SplitQueryFn_with_exception(self):
        """A test that verifies that no split is performed when failures occur."""
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            # Force _SplitQueryFn to compute the number of query splits
            num_splits = 0
            expected_num_splits = 1
            entity_bytes = (expected_num_splits *
                            ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
            with patch.object(
                ReadFromDatastore._SplitQueryFn, 'get_estimated_size_bytes',
                return_value=entity_bytes):
                # A failing splitter should fall back to a single split.
                with patch.object(query_splitter, 'get_splits',
                                  side_effect=query_splitter.QuerySplitterError(
                                      "Testing query split error")):
                    split_query_fn = ReadFromDatastore._SplitQueryFn(num_splits)
                    split_queries = split_query_fn.process(self._mock_query)
                    self.assertEqual(expected_num_splits, len(split_queries))
                    self.assertEqual(self._mock_query, split_queries[0])

    def check_DatastoreWriteFn(self, num_entities, use_fixed_batch_size=False):
        """A helper function to test _DatastoreWriteFn."""
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            entities = helper.create_entities(num_entities)
            expected_entities = [entity.to_client_entity() for entity in entities]
            # Infer project from write fn project arg.
            if num_entities:
                key = Key(['k1', 1234], project=self._PROJECT)
                expected_key = key.to_client_key()
                key.project = None
                entities[0].key = key
                expected_entities[0].key = expected_key
            all_batch_entities = []
            commit_count = [0]
            self._mock_client.batch.side_effect = (
                lambda: FakeBatch(all_batch_items=all_batch_entities,
                                  commit_count=commit_count))
            datastore_write_fn = WriteToDatastore._DatastoreWriteFn(self._PROJECT)

            datastore_write_fn.start_bundle()
            for entity in entities:
                datastore_write_fn.process(entity)
            datastore_write_fn.finish_bundle()

            self.assertListEqual([e.key for e in all_batch_entities],
                                 [e.key for e in expected_entities])
            # At least ceil(n / max batch size) commits must have happened.
            batch_count = math.ceil(num_entities / util.WRITE_BATCH_MAX_SIZE)
            self.assertLessEqual(batch_count, commit_count[0])

    def test_DatastoreWriteLargeEntities(self):
        """100*100kB entities gets split over two Commit RPCs."""
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            entities = helper.create_entities(100)
            commit_count = [0]
            self._mock_client.batch.side_effect = (
                lambda: FakeBatch(commit_count=commit_count))
            datastore_write_fn = WriteToDatastore._DatastoreWriteFn(
                self._PROJECT)
            datastore_write_fn.start_bundle()
            for entity in entities:
                entity.set_properties({'large': u'A' * 100000})
                datastore_write_fn.process(entity)
            datastore_write_fn.finish_bundle()

            self.assertEqual(2, commit_count[0])

    def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
        """A helper method to test get_estimated_size_bytes"""
        self._mock_client.namespace = namespace
        self._mock_client.query.return_value = self._mock_query
        self._mock_query.project = self._PROJECT
        self._mock_query.namespace = namespace
        # First fetch returns the latest stats timestamp, second returns
        # the entity_bytes recorded for the kind at that timestamp.
        self._mock_query.fetch.side_effect = [
            [{'timestamp': timestamp}],
            [{'entity_bytes': entity_bytes}],
        ]
        self._mock_query.kind = self._KIND

        split_query_fn = ReadFromDatastore._SplitQueryFn(num_splits=0)
        self.assertEqual(entity_bytes,
                         split_query_fn.get_estimated_size_bytes(self._mock_client,
                                                                 self._mock_query))
        # Stat kinds differ depending on whether a namespace is in use.
        if namespace is None:
            ns_keyword = '_'
        else:
            ns_keyword = '_Ns_'
        self._mock_client.query.assert_has_calls([
            call(kind='__Stat%sTotal__' % ns_keyword, order=['-timestamp']),
            call().fetch(limit=1),
            call(kind='__Stat%sKind__' % ns_keyword),
            call().add_filter('kind_name', '=', self._KIND),
            call().add_filter('timestamp', '=', timestamp),
            call().fetch(limit=1),
        ])

    def test_DatastoreDeleteFn(self):
        with patch.object(helper, 'get_client', return_value=self._mock_client):
            keys = [entity.key for entity in helper.create_entities(10)]
            expected_keys = [key.to_client_key() for key in keys]

            # Infer project from delete fn project arg.
            key = Key(['k1', 1234], project=self._PROJECT)
            expected_key = key.to_client_key()
            key.project = None
            keys.append(key)
            expected_keys.append(expected_key)

            all_batch_keys = []
            self._mock_client.batch.side_effect = (
                lambda: FakeBatch(all_batch_items=all_batch_keys))
            datastore_delete_fn = DeleteFromDatastore._DatastoreDeleteFn(
                self._PROJECT)

            datastore_delete_fn.start_bundle()
            for key in keys:
                datastore_delete_fn.process(key)
            datastore_delete_fn.finish_bundle()

            self.assertListEqual(all_batch_keys, expected_keys)
# Hide base class from collection by nose.
del DatastoreioTestBase

# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/scipy/weave/blitz_tools.py | 97 | from __future__ import absolute_import, print_function
import parser
import sys
import warnings
import copy
import numpy
from . import ast_tools
from . import slice_handler
from . import size_check
from . import converters
from . import inline_tools
from .inline_tools import attempt_function_call
function_catalog = inline_tools.function_catalog
function_cache = inline_tools.function_cache
class BlitzWarning(UserWarning):
    """Warning category for blitz compilation failures.

    Emitted when the expression cannot be compiled and is executed as
    plain Python code instead.
    """
    pass
def blitz(expr,local_dict=None, global_dict=None,check_size=1,verbose=0,**kw):
    """Compile *expr* to a blitz++ extension and execute it in place.

    Parameters
    ----------
    expr : str
        An array expression (assignment statement) to evaluate.
    local_dict, global_dict : dict, optional
        Namespaces used to resolve the variables in *expr*; default to
        the calling frame's locals and globals.
    check_size : int, optional
        When true, verify first that the array operands are
        size-compatible.
    verbose : int, optional
        Verbosity forwarded to the compilation machinery.
    **kw
        Extra keyword arguments forwarded to ``compile_function``.

    Raises
    ------
    ValueError
        If the operands fail the size-compatibility check.
    """
    # this could call inline, but making a copy of the
    # code here is more efficient for several reasons.
    global function_catalog

    # this grabs the local variables from the *previous* call
    # frame -- that is the locals from the function that called
    # inline.
    call_frame = sys._getframe().f_back
    if local_dict is None:
        local_dict = call_frame.f_locals
    if global_dict is None:
        global_dict = call_frame.f_globals

    # 1. Check the sizes of the arrays and make sure they are compatible.
    #    This is expensive, so unsetting the check_size flag can save a lot
    #    of time.  It also can cause core-dumps if the sizes of the inputs
    #    aren't compatible.
    if check_size and not size_check.check_expr(expr,local_dict,global_dict):
        raise ValueError("inputs failed to pass size check.")

    # 2. try local cache
    try:
        # Call the cached function directly instead of using the
        # Python-2-only apply() builtin (identical semantics on Python 2,
        # and keeps this path working on Python 3).
        results = function_cache[expr](local_dict, global_dict)
        return results
    except:
        # Any failure here (missing cache entry, stale compiled function)
        # deliberately falls through to the catalog / rebuild path below.
        pass
    try:
        results = attempt_function_call(expr,local_dict,global_dict)
    # 3. build the function
    except ValueError:
        # This section is pretty much the only difference
        # between blitz and inline
        ast = parser.suite(expr)
        ast_list = ast.tolist()
        expr_code = ast_to_blitz_expr(ast_list)
        arg_names = ast_tools.harvest_variables(ast_list)
        module_dir = global_dict.get('__file__',None)
        func = inline_tools.compile_function(expr_code,arg_names,local_dict,
                                             global_dict,module_dir,
                                             compiler='gcc',auto_downcast=1,
                                             verbose=verbose,
                                             type_converters=converters.blitz,
                                             **kw)
        function_catalog.add_function(expr,func,module_dir)
        try:
            results = attempt_function_call(expr,local_dict,global_dict)
        except ValueError:
            # Fall back to interpreting the expression as plain Python.
            warnings.warn('compilation failed. Executing as python code',
                          BlitzWarning)
            exec(expr, global_dict, local_dict)
def ast_to_blitz_expr(ast_seq):
    """Convert an ast_sequence to a blitz expression."""
    # Work on a copy so the caller's sequence is untouched by the
    # slice transformation.
    seq_copy = copy.deepcopy(ast_seq)
    slice_handler.transform_slices(seq_copy)

    # Build the program statement, then rewrite Python syntax into
    # blitz++-compatible C++ source.
    expr = ast_tools.ast_to_string(seq_copy)

    # Order matters below: the full 'slice(_beg,_end)' form must be
    # collapsed before the generic 'slice' rename.  The '_all' shorthand
    # isn't strictly necessary but helps code readability and
    # compactness; it requires that
    #     Range _all = blitz::Range::all();
    # be included in the generated support code.  These rewrites could
    # alternatively be done on the ast tree in build_slice_atom().
    rewrites = (
        ('slice(_beg,_end)', '_all'),
        ('slice', 'blitz::Range'),
        ('[', '('),
        (']', ')'),
        ('_stp', '1'),
    )
    for old, new in rewrites:
        expr = expr.replace(old, new)

    # Substituting blitz::fromStart / blitz::toEnd for '_beg' / '_end'
    # (which would require extra declarations in the generated code) was
    # intentionally left disabled in the original implementation.
    #expr = expr.replace('_beg', 'blitz::fromStart' )
    #expr = expr.replace('_end', 'blitz::toEnd' )

    return expr + ';\n'
|
timj/scons | refs/heads/master | src/engine/SCons/Tool/docbook/__init__.py | 1 |
"""SCons.Tool.docbook
Tool-specific initialization for Docbook.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import glob
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Script
import SCons.Tool
import SCons.Util
# Get full path to this script
scriptpath = os.path.dirname(os.path.realpath(__file__))
# Local folder for the collection of DocBook XSLs
db_xsl_folder = 'docbook-xsl-1.76.1'
# Do we have libxml2/libxslt/lxml?
has_libxml2 = True
has_lxml = True
try:
import libxml2
import libxslt
except:
has_libxml2 = False
try:
import lxml
except:
has_lxml = False
# Set this to True, to prefer xsltproc over libxml2 and lxml
prefer_xsltproc = False
# Regexs for parsing Docbook XML sources of MAN pages
re_manvolnum = re.compile("<manvolnum>([^<]*)</manvolnum>")
re_refname = re.compile("<refname>([^<]*)</refname>")
#
# Helper functions
#
def __extend_targets_sources(target, source):
    """ Prepare the lists of target and source files. """
    # Normalise both arguments to lists; sources default to a copy of
    # the targets when none were given.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = list(target)
    elif not SCons.Util.is_List(source):
        source = [source]
    # Pad the target list so it is at least as long as the source list.
    if len(target) < len(source):
        target.extend(source[len(target):])

    return target, source
def __init_xsl_stylesheet(kw, env, user_xsl_var, default_path):
    """Fill in kw['DOCBOOK_XSL'] unless the caller already set it.

    Priority: explicit 'xsl' keyword, then the user environment
    variable, then the stylesheet bundled with this tool.
    """
    if kw.get('DOCBOOK_XSL', '') != '':
        return
    style = kw.get('xsl', env.subst(user_xsl_var))
    if style == '':
        # Fall back to the stylesheet shipped in the local XSL bundle.
        style = os.path.join(scriptpath, db_xsl_folder, *default_path)
    kw['DOCBOOK_XSL'] = style
def __select_builder(lxml_builder, libxml2_builder, cmdline_builder):
    """ Selects a builder, based on which Python modules are present. """
    if prefer_xsltproc:
        return cmdline_builder
    if has_libxml2:
        return libxml2_builder
    # At the moment we prefer libxml2 over lxml, the latter can lead
    # to conflicts when installed together with libxml2.
    return lxml_builder if has_lxml else cmdline_builder
def __ensure_suffix(t, suffix):
    """ Ensure that the target t has the given suffix. """
    tpath = str(t)
    if tpath.endswith(suffix):
        # Already suffixed: hand back the original object unchanged.
        return t
    return tpath + suffix
def __ensure_suffix_stem(t, suffix):
    """ Ensure that the target t has the given suffix, and return the file's stem. """
    tpath = str(t)
    if tpath.endswith(suffix):
        stem, _ext = os.path.splitext(tpath)
        return t, stem
    # Append the suffix; the original path then serves as the stem.
    return tpath + suffix, tpath
def __get_xml_text(root):
    """ Return the text for the given root node (xml.dom.minidom). """
    # Concatenate the data of all direct text-node children.
    return "".join(e.data for e in root.childNodes
                   if e.nodeType == e.TEXT_NODE)
def __create_output_dir(base_dir):
    """ Ensure that the output directory base_dir exists. """
    head, tail = os.path.split(base_dir)
    if base_dir.endswith('/'):
        # A trailing slash means base_dir itself names the directory.
        out_dir = base_dir
    elif tail:
        # 'head/tail' where tail looks like a file name: create 'head'.
        out_dir = head
    else:
        out_dir = None
    if out_dir and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
#
# Supported command line tools and their call "signature"
#
xsltproc_com = {'xsltproc' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -o $TARGET $DOCBOOK_XSL $SOURCE',
'saxon' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -o $TARGET $DOCBOOK_XSL $SOURCE $DOCBOOK_XSLTPROCPARAMS',
'saxon-xslt' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -o $TARGET $DOCBOOK_XSL $SOURCE $DOCBOOK_XSLTPROCPARAMS',
'xalan' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -q -out $TARGET -xsl $DOCBOOK_XSL -in $SOURCE'}
xmllint_com = {'xmllint' : '$DOCBOOK_XMLLINT $DOCBOOK_XMLLINTFLAGS --xinclude $SOURCE > $TARGET'}
fop_com = {'fop' : '$DOCBOOK_FOP $DOCBOOK_FOPFLAGS -fo $SOURCE -pdf $TARGET',
'xep' : '$DOCBOOK_FOP $DOCBOOK_FOPFLAGS -valid -fo $SOURCE -pdf $TARGET',
'jw' : '$DOCBOOK_FOP $DOCBOOK_FOPFLAGS -f docbook -b pdf $SOURCE -o $TARGET'}
def __detect_cl_tool(env, chainkey, cdict):
    """
    Helper function, picks a command line tool from the list
    and initializes its environment variables.

    env: construction environment to update
    chainkey: variable name for the tool path (e.g. 'DOCBOOK_FOP');
              the command line goes into chainkey + 'COM'
    cdict: mapping of executable name -> default command line
    """
    if env.get(chainkey, '') == '':
        clpath = ''
        for cltool in cdict:
            clpath = env.WhereIs(cltool)
            if clpath:
                env[chainkey] = clpath
                # Use .get() so a missing *COM entry doesn't raise
                # KeyError; only install the default command line when
                # the user hasn't provided one.
                if not env.get(chainkey + 'COM', ''):
                    env[chainkey + 'COM'] = cdict[cltool]
                # Stop at the first tool found, so the detected
                # executable path and its command line stay consistent
                # (without the break, a later tool could overwrite the
                # path while the COM still belongs to an earlier one).
                break
def _detect(env):
    """
    Detect all the command line tools that we might need for creating
    the requested output formats.
    """
    global prefer_xsltproc

    if env.get('DOCBOOK_PREFER_XSLTPROC', ''):
        prefer_xsltproc = True

    # Command line fallbacks are only needed when neither libxml2 nor
    # lxml is importable, or when the user explicitly prefers xsltproc.
    use_cli = (not has_libxml2 and not has_lxml) or prefer_xsltproc
    if use_cli:
        # Try to find the XSLT processors
        __detect_cl_tool(env, 'DOCBOOK_XSLTPROC', xsltproc_com)
        __detect_cl_tool(env, 'DOCBOOK_XMLLINT', xmllint_com)
        __detect_cl_tool(env, 'DOCBOOK_FOP', fop_com)
#
# Scanners
#
include_re = re.compile('fileref\\s*=\\s*["|\']([^\\n]*)["|\']')
sentity_re = re.compile('<!ENTITY\\s+%*\\s*[^\\s]+\\s+SYSTEM\\s+["|\']([^\\n]*)["|\']>')
def __xml_scan(node, env, path, arg):
    """ Simple XML file scanner, detecting local images and XIncludes as implicit dependencies. """
    # Does the node exist yet?
    if not os.path.isfile(str(node)):
        return []

    if env.get('DOCBOOK_SCANENT',''):
        # Use simple pattern matching for system entities..., no support
        # for recursion yet.
        contents = node.get_text_contents()
        return sentity_re.findall(contents)

    # Run the bundled xmldepend.xsl stylesheet over the node to extract
    # dependency file names, picking whichever XSLT backend is available.
    xsl_file = os.path.join(scriptpath,'utils','xmldepend.xsl')
    if not has_libxml2 or prefer_xsltproc:
        if has_lxml and not prefer_xsltproc:
            # lxml backend
            from lxml import etree

            xsl_tree = etree.parse(xsl_file)
            doc = etree.parse(str(node))
            result = doc.xslt(xsl_tree)

            # Keep non-empty lines, dropping the XML declaration.
            depfiles = [x.strip() for x in str(result).splitlines() if x.strip() != "" and not x.startswith("<?xml ")]
            return depfiles
        else:
            # Try to call xsltproc
            xsltproc = env.subst("$DOCBOOK_XSLTPROC")
            if xsltproc and xsltproc.endswith('xsltproc'):
                result = env.backtick(' '.join([xsltproc, xsl_file, str(node)]))
                depfiles = [x.strip() for x in str(result).splitlines() if x.strip() != "" and not x.startswith("<?xml ")]
                return depfiles
            else:
                # Use simple pattern matching, there is currently no support
                # for xi:includes...
                contents = node.get_text_contents()
                return include_re.findall(contents)

    # libxml2 backend
    styledoc = libxml2.parseFile(xsl_file)
    style = libxslt.parseStylesheetDoc(styledoc)
    doc = libxml2.readFile(str(node), None, libxml2.XML_PARSE_NOENT)
    result = style.applyStylesheet(doc, None)

    depfiles = []
    for x in str(result).splitlines():
        if x.strip() != "" and not x.startswith("<?xml "):
            depfiles.extend(x.strip().split())

    # Free the C-level documents explicitly; the libxml2 bindings do not
    # garbage-collect these.
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()

    return depfiles
# Creating the instance of our XML dependency scanner
docbook_xml_scanner = SCons.Script.Scanner(function = __xml_scan,
argument = None)
#
# Action generators
#
def __generate_xsltproc_action(source, target, env, for_signature):
    """Command generator: return the xsltproc command line for this node."""
    cmd = env['DOCBOOK_XSLTPROCCOM']
    # When a base_dir is configured, the command writes the output using
    # the bare file name (the directory prefix is handled elsewhere).
    if env.subst('$base_dir'):
        return cmd.replace('$TARGET', '${TARGET.file}')
    return cmd
#
# Emitters
#
def __emit_xsl_basedir(target, source, env):
    """Emitter: prefix every target with the optional $base_dir."""
    base_dir = env.subst('$base_dir')
    if not base_dir:
        # No base_dir configured: pass targets and sources through.
        return target, source
    return [os.path.join(base_dir, str(t)) for t in target], source
#
# Builders
#
def __build_libxml2(target, source, env):
    """
    General XSLT builder (HTML/FO), using the libxml2 module.
    """
    xsl_style = env.subst('$DOCBOOK_XSL')
    styledoc = libxml2.parseFile(xsl_style)
    style = libxslt.parseStylesheetDoc(styledoc)
    # NOENT: substitute entities while parsing the source document.
    doc = libxml2.readFile(str(source[0]),None,libxml2.XML_PARSE_NOENT)
    # Support for additional parameters
    parampass = {}
    if parampass:
        result = style.applyStylesheet(doc, parampass)
    else:
        result = style.applyStylesheet(doc, None)
    # Third argument 0: no compression for the output file.
    style.saveResultToFilename(str(target[0]), result, 0)
    # Free the C-level objects explicitly; these are not
    # garbage-collected by the libxml2 bindings.
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()

    return None
def __build_lxml(target, source, env):
    """
    General XSLT builder (HTML/FO), using the lxml module.
    """
    from lxml import etree

    xslt_ac = etree.XSLTAccessControl(read_file=True,
                                      write_file=True,
                                      create_dir=True,
                                      read_network=False,
                                      write_network=False)
    xsl_style = env.subst('$DOCBOOK_XSL')
    xsl_tree = etree.parse(xsl_style)
    transform = etree.XSLT(xsl_tree, access_control=xslt_ac)
    doc = etree.parse(str(source[0]))
    # Support for additional parameters
    parampass = {}
    if parampass:
        result = transform(doc, **parampass)
    else:
        result = transform(doc)

    try:
        of = open(str(target[0]), "wb")
        # Bug fix: the original code called of.write(of.write(...)),
        # which wrote the document and then tried to write the return
        # value of write() as well.
        of.write(etree.tostring(result, pretty_print=True))
        of.close()
    except:
        # NOTE(review): failures are silently ignored here, mirroring
        # the original behaviour; consider reporting the error instead.
        pass

    return None
def __xinclude_libxml2(target, source, env):
    """
    Resolving XIncludes, using the libxml2 module.
    """
    doc = libxml2.readFile(str(source[0]), None, libxml2.XML_PARSE_NOENT)
    # Resolve xi:include elements in place, keeping entity substitution.
    doc.xincludeProcessFlags(libxml2.XML_PARSE_NOENT)
    doc.saveFile(str(target[0]))
    # Free the C-level document; not garbage-collected by the bindings.
    doc.freeDoc()

    return None
def __xinclude_lxml(target, source, env):
    """
    Resolving XIncludes, using the lxml module.
    """
    from lxml import etree

    doc = etree.parse(str(source[0]))
    # Resolve xi:include elements in place.
    doc.xinclude()
    try:
        doc.write(str(target[0]), xml_declaration=True,
                  encoding="UTF-8", pretty_print=True)
    except:
        # NOTE(review): write errors are silently swallowed here; the
        # build would continue with a missing or stale target file.
        pass

    return None
__libxml2_builder = SCons.Builder.Builder(
action = __build_libxml2,
src_suffix = '.xml',
source_scanner = docbook_xml_scanner,
emitter = __emit_xsl_basedir)
__lxml_builder = SCons.Builder.Builder(
action = __build_lxml,
src_suffix = '.xml',
source_scanner = docbook_xml_scanner,
emitter = __emit_xsl_basedir)
__xinclude_libxml2_builder = SCons.Builder.Builder(
action = __xinclude_libxml2,
suffix = '.xml',
src_suffix = '.xml',
source_scanner = docbook_xml_scanner)
__xinclude_lxml_builder = SCons.Builder.Builder(
action = __xinclude_lxml,
suffix = '.xml',
src_suffix = '.xml',
source_scanner = docbook_xml_scanner)
__xsltproc_builder = SCons.Builder.Builder(
action = SCons.Action.CommandGeneratorAction(__generate_xsltproc_action,
{'cmdstr' : '$DOCBOOK_XSLTPROCCOMSTR'}),
src_suffix = '.xml',
source_scanner = docbook_xml_scanner,
emitter = __emit_xsl_basedir)
__xmllint_builder = SCons.Builder.Builder(
action = SCons.Action.Action('$DOCBOOK_XMLLINTCOM','$DOCBOOK_XMLLINTCOMSTR'),
suffix = '.xml',
src_suffix = '.xml',
source_scanner = docbook_xml_scanner)
__fop_builder = SCons.Builder.Builder(
action = SCons.Action.Action('$DOCBOOK_FOPCOM','$DOCBOOK_FOPCOMSTR'),
suffix = '.pdf',
src_suffix = '.fo',
ensure_suffix=1)
def DocbookEpub(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for ePub output.

    Transforms the first source document into the OEBPS/META-INF layout,
    then zips it into an .epub container. Returns the list of created
    nodes (toc.ncx, container.xml and the .epub archive).
    """
    import zipfile
    import shutil
    def build_open_container(target, source, env):
        """Generate the *.epub file from intermediate outputs

        Constructs the epub file according to the Open Container Format. This
        function could be replaced by a call to the SCons Zip builder if support
        was added for different compression formats for separate source nodes.
        """
        zf = zipfile.ZipFile(str(target[0]), 'w')
        with open('mimetype', 'w') as mime_file:
            mime_file.write('application/epub+zip')
        # Per OCF, the mimetype entry has to be stored uncompressed.
        zf.write(mime_file.name, compress_type = zipfile.ZIP_STORED)
        for s in source:
            if os.path.isfile(str(s)):
                head, tail = os.path.split(str(s))
                if not head:
                    continue
                s = head
            for dirpath, dirnames, filenames in os.walk(str(s)):
                for fname in filenames:
                    path = os.path.join(dirpath, fname)
                    if os.path.isfile(path):
                        zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))),
                            zipfile.ZIP_DEFLATED)
        zf.close()
    def add_resources(target, source, env):
        """Add missing resources to the OEBPS directory

        Ensure all the resources in the manifest are present in the OEBPS directory.
        """
        hrefs = []
        content_file = os.path.join(source[0].get_abspath(), 'content.opf')
        if not os.path.isfile(content_file):
            return
        if has_libxml2:
            nsmap = {'opf' : 'http://www.idpf.org/2007/opf'}
            # Read file and resolve entities
            doc = libxml2.readFile(content_file, None, 0)
            opf = doc.getRootElement()
            # Create xpath context
            xpath_context = doc.xpathNewContext()
            # Register namespaces. Fixed: dict.iteritems() is Python 2
            # only; items() behaves identically here on both 2 and 3.
            for key, val in nsmap.items():
                xpath_context.xpathRegisterNs(key, val)
            if hasattr(opf, 'xpathEval') and xpath_context:
                # Use the xpath context
                xpath_context.setContextNode(opf)
                items = xpath_context.xpathEval(".//opf:item")
            else:
                items = opf.findall(".//{'http://www.idpf.org/2007/opf'}item")
            for item in items:
                if hasattr(item, 'prop'):
                    hrefs.append(item.prop('href'))
                else:
                    hrefs.append(item.attrib['href'])
            doc.freeDoc()
            xpath_context.xpathFreeContext()
        elif has_lxml:
            from lxml import etree
            opf = etree.parse(content_file)
            # All the opf:item elements are resources
            for item in opf.xpath('//opf:item',
                    namespaces= { 'opf': 'http://www.idpf.org/2007/opf' }):
                hrefs.append(item.attrib['href'])
        for href in hrefs:
            # If the resource was not already created by DocBook XSL itself,
            # copy it into the OEBPS folder
            referenced_file = os.path.join(source[0].get_abspath(), href)
            if not os.path.exists(referenced_file):
                shutil.copy(href, os.path.join(source[0].get_abspath(), href))
    # Init list of targets/sources
    target, source = __extend_targets_sources(target, source)
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_EPUB', ['epub','docbook.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Create targets
    result = []
    if not env.GetOption('clean'):
        # Ensure that the folders OEBPS and META-INF exist
        __create_output_dir('OEBPS/')
        __create_output_dir('META-INF/')
        dirs = env.Dir(['OEBPS', 'META-INF'])
        # Set the fixed base_dir
        kw['base_dir'] = 'OEBPS/'
        tocncx = __builder.__call__(env, 'toc.ncx', source[0], **kw)
        cxml = env.File('META-INF/container.xml')
        env.SideEffect(cxml, tocncx)
        env.Depends(tocncx, kw['DOCBOOK_XSL'])
        result.extend(tocncx+[cxml])
        container = env.Command(__ensure_suffix(str(target[0]), '.epub'),
            tocncx+[cxml], [add_resources, build_open_container])
        mimetype = env.File('mimetype')
        env.SideEffect(mimetype, container)
        result.extend(container)
        # Add supporting files for cleanup
        env.Clean(tocncx, dirs)
    return result
def DocbookHtml(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for HTML output.
    """
    # Normalize the target/source lists
    target, source = __extend_targets_sources(target, source)
    # Select the XSL stylesheet to apply
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTML', ['html','docbook.xsl'])
    # Pick the best available transformation backend
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Register one HTML output per source document
    result = []
    for tgt, src in zip(target, source):
        nodes = __builder.__call__(env, __ensure_suffix(tgt, '.html'), src, **kw)
        env.Depends(nodes, kw['DOCBOOK_XSL'])
        result.extend(nodes)
    return result
def DocbookHtmlChunked(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for chunked HTML output.
    """
    # Normalize the target argument to a list
    if not SCons.Util.is_List(target):
        target = [target]
    # Without an explicit source, the given target doubles as the input
    # and the chunked output is anchored at index.html
    if not source:
        source, target = target, ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]
    # Select the XSL stylesheet for chunked output
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTMLCHUNKED', ['html','chunkfast.xsl'])
    # Pick the best available transformation backend
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Make sure the requested output directory exists
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)
    # Transform the first source document into the chunked HTML set
    result = []
    nodes = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(nodes, kw['DOCBOOK_XSL'])
    result.extend(nodes)
    # Register all generated chunk files for cleanup
    env.Clean(nodes, glob.glob(os.path.join(base_dir, '*.html')))
    return result
def DocbookHtmlhelp(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for HTMLHELP output.
    """
    # Init target/source
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target
        target = ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTMLHELP', ['htmlhelp','htmlhelp.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Detect base dir
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)
    # Create targets
    result = []
    r = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(r, kw['DOCBOOK_XSL'])
    result.extend(r)
    # Add supporting files for cleanup. Note: glob has no '|' alternation;
    # the previous pattern '[ar|bk|ch]*.html' was a character set matching
    # any file starting with one of {a,r,|,b,k,c,h}. Glob each intended
    # chunk prefix (ar*, bk*, ch*) explicitly instead.
    env.Clean(r, ['toc.hhc', 'htmlhelp.hhp', 'index.hhk'] +
               glob.glob(os.path.join(base_dir, 'ar*.html')) +
               glob.glob(os.path.join(base_dir, 'bk*.html')) +
               glob.glob(os.path.join(base_dir, 'ch*.html')))
    return result
def DocbookPdf(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for PDF output.
    """
    # Normalize the target/source lists
    target, source = __extend_targets_sources(target, source)
    # Select the XSL stylesheet producing XSL-FO
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_PDF', ['fo','docbook.xsl'])
    # Pick the best available transformation backend
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Two-stage pipeline per document: XML -> .fo -> .pdf
    result = []
    for tgt, src in zip(target, source):
        tgt, stem = __ensure_suffix_stem(tgt, '.pdf')
        fo_nodes = __builder.__call__(env, stem+'.fo', src, **kw)
        env.Depends(fo_nodes, kw['DOCBOOK_XSL'])
        result.extend(fo_nodes)
        result.extend(__fop_builder.__call__(env, tgt, fo_nodes, **kw))
    return result
def DocbookMan(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for Man page output.

    The man volume number and the output filenames are derived from the
    refmeta/refnamediv elements of the source document when it can be
    read: first via a DOM parse, falling back to a regex scan of the raw
    text if that fails.
    """
    # Init list of targets/sources
    target, source = __extend_targets_sources(target, source)
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_MAN', ['manpages','docbook.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Create targets
    result = []
    for t,s in zip(target,source):
        volnum = "1"
        outfiles = []
        srcfile = __ensure_suffix(str(s),'.xml')
        if os.path.isfile(srcfile):
            try:
                import xml.dom.minidom
                # Reuse srcfile instead of recomputing the suffixed name
                dom = xml.dom.minidom.parse(srcfile)
                # Extract volume number, default is 1
                for node in dom.getElementsByTagName('refmeta'):
                    for vol in node.getElementsByTagName('manvolnum'):
                        volnum = __get_xml_text(vol)
                # Extract output filenames
                for node in dom.getElementsByTagName('refnamediv'):
                    for ref in node.getElementsByTagName('refname'):
                        outfiles.append(__get_xml_text(ref)+'.'+volnum)
            except Exception:
                # DOM parsing failed (e.g. malformed XML): fall back to
                # simple regex parsing. Narrowed from a bare 'except:',
                # which would also swallow SystemExit/KeyboardInterrupt.
                with open(srcfile, 'r') as f:
                    content = f.read()
                for m in re_manvolnum.finditer(content):
                    volnum = m.group(1)
                for m in re_refname.finditer(content):
                    outfiles.append(m.group(1)+'.'+volnum)
            if not outfiles:
                # Use stem of the source file
                spath = str(s)
                if not spath.endswith('.xml'):
                    outfiles.append(spath+'.'+volnum)
                else:
                    stem, ext = os.path.splitext(spath)
                    outfiles.append(stem+'.'+volnum)
        else:
            # We have to completely rely on the given target name
            outfiles.append(t)
        __builder.__call__(env, outfiles[0], s, **kw)
        env.Depends(outfiles[0], kw['DOCBOOK_XSL'])
        result.append(outfiles[0])
        if len(outfiles) > 1:
            env.Clean(outfiles[0], outfiles[1:])
    return result
def DocbookSlidesPdf(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for PDF slides output.
    """
    # Normalize the target/source lists
    target, source = __extend_targets_sources(target, source)
    # Select the slides XSL stylesheet producing XSL-FO
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESPDF', ['slides','fo','plain.xsl'])
    # Pick the best available transformation backend
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Two-stage pipeline per document: XML -> .fo -> .pdf
    result = []
    for tgt, src in zip(target, source):
        tgt, stem = __ensure_suffix_stem(tgt, '.pdf')
        fo_nodes = __builder.__call__(env, stem+'.fo', src, **kw)
        env.Depends(fo_nodes, kw['DOCBOOK_XSL'])
        result.extend(fo_nodes)
        result.extend(__fop_builder.__call__(env, tgt, fo_nodes, **kw))
    return result
def DocbookSlidesHtml(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for HTML slides output.
    """
    # Normalize the target argument to a list
    if not SCons.Util.is_List(target):
        target = [target]
    # Without an explicit source, the given target doubles as the input
    # and the output is anchored at index.html
    if not source:
        source, target = target, ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]
    # Select the XSL stylesheet for HTML slides
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESHTML', ['slides','html','plain.xsl'])
    # Pick the best available transformation backend
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Make sure the requested output directory exists
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)
    # Transform the first source document
    result = []
    nodes = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(nodes, kw['DOCBOOK_XSL'])
    result.extend(nodes)
    # Register the table of contents and all foil pages for cleanup
    env.Clean(nodes, [os.path.join(base_dir, 'toc.html')] +
              glob.glob(os.path.join(base_dir, 'foil*.html')))
    return result
def DocbookXInclude(env, target, source, *args, **kw):
    """
    A pseudo-Builder, for resolving XIncludes in a separate processing step.
    """
    # Normalize the target/source lists
    target, source = __extend_targets_sources(target, source)
    # Pick the best available XInclude backend
    __builder = __select_builder(__xinclude_lxml_builder,__xinclude_libxml2_builder,__xmllint_builder)
    # Resolve every target/source pair
    nodes = []
    for tgt, src in zip(target, source):
        nodes += __builder.__call__(env, tgt, src, **kw)
    return nodes
def DocbookXslt(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, applying a simple XSL transformation to the input file.
    """
    # Normalize the target/source lists
    target, source = __extend_targets_sources(target, source)
    # The stylesheet is taken from the 'xsl' keyword, with a fallback default
    kw['DOCBOOK_XSL'] = kw.get('xsl', 'transform.xsl')
    # Pick the best available transformation backend
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Apply the transformation to every target/source pair
    result = []
    for tgt, src in zip(target, source):
        nodes = __builder.__call__(env, tgt, src, **kw)
        env.Depends(nodes, kw['DOCBOOK_XSL'])
        result.extend(nodes)
    return result
def generate(env):
    """Add Builders and construction variables for docbook to an Environment.

    Populates the construction variables with safe defaults (empty paths,
    empty flag lists, quiet output strings) and registers all Docbook
    pseudo-builders as environment methods.
    """
    env.SetDefault(
        # Default names for customized XSL stylesheets
        DOCBOOK_DEFAULT_XSL_EPUB = '',
        DOCBOOK_DEFAULT_XSL_HTML = '',
        DOCBOOK_DEFAULT_XSL_HTMLCHUNKED = '',
        DOCBOOK_DEFAULT_XSL_HTMLHELP = '',
        DOCBOOK_DEFAULT_XSL_PDF = '',
        DOCBOOK_DEFAULT_XSL_MAN = '',
        DOCBOOK_DEFAULT_XSL_SLIDESPDF = '',
        DOCBOOK_DEFAULT_XSL_SLIDESHTML = '',
        # Paths to the detected executables
        DOCBOOK_XSLTPROC = '',
        DOCBOOK_XMLLINT = '',
        DOCBOOK_FOP = '',
        # Additional flags for the text processors
        DOCBOOK_XSLTPROCFLAGS = SCons.Util.CLVar(''),
        DOCBOOK_XMLLINTFLAGS = SCons.Util.CLVar(''),
        DOCBOOK_FOPFLAGS = SCons.Util.CLVar(''),
        DOCBOOK_XSLTPROCPARAMS = SCons.Util.CLVar(''),
        # Default command lines for the detected executables
        DOCBOOK_XSLTPROCCOM = xsltproc_com['xsltproc'],
        DOCBOOK_XMLLINTCOM = xmllint_com['xmllint'],
        DOCBOOK_FOPCOM = fop_com['fop'],
        # Screen output for the text processors
        DOCBOOK_XSLTPROCCOMSTR = None,
        DOCBOOK_XMLLINTCOMSTR = None,
        DOCBOOK_FOPCOMSTR = None,
        )
    # Probe for the available processors/modules (_detect is defined
    # elsewhere in this module)
    _detect(env)
    # Expose the pseudo-builders as methods on the environment
    env.AddMethod(DocbookEpub, "DocbookEpub")
    env.AddMethod(DocbookHtml, "DocbookHtml")
    env.AddMethod(DocbookHtmlChunked, "DocbookHtmlChunked")
    env.AddMethod(DocbookHtmlhelp, "DocbookHtmlhelp")
    env.AddMethod(DocbookPdf, "DocbookPdf")
    env.AddMethod(DocbookMan, "DocbookMan")
    env.AddMethod(DocbookSlidesPdf, "DocbookSlidesPdf")
    env.AddMethod(DocbookSlidesHtml, "DocbookSlidesHtml")
    env.AddMethod(DocbookXInclude, "DocbookXInclude")
    env.AddMethod(DocbookXslt, "DocbookXslt")
def exists(env):
    """Report the tool as always available; detection of the concrete
    processors happens later, inside generate()."""
    return 1
|
dhruvsrivastava/OJ | refs/heads/master | flask/lib/python2.7/site-packages/sqlalchemy/engine/interfaces.py | 50 | # engine/interfaces.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define core interfaces used by the engine system."""
from .. import util, event
# backwards compat
from ..sql.compiler import Compiled, TypeCompiler
class Dialect(object):
"""Define the behavior of a specific database and DB-API combination.
Any aspect of metadata definition, SQL query generation,
execution, result-set handling, or anything else which varies
between databases is defined under the general category of the
Dialect. The Dialect acts as a factory for other
database-specific object implementations including
ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.
All Dialects implement the following attributes:
name
identifying name for the dialect from a DBAPI-neutral point of view
(i.e. 'sqlite')
driver
identifying name for the dialect's DBAPI
positional
True if the paramstyle for this Dialect is positional.
paramstyle
the paramstyle to be used (some DB-APIs support multiple
paramstyles).
convert_unicode
True if Unicode conversion should be applied to all ``str``
types.
encoding
type of encoding to use for unicode, usually defaults to
'utf-8'.
statement_compiler
a :class:`.Compiled` class used to compile SQL statements
ddl_compiler
a :class:`.Compiled` class used to compile DDL statements
server_version_info
a tuple containing a version number for the DB backend in use.
This value is only available for supporting dialects, and is
typically populated during the initial connection to the database.
default_schema_name
the name of the default schema. This value is only available for
supporting dialects, and is typically populated during the
initial connection to the database.
execution_ctx_cls
a :class:`.ExecutionContext` class used to handle statement execution
execute_sequence_format
either the 'tuple' or 'list' type, depending on what cursor.execute()
accepts for the second argument (they vary).
preparer
a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to
quote identifiers.
supports_alter
``True`` if the database supports ``ALTER TABLE``.
max_identifier_length
The maximum length of identifier names.
supports_unicode_statements
Indicate whether the DB-API can receive SQL statements as Python
unicode strings
supports_unicode_binds
Indicate whether the DB-API can receive string bind parameters
as Python unicode strings
supports_sane_rowcount
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements.
supports_sane_multi_rowcount
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements when executed via
executemany.
preexecute_autoincrement_sequences
True if 'implicit' primary key functions must be executed separately
in order to get their value. This is currently oriented towards
Postgresql.
implicit_returning
use RETURNING or equivalent during INSERT execution in order to load
newly generated primary keys and other column defaults in one execution,
which are then available via inserted_primary_key.
If an insert statement has returning() specified explicitly,
the "implicit" functionality is not used and inserted_primary_key
will not be available.
dbapi_type_map
A mapping of DB-API type objects present in this Dialect's
DB-API implementation mapped to TypeEngine implementations used
by the dialect.
This is used to apply types to result sets based on the DB-API
types present in cursor.description; it only takes effect for
result sets against textual statements where no explicit
typemap was present.
colspecs
A dictionary of TypeEngine classes from sqlalchemy.types mapped
to subclasses that are specific to the dialect class. This
dictionary is class-level only and is not accessed from the
dialect instance itself.
supports_default_values
Indicates if the construct ``INSERT INTO tablename DEFAULT
VALUES`` is supported
supports_sequences
Indicates if the dialect supports CREATE SEQUENCE or similar.
sequences_optional
If True, indicates if the "optional" flag on the Sequence() construct
should signal to not generate a CREATE SEQUENCE. Applies only to
dialects that support sequences. Currently used only to allow Postgresql
SERIAL to be used on a column that specifies Sequence() for usage on
other backends.
supports_native_enum
Indicates if the dialect supports a native ENUM construct.
This will prevent types.Enum from generating a CHECK
constraint when that type is used.
supports_native_boolean
Indicates if the dialect supports a native boolean construct.
This will prevent types.Boolean from generating a CHECK
constraint when that type is used.
dbapi_exception_translation_map
A dictionary of names that will contain as values the names of
pep-249 exceptions ("IntegrityError", "OperationalError", etc)
keyed to alternate class names, to support the case where a
DBAPI has exception classes that aren't named as they are
referred to (e.g. IntegrityError = MyException). In the vast
majority of cases this dictionary is empty.
.. versionadded:: 1.0.5
"""
_has_events = False
def create_connect_args(self, url):
"""Build DB-API compatible connection arguments.
Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple
consisting of a `*args`/`**kwargs` suitable to send directly
to the dbapi's connect function.
"""
raise NotImplementedError()
@classmethod
def type_descriptor(cls, typeobj):
"""Transform a generic type to a dialect-specific type.
Dialect classes will usually use the
:func:`.types.adapt_type` function in the types module to
accomplish this.
The returned result is cached *per dialect class* so can
contain no dialect-instance state.
"""
raise NotImplementedError()
def initialize(self, connection):
"""Called during strategized creation of the dialect with a
connection.
Allows dialects to configure options based on server version info or
other properties.
The connection passed here is a SQLAlchemy Connection object,
with full capabilities.
The initialize() method of the base dialect should be called via
super().
"""
pass
def reflecttable(
self, connection, table, include_columns, exclude_columns):
"""Load table description from the database.
Given a :class:`.Connection` and a
:class:`~sqlalchemy.schema.Table` object, reflect its columns and
properties from the database.
The implementation of this method is provided by
:meth:`.DefaultDialect.reflecttable`, which makes use of
:class:`.Inspector` to retrieve column information.
Dialects should **not** seek to implement this method, and should
instead implement individual schema inspection operations such as
:meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`,
etc.
"""
raise NotImplementedError()
def get_columns(self, connection, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return column
information as a list of dictionaries with these keys:
name
the column's name
type
[sqlalchemy.types#TypeEngine]
nullable
boolean
default
the column's default value
autoincrement
boolean
sequence
a dictionary of the form
{'name' : str, 'start' :int, 'increment': int, 'minvalue': int,
'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool,
'cycle': bool}
Additional column attributes may be present.
"""
raise NotImplementedError()
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Deprecated. This method is only called by the default
implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should
instead implement the :meth:`.Dialect.get_pk_constraint` method
directly.
"""
raise NotImplementedError()
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""Return information about the primary key constraint on
table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return primary
key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
"""
raise NotImplementedError()
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return foreign
key information as a list of dicts with these keys:
name
the constraint's name
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
"""
raise NotImplementedError()
def get_table_names(self, connection, schema=None, **kw):
"""Return a list of table names for `schema`."""
raise NotImplementedError()
def get_temp_table_names(self, connection, schema=None, **kw):
"""Return a list of temporary table names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_names(self, connection, schema=None, **kw):
"""Return a list of all view names available in the database.
schema:
Optional, retrieve names from a non-default schema.
"""
raise NotImplementedError()
def get_temp_view_names(self, connection, schema=None, **kw):
"""Return a list of temporary view names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_definition(self, connection, view_name, schema=None, **kw):
"""Return view definition.
Given a :class:`.Connection`, a string
`view_name`, and an optional string `schema`, return the view
definition.
"""
raise NotImplementedError()
def get_indexes(self, connection, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a :class:`.Connection`, a string
`table_name` and an optional string `schema`, return index
information as a list of dictionaries with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
"""
raise NotImplementedError()
def get_unique_constraints(
self, connection, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
\**kw
other options passed to the dialect's get_unique_constraints()
method.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def normalize_name(self, name):
"""convert the given name to lowercase if it is detected as
case insensitive.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def denormalize_name(self, name):
"""convert the given name to a case insensitive identifier
for the backend if it is an all-lowercase name.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def has_table(self, connection, table_name, schema=None):
"""Check the existence of a particular table in the database.
Given a :class:`.Connection` object and a string
`table_name`, return True if the given table (possibly within
the specified `schema`) exists in the database, False
otherwise.
"""
raise NotImplementedError()
def has_sequence(self, connection, sequence_name, schema=None):
"""Check the existence of a particular sequence in the database.
Given a :class:`.Connection` object and a string
`sequence_name`, return True if the given sequence exists in
the database, False otherwise.
"""
raise NotImplementedError()
def _get_server_version_info(self, connection):
"""Retrieve the server version info from the given connection.
This is used by the default implementation to populate the
"server_version_info" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def _get_default_schema_name(self, connection):
"""Return the string name of the currently selected schema from
the given connection.
This is used by the default implementation to populate the
"default_schema_name" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def do_begin(self, dbapi_connection):
"""Provide an implementation of ``connection.begin()``, given a
DB-API connection.
The DBAPI has no dedicated "begin" method and it is expected
that transactions are implicit. This hook is provided for those
DBAPIs that might need additional help in this area.
Note that :meth:`.Dialect.do_begin` is not called unless a
:class:`.Transaction` object is in use. The
:meth:`.Dialect.do_autocommit`
hook is provided for DBAPIs that need some extra commands emitted
after a commit in order to enter the next transaction, when the
SQLAlchemy :class:`.Connection` is used in its default "autocommit"
mode.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_rollback(self, dbapi_connection):
"""Provide an implementation of ``connection.rollback()``, given
a DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_commit(self, dbapi_connection):
"""Provide an implementation of ``connection.commit()``, given a
DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_close(self, dbapi_connection):
"""Provide an implementation of ``connection.close()``, given a DBAPI
connection.
This hook is called by the :class:`.Pool` when a connection has been
detached from the pool, or is being returned beyond the normal
capacity of the pool.
.. versionadded:: 0.8
"""
raise NotImplementedError()
def create_xid(self):
"""Create a two-phase transaction ID.
This id will be passed to do_begin_twophase(),
do_rollback_twophase(), do_commit_twophase(). Its format is
unspecified.
"""
raise NotImplementedError()
def do_savepoint(self, connection, name):
"""Create a savepoint with the given name.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_rollback_to_savepoint(self, connection, name):
"""Rollback a connection to the named savepoint.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_release_savepoint(self, connection, name):
"""Release the named savepoint on a connection.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_begin_twophase(self, connection, xid):
"""Begin a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_prepare_twophase(self, connection, xid):
"""Prepare a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
"""Rollback a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
"""Commit a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_recover_twophase(self, connection):
"""Recover list of uncommited prepared two phase transaction
identifiers on the given connection.
:param connection: a :class:`.Connection`.
"""
raise NotImplementedError()
def do_executemany(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.executemany(statement,
parameters)``."""
raise NotImplementedError()
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.execute(statement,
parameters)``."""
raise NotImplementedError()
def do_execute_no_params(self, cursor, statement, parameters,
context=None):
"""Provide an implementation of ``cursor.execute(statement)``.
The parameter collection should not be sent.
"""
raise NotImplementedError()
def is_disconnect(self, e, connection, cursor):
"""Return True if the given DB-API error indicates an invalid
connection"""
raise NotImplementedError()
def connect(self):
"""return a callable which sets up a newly created DBAPI connection.
The callable accepts a single argument "conn" which is the
DBAPI connection itself. It has no return value.
This is used to set dialect-wide per-connection options such as
isolation modes, unicode modes, etc.
If a callable is returned, it will be assembled into a pool listener
that receives the direct DBAPI connection, with all wrappers removed.
If None is returned, no listener will be generated.
"""
return None
def reset_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, revert its isolation to the default.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`.Connection` and
:class:`.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`.Connection` isolation level
:paramref:`.create_engine.isolation_level` -
set per :class:`.Engine` isolation level
"""
raise NotImplementedError()
def set_isolation_level(self, dbapi_conn, level):
"""Given a DBAPI connection, set its isolation level.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`.Connection` and
:class:`.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`.Connection` isolation level
:paramref:`.create_engine.isolation_level` -
set per :class:`.Engine` isolation level
"""
raise NotImplementedError()
    def get_isolation_level(self, dbapi_conn):
        """Given a DBAPI connection, return its isolation level.

        When working with a :class:`.Connection` object, the corresponding
        DBAPI connection may be procured using the
        :attr:`.Connection.connection` accessor.

        Note that this is a dialect-level method which is used as part
        of the implementation of the :class:`.Connection` and
        :class:`.Engine` isolation level facilities;
        these APIs should be preferred for most typical use cases.

        .. seealso::

            :meth:`.Connection.get_isolation_level` - view current level

            :attr:`.Connection.default_isolation_level` - view default level

            :paramref:`.Connection.execution_options.isolation_level` -
            set per :class:`.Connection` isolation level

            :paramref:`.create_engine.isolation_level` -
            set per :class:`.Engine` isolation level

        """
        raise NotImplementedError()
    @classmethod
    def get_dialect_cls(cls, url):
        """Given a URL, return the :class:`.Dialect` that will be used.

        This is a hook that allows an external plugin to provide functionality
        around an existing dialect, by allowing the plugin to be loaded
        from the url based on an entrypoint, and then the plugin returns
        the actual dialect to be used.

        By default this just returns the cls.

        .. versionadded:: 1.0.3

        """
        return cls
    @classmethod
    def engine_created(cls, engine):
        """A convenience hook called before returning the final
        :class:`.Engine`.

        If the dialect returned a different class from the
        :meth:`.get_dialect_cls`
        method, then the hook is called on both classes, first on
        the dialect class returned by the :meth:`.get_dialect_cls` method and
        then on the class on which the method was called.

        The hook should be used by dialects and/or wrappers to apply special
        events to the engine or its components.   In particular, it allows
        a dialect-wrapping class to apply dialect-level events.

        .. versionadded:: 1.0.3

        """
        pass
class ExecutionContext(object):
    """A messenger object for a Dialect that corresponds to a single
    execution.

    ExecutionContext should have these data members:

    connection
      Connection object which can be freely used by default value
      generators to execute SQL.  This Connection should reference the
      same underlying connection/transactional resources of
      root_connection.

    root_connection
      Connection object which is the source of this ExecutionContext.  This
      Connection may have close_with_result=True set, in which case it can
      only be used once.

    dialect
      dialect which created this ExecutionContext.

    cursor
      DB-API cursor procured from the connection,

    compiled
      if passed to constructor, sqlalchemy.engine.base.Compiled object
      being executed,

    statement
      string version of the statement to be executed.  Is either
      passed to the constructor, or must be created from the
      sql.Compiled object by the time pre_exec() has completed.

    parameters
      bind parameters passed to the execute() method.  For compiled
      statements, this is a dictionary or list of dictionaries.  For
      textual statements, it should be in a format suitable for the
      dialect's paramstyle (i.e. dict or list of dicts for non
      positional, list or list of lists/tuples for positional).

    isinsert
      True if the statement is an INSERT.

    isupdate
      True if the statement is an UPDATE.

    should_autocommit
      True if the statement is a "committable" statement.

    prefetch_cols
      a list of Column objects for which a client-side default
      was fired off.  Applies to inserts and updates.

    postfetch_cols
      a list of Column objects for which a server-side default or
      inline SQL expression value was fired off.  Applies to inserts
      and updates.
    """

    exception = None
    """A DBAPI-level exception that was caught when this ExecutionContext
    attempted to execute a statement.

    This attribute is meaningful only within the
    :meth:`.ConnectionEvents.dbapi_error` event.

    .. versionadded:: 0.9.7

    .. seealso::

        :attr:`.ExecutionContext.is_disconnect`

        :meth:`.ConnectionEvents.dbapi_error`

    """

    is_disconnect = None
    """Boolean flag set to True or False when a DBAPI-level exception
    is caught when this ExecutionContext attempted to execute a statement.

    This attribute is meaningful only within the
    :meth:`.ConnectionEvents.dbapi_error` event.

    .. versionadded:: 0.9.7

    .. seealso::

        :attr:`.ExecutionContext.exception`

        :meth:`.ConnectionEvents.dbapi_error`

    """

    def create_cursor(self):
        """Return a new cursor generated from this ExecutionContext's
        connection.

        Some dialects may wish to change the behavior of
        connection.cursor(), such as postgresql which may return a PG
        "server side" cursor.
        """

        raise NotImplementedError()

    def pre_exec(self):
        """Called before an execution of a compiled statement.

        If a compiled statement was passed to this ExecutionContext,
        the `statement` and `parameters` datamembers must be
        initialized after this statement is complete.
        """

        raise NotImplementedError()

    def post_exec(self):
        """Called after the execution of a compiled statement.

        If a compiled statement was passed to this ExecutionContext,
        the `last_insert_ids`, `last_inserted_params`, etc.
        datamembers should be available after this method completes.
        """

        raise NotImplementedError()

    def result(self):
        """Return a result object corresponding to this ExecutionContext.

        Returns a ResultProxy.
        """

        raise NotImplementedError()

    def handle_dbapi_exception(self, e):
        """Receive a DBAPI exception which occurred upon execute, result
        fetch, etc."""

        raise NotImplementedError()

    def should_autocommit_text(self, statement):
        """Parse the given textual statement and return True if it refers to
        a "committable" statement"""

        raise NotImplementedError()

    def lastrow_has_defaults(self):
        """Return True if the last INSERT or UPDATE row contained
        inlined or database-side defaults.
        """

        raise NotImplementedError()

    def get_rowcount(self):
        """Return the DBAPI ``cursor.rowcount`` value, or in some
        cases an interpreted value.

        See :attr:`.ResultProxy.rowcount` for details on this.
        """

        raise NotImplementedError()
class Connectable(object):
    """Interface for an object which supports execution of SQL constructs.

    The two implementations of :class:`.Connectable` are
    :class:`.Connection` and :class:`.Engine`.

    Connectable must also implement the 'dialect' member which references a
    :class:`.Dialect` instance.
    """

    def connect(self, **kwargs):
        """Return a :class:`.Connection` object.

        Depending on context, this may be ``self`` if this object
        is already an instance of :class:`.Connection`, or a newly
        procured :class:`.Connection` if this object is an instance
        of :class:`.Engine`.
        """
        # Fix: this abstract method previously had no body, so a subclass
        # that failed to override it silently returned None instead of
        # failing loudly; raise like every other method of this interface.
        raise NotImplementedError()

    def contextual_connect(self):
        """Return a :class:`.Connection` object which may be part of an
        ongoing context.

        Depending on context, this may be ``self`` if this object
        is already an instance of :class:`.Connection`, or a newly
        procured :class:`.Connection` if this object is an instance
        of :class:`.Engine`.
        """

        raise NotImplementedError()

    @util.deprecated("0.7",
                     "Use the create() method on the given schema "
                     "object directly, i.e. :meth:`.Table.create`, "
                     ":meth:`.Index.create`, :meth:`.MetaData.create_all`")
    def create(self, entity, **kwargs):
        """Emit CREATE statements for the given schema entity.
        """

        raise NotImplementedError()

    @util.deprecated("0.7",
                     "Use the drop() method on the given schema "
                     "object directly, i.e. :meth:`.Table.drop`, "
                     ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`")
    def drop(self, entity, **kwargs):
        """Emit DROP statements for the given schema entity.
        """

        raise NotImplementedError()

    # NOTE: the parameter name ``object`` shadows the builtin, but it is part
    # of the long-standing public signature (callers may pass it by keyword),
    # so it must not be renamed.
    def execute(self, object, *multiparams, **params):
        """Executes the given construct and returns a
        :class:`.ResultProxy`."""
        raise NotImplementedError()

    def scalar(self, object, *multiparams, **params):
        """Executes and returns the first column of the first row.

        The underlying cursor is closed after execution.
        """
        raise NotImplementedError()

    def _run_visitor(self, visitorcallable, element,
                     **kwargs):
        raise NotImplementedError()

    def _execute_clauseelement(self, elem, multiparams=None, params=None):
        raise NotImplementedError()
class ExceptionContext(object):
    """Encapsulate information about an error condition in progress.

    This object exists solely to be passed to the
    :meth:`.ConnectionEvents.handle_error` event, supporting an interface that
    can be extended without backwards-incompatibility.

    .. versionadded:: 0.9.7

    """

    connection = None
    """The :class:`.Connection` in use during the exception.

    This member is present, except in the case of a failure when
    first connecting.

    .. seealso::

        :attr:`.ExceptionContext.engine`

    """

    engine = None
    """The :class:`.Engine` in use during the exception.

    This member should always be present, even in the case of a failure
    when first connecting.

    .. versionadded:: 1.0.0

    """

    cursor = None
    """The DBAPI cursor object.

    May be None.

    """

    statement = None
    """String SQL statement that was emitted directly to the DBAPI.

    May be None.

    """

    parameters = None
    """Parameter collection that was emitted directly to the DBAPI.

    May be None.

    """

    original_exception = None
    """The exception object which was caught.

    This member is always present.

    """

    sqlalchemy_exception = None
    """The :class:`sqlalchemy.exc.StatementError` which wraps the original,
    and will be raised if exception handling is not circumvented by the event.

    May be None, as not all exception types are wrapped by SQLAlchemy.
    For DBAPI-level exceptions that subclass the dbapi's Error class, this
    field will always be present.

    """

    chained_exception = None
    """The exception that was returned by the previous handler in the
    exception chain, if any.

    If present, this exception will be the one ultimately raised by
    SQLAlchemy unless a subsequent handler replaces it.

    May be None.

    """

    execution_context = None
    """The :class:`.ExecutionContext` corresponding to the execution
    operation in progress.

    This is present for statement execution operations, but not for
    operations such as transaction begin/end.  It also is not present when
    the exception was raised before the :class:`.ExecutionContext`
    could be constructed.

    Note that the :attr:`.ExceptionContext.statement` and
    :attr:`.ExceptionContext.parameters` members may represent a
    different value than that of the :class:`.ExecutionContext`,
    potentially in the case where a
    :meth:`.ConnectionEvents.before_cursor_execute` event or similar
    modified the statement/parameters to be sent.

    May be None.

    """

    is_disconnect = None
    """Represent whether the exception as occurred represents a "disconnect"
    condition.

    This flag will always be True or False within the scope of the
    :meth:`.ConnectionEvents.handle_error` handler.

    SQLAlchemy will defer to this flag in order to determine whether or not
    the connection should be invalidated subsequently.    That is, by
    assigning to this flag, a "disconnect" event which then results in
    a connection and pool invalidation can be invoked or prevented by
    changing this flag.

    """

    invalidate_pool_on_disconnect = True
    """Represent whether all connections in the pool should be invalidated
    when a "disconnect" condition is in effect.

    Setting this flag to False within the scope of the
    :meth:`.ConnectionEvents.handle_error` event will have the effect such
    that the full collection of connections in the pool will not be
    invalidated during a disconnect; only the current connection that is the
    subject of the error will actually be invalidated.

    The purpose of this flag is for custom disconnect-handling schemes where
    the invalidation of other connections in the pool is to be performed
    based on other conditions, or even on a per-connection basis.

    .. versionadded:: 1.0.3

    """
|
archhurd/archweb | refs/heads/master | main/migrations/0027_auto__chg_field_package_compressed_size__chg_field_package_installed_s.py | 5 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen ``Package.compressed_size`` and
    ``Package.installed_size`` from PositiveIntegerField (32-bit) to
    BigIntegerField so package sizes above the 32-bit range can be stored.
    """

    def forwards(self, orm):
        # Widen both size columns to 64-bit integers.

        # Changing field 'Package.compressed_size'
        db.alter_column('packages', 'compressed_size', self.gf('django.db.models.fields.BigIntegerField')(null=True))

        # Changing field 'Package.installed_size'
        db.alter_column('packages', 'installed_size', self.gf('django.db.models.fields.BigIntegerField')(null=True))

    def backwards(self, orm):
        # Revert to the previous 32-bit positive-integer columns.  NOTE:
        # values beyond the PositiveIntegerField range would no longer fit.

        # Changing field 'Package.compressed_size'
        db.alter_column('packages', 'compressed_size', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))

        # Changing field 'Package.installed_size'
        db.alter_column('packages', 'installed_size', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))

    # Frozen ORM state auto-generated by South ("schemamigration"); it only
    # describes the models as of this migration.  Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'main.arch': {
            'Meta': {'ordering': "['name']", 'object_name': 'Arch', 'db_table': "'arches'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'main.donor': {
            'Meta': {'ordering': "['name']", 'object_name': 'Donor', 'db_table': "'donors'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'main.mirror': {
            'Meta': {'ordering': "('country', 'name')", 'object_name': 'Mirror'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'admin_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isos': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'rsync_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'rsync_user': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'tier': ('django.db.models.fields.SmallIntegerField', [], {'default': '2'}),
            'upstream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Mirror']", 'null': 'True'})
        },
        'main.mirrorprotocol': {
            'Meta': {'object_name': 'MirrorProtocol'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'protocol': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'})
        },
        'main.mirrorrsync': {
            'Meta': {'object_name': 'MirrorRsync'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
            'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rsync_ips'", 'to': "orm['main.Mirror']"})
        },
        'main.mirrorurl': {
            'Meta': {'object_name': 'MirrorUrl'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['main.Mirror']"}),
            'protocol': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': "orm['main.MirrorProtocol']"}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'main.news': {
            'Meta': {'ordering': "['-postdate', '-id']", 'object_name': 'News', 'db_table': "'news'"},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'news_author'", 'to': "orm['auth.User']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'postdate': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'main.package': {
            'Meta': {'ordering': "('pkgname',)", 'object_name': 'Package', 'db_table': "'packages'"},
            'arch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Arch']"}),
            'build_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'compressed_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'files_last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'flag_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'installed_size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'packager_str': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'pkgdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'pkgname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'repo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Repo']"}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
        },
        'main.packagedepend': {
            'Meta': {'object_name': 'PackageDepend', 'db_table': "'package_depends'"},
            'depname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'depvcmp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
        },
        'main.packagefile': {
            'Meta': {'object_name': 'PackageFile', 'db_table': "'package_files'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
        },
        'main.repo': {
            'Meta': {'ordering': "['name']", 'object_name': 'Repo', 'db_table': "'repos'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'main.signoff': {
            'Meta': {'object_name': 'Signoff'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"}),
            'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'main.todolist': {
            'Meta': {'object_name': 'Todolist', 'db_table': "'todolists'"},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'date_added': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'main.todolistpkg': {
            'Meta': {'unique_together': "(('list', 'pkg'),)", 'object_name': 'TodolistPkg', 'db_table': "'todolist_pkgs'"},
            'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Todolist']"}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Package']"})
        },
        'main.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'user_profiles'"},
            'alias': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'allowed_repos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Repo']", 'symmetrical': 'False', 'blank': 'True'}),
            'favorite_distros': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interests': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'notify': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'other_contact': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'picture': ('django.db.models.fields.files.FileField', [], {'default': "'devs/silhouette.png'", 'max_length': '100'}),
            'public_email': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'roles': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userprofile_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'website': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'yob': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['main']
|
mdaniel/intellij-community | refs/heads/master | python/testData/resolveTest/test.py | 30 | from unittest import TestCase
TestCase().assertIsNotNo<caret> |
incaser/server-tools | refs/heads/8.0 | base_external_dbsource/base_external_dbsource.py | 7 | # -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis
# 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import logging
import psycopg2
from openerp.osv import orm, fields
from openerp.tools.translate import _
import openerp.tools as tools
_logger = logging.getLogger(__name__)

# Connector choices offered on the 'connector' selection field.  PostgreSQL
# only needs psycopg2 (imported unconditionally above), so it is always
# available; the other entries are appended below only if their driver
# package imports successfully.
CONNECTORS = [('postgresql', 'PostgreSQL')]

try:
    import sqlalchemy
    CONNECTORS.append(('sqlite', 'SQLite'))
    try:
        import pymssql
        CONNECTORS.append(('mssql', 'Microsoft SQL Server'))
        assert pymssql
    except (ImportError, AssertionError):
        _logger.info('MS SQL Server not available. Please install "pymssql"\
 python package.')
    try:
        import MySQLdb
        CONNECTORS.append(('mysql', 'MySQL'))
        assert MySQLdb
    except (ImportError, AssertionError):
        _logger.info('MySQL not available. Please install "mysqldb"\
 python package.')
# Fix: these were bare ``except:`` clauses, which also swallowed
# SystemExit/KeyboardInterrupt; only a missing package should be tolerated.
except ImportError:
    # Message fix: the package name was misspelled "slqalchemy".
    _logger.info('SQL Alchemy not available. Please install "sqlalchemy"\
 python package.')

try:
    import pyodbc
    CONNECTORS.append(('pyodbc', 'ODBC'))
except ImportError:
    _logger.info('ODBC libraries not available. Please install "unixodbc"\
 and "python-pyodbc" packages.')

try:
    import cx_Oracle
    CONNECTORS.append(('cx_Oracle', 'Oracle'))
except ImportError:
    _logger.info('Oracle libraries not available. Please install "cx_Oracle"\
 python package.')
class base_external_dbsource(orm.Model):
    """Model holding connection settings for external SQL databases.

    Each record stores a connection string plus the connector type to use;
    the methods below open connections and run raw SQL against the
    configured external source.
    """
    _name = "base.external.dbsource"
    _description = 'External Database Sources'
    _columns = {
        'name': fields.char('Datasource name', required=True, size=64),
        'conn_string': fields.text('Connection string', help="""
Sample connection strings:
 - Microsoft SQL Server:
   mssql+pymssql://username:%s@server:port/dbname?charset=utf8
 - MySQL: mysql://user:%s@server:port/dbname
 - ODBC: DRIVER={FreeTDS};SERVER=server.address;Database=mydb;UID=sa
 - ORACLE: username/%s@//server.address:port/instance
 - PostgreSQL:
   dbname='template1' user='dbuser' host='localhost' port='5432' password=%s
 - SQLite: sqlite:///test.db
"""),
        'password': fields.char('Password', size=40),
        'connector': fields.selection(CONNECTORS, 'Connector',
                                      required=True,
                                      help="If a connector is missing from the\
 list, check the server log to confirm\
 that the required components were\
 detected."),
    }

    def conn_open(self, cr, uid, id1):
        """Open and return a live connection to the external database.

        :param id1: id of the ``base.external.dbsource`` record to use.
        :return: a DBAPI or SQLAlchemy connection object, depending on
                 the record's connector type.
        """
        # Get dbsource record
        data = self.browse(cr, uid, id1)
        # Build the full connection string; the stored password is
        # substituted into the '%s' placeholder (appended as ';PWD=%s'
        # when the string has no placeholder).
        connStr = data.conn_string
        if data.password:
            if '%s' not in data.conn_string:
                connStr += ';PWD=%s'
            connStr = connStr % data.password
        # Try to connect
        if data.connector == 'cx_Oracle':
            os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.UTF8'
            conn = cx_Oracle.connect(connStr)
        elif data.connector == 'pyodbc':
            conn = pyodbc.connect(connStr)
        elif data.connector in ('sqlite', 'mysql', 'mssql'):
            conn = sqlalchemy.create_engine(connStr).connect()
        elif data.connector == 'postgresql':
            conn = psycopg2.connect(connStr)
        # NOTE(review): if ``data.connector`` matches none of the branches
        # above, ``conn`` is unbound here and this raises UnboundLocalError.
        # Looks unreachable while CONNECTORS only offers these values, but
        # confirm before adding new connector types.
        return conn

    def execute(self, cr, uid, ids, sqlquery, sqlparams=None, metadata=False,
                context=None):
        """Executes SQL and returns a list of rows.

        "sqlparams" can be a dict of values, that can be referenced in
        the SQL statement using "%(key)s" or, in the case of Oracle,
        ":key".

        Example:
            sqlquery = "select * from mytable where city = %(city)s and
                        date > %(dt)s"
            params   = {'city': 'Lisbon',
                        'dt': datetime.datetime(2000, 12, 31)}

        If metadata=True, it will instead return a dict containing the
        rows list and the columns list, in the format:
            { 'cols': [ 'col_a', 'col_b', ...]
            , 'rows': [ (a0, b0, ...), (a1, b1, ...), ...] }
        """
        data = self.browse(cr, uid, ids)
        rows, cols = list(), list()
        for obj in data:
            conn = self.conn_open(cr, uid, obj.id)
            if obj.connector in ["sqlite", "mysql", "mssql"]:
                # using sqlalchemy
                cur = conn.execute(sqlquery, sqlparams)
                if metadata:
                    cols = cur.keys()
                rows = [r for r in cur]
            else:
                # using other db connectors
                cur = conn.cursor()
                cur.execute(sqlquery, sqlparams)
                if metadata:
                    cols = [d[0] for d in cur.description]
                rows = cur.fetchall()
            # NOTE(review): the connection is not closed if the query above
            # raises; a try/finally around the per-record body would avoid
            # leaking connections - confirm and fix separately.
            conn.close()
        if metadata:
            return{'cols': cols, 'rows': rows}
        else:
            return rows

    def connection_test(self, cr, uid, ids, context=None):
        """Try to open (and immediately close) each selected datasource.

        Always ends in a raised ``except_orm``: an error message on
        failure, or a "succeeded" message on success -- raising is the
        only simple way to pop up a message box here (see TODO below).
        """
        for obj in self.browse(cr, uid, ids, context):
            conn = False
            try:
                conn = self.conn_open(cr, uid, obj.id)
            except Exception as e:
                raise orm.except_orm(_("Connection test failed!"),
                                     _("Here is what we got instead:\n %s")
                                     % tools.ustr(e))
            finally:
                try:
                    if conn:
                        conn.close()
                except Exception:
                    # ignored, just a consequence of the previous exception
                    pass
        # TODO: if OK a (wizard) message box should be displayed
        raise orm.except_orm(_("Connection test succeeded!"),
                             _("Everything seems properly set up!"))
|
vitan/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/str/models.py | 54 | # -*- coding: utf-8 -*-
"""
2. Adding __str__() or __unicode__() to models
Although it's not a strict requirement, each model should have a
``_str__()`` or ``__unicode__()`` method to return a "human-readable"
representation of the object. Do this not only for your own sanity when dealing
with the interactive prompt, but also because objects' representations are used
throughout Django's automatically-generated admin.
Normally, you should write ``__unicode__()`` method, since this will work for
all field types (and Django will automatically provide an appropriate
``__str__()`` method). However, you can write a ``__str__()`` method directly,
if you prefer. You must be careful to encode the results correctly, though.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Article(models.Model):
    """Model demonstrating a directly-defined ``__str__()`` (no
    ``__unicode__``); see the module docstring for why this needs care."""

    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        # Caution: this is only safe if you are certain that headline will be
        # in ASCII.
        return self.headline
@python_2_unicode_compatible
class InternationalArticle(models.Model):
    """Model demonstrating the decorator approach: a single ``__str__()``
    that works for non-ASCII headlines on both Python 2 and 3."""

    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    def __str__(self):
        return self.headline
|
Robpol86/PythonTemplate | refs/heads/master | tests/test_main.py | 1 | """Example test."""
from replace_me import main
def test(capsys):
    """Check that main() prints exactly 'Hello World!' and nothing to stderr."""
    main()
    captured_out, captured_err = capsys.readouterr()
    assert captured_out == 'Hello World!\n'
    assert not captured_err
|
drawks/ansible | refs/heads/devel | contrib/inventory/spacewalk.py | 28 | #!/usr/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacewalk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <jonEbird@gmail.com>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import os
import time
from optparse import OptionParser
import subprocess
import json
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
base_dir = os.path.dirname(os.path.realpath(__file__))
default_ini_file = os.path.join(base_dir, "spacewalk.ini")
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
# Sanity check
if not os.path.exists(SW_REPORT):
print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
os.chmod(CACHE_DIR, 0o2775)
# Helper functions
# ------------------------------
def spacewalk_report(name):
    """Yield a dictionary form of each CSV row produced by the specified
    spacewalk-report.

    The raw report output is cached in CACHE_DIR for CACHE_AGE seconds so
    repeated invocations do not re-run the (slow) external command.  Keys
    are taken from the CSV header and prefixed with 'spacewalk_'; rows
    whose field count does not match the header are silently skipped,
    and an empty report yields nothing.
    """
    cache_filename = os.path.join(CACHE_DIR, name)
    if not os.path.exists(cache_filename) or \
            (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
        # Update the cache; the with-block guarantees the handle is flushed
        # and closed even if spacewalk-report fails.
        with open(cache_filename, 'w') as fh:
            p = subprocess.Popen([SW_REPORT, name], stdout=fh)
            p.wait()

    # Read the cached CSV back; the original left this handle unclosed.
    with open(cache_filename, 'r') as fh:
        lines = fh.readlines()
    if not lines:
        # Empty/failed report: nothing to yield (the original raised
        # IndexError on lines[0] here).
        return

    # add 'spacewalk_' prefix to the keys
    keys = ['spacewalk_' + key for key in lines[0].strip().split(',')]
    for line in lines[1:]:
        values = line.strip().split(',')
        if len(keys) == len(values):
            yield dict(zip(keys, values))
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
default=False, action="store_true",
help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
# ------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk', 'cache_age'):
CACHE_AGE = config.get('spacewalk', 'cache_age')
if not options.org_number and config.has_option('spacewalk', 'org_number'):
options.org_number = config.get('spacewalk', 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
# ------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError) as e:
print('Problem executing the command "%s system-groups": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
# List out the known server from Spacewalk
# ------------------------------
if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
groups = {}
meta = {"hostvars": {}}
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[system['spacewalk_group_id']]
# shall we add the org_id as prefix to the group name:
if options.prefix_org_name:
prefix = org_id + "-"
group_name = prefix + system['spacewalk_group_name']
else:
group_name = system['spacewalk_group_name']
# if we are limited to one organization:
if options.org_number:
if org_id == options.org_number:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
# or we list all groups and systems:
else:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
for group, systems in iteritems(groups):
print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else:
final = dict([(k, list(s)) for k, s in iteritems(groups)])
final["_meta"] = meta
print(json.dumps(final))
# print(json.dumps(groups))
sys.exit(0)
# Return a details information concerning the spacewalk server
# ------------------------------
elif options.host:
host_details = {}
try:
for system in spacewalk_report('inventory'):
if system['spacewalk_hostname'] == options.host:
host_details = system
break
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
(SW_REPORT, str(e)), file=sys.stderr)
sys.exit(2)
if options.human:
print('Host: %s' % options.host)
for k, v in iteritems(host_details):
print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else:
print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
sys.exit(0)
else:
parser.print_help()
sys.exit(1)
|
google-code-export/rietveld | refs/heads/master | main.py | 23 | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main program for Rietveld.
This is also a template for running a Django app under Google App
Engine, especially when using a newer version of Django than provided
in the App Engine standard library.
The site-specific code is all in other files: urls.py, models.py,
views.py, settings.py.
"""
# Standard Python imports.
import os
import sys
import logging
# Log a message each time this module get loaded.
logging.info('Loading %s, app version = %s',
__name__, os.getenv('CURRENT_VERSION_ID'))
import appengine_config
# AppEngine imports.
from google.appengine.ext.webapp import util
# Import webapp.template. This makes most Django setup issues go away.
from google.appengine.ext.webapp import template
# Django 1.2+ requires DJANGO_SETTINGS_MODULE environment variable to be set
# http://code.google.com/appengine/docs/python/tools/libraries.html#Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# Import various parts of Django.
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch.dispatcher
import django.forms
def log_exception(*args, **kwds):
    """Django signal handler that logs the exception currently being handled.

    Signature accepts arbitrary signal arguments; the exception itself is
    taken from sys.exc_info().
    """
    exc_type, exc_value = sys.exc_info()[:2]
    logging.exception('Exception in request: %s: %s',
                      exc_type.__name__, exc_value)
# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)
# Unregister Django's default rollback event handler.
django.core.signals.got_request_exception.disconnect(
django.db._rollback_on_exception)
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
def real_main():
    """Main program: run the WSGI application via App Engine's CGI bridge."""
    # Run the WSGI CGI handler with that application.
    util.run_wsgi_app(application)
def profile_main():
    """Main program for profiling.

    Wraps real_main() in cProfile and appends an HTML-formatted profile
    dump (sorted by self time) to the response.

    NOTE(review): this file targets Python 2 / Google App Engine (print
    statements, StringIO module) — do not modernize the syntax here.
    """
    # Local imports so the profiler machinery is only loaded when
    # profiling is enabled (main = profile_main below).
    import cProfile
    import pstats
    import StringIO
    prof = cProfile.Profile()
    prof = prof.runctx('real_main()', globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    # stats.strip_dirs()  # Don't; too many modules are named __init__.py.
    stats.sort_stats('time')  # 'time', 'cumulative' or 'calls'
    stats.print_stats()  # Optional arg: how many to print
    # The rest is optional.
    # stats.print_callees()
    # stats.print_callers()
    print '\n<hr>'
    print '<h1>Profile</h1>'
    print '<pre>'
    print stream.getvalue()[:1000000]
    print '</pre>'
# Set this to profile_main to enable profiling.
main = real_main
if __name__ == '__main__':
main()
|
walteryang47/ovirt-engine | refs/heads/eayunos-4.2 | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine-common/config/__init__.py | 8 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ovirt-host-setup config plugin."""
from otopi import util
from . import jboss
from . import firewall
@util.export
def createPlugins(context):
    """Instantiate the config plugins (jboss, then firewall) for *context*."""
    for plugin_class in (jboss.Plugin, firewall.Plugin):
        plugin_class(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
|
colaftc/webtool | refs/heads/master | top/api/rest/WlbTmsorderQueryRequest.py | 1 | '''
Created by auto_sdk on 2014.11.04
'''
from top.api.base import RestApi
class WlbTmsorderQueryRequest(RestApi):
    """TOP API request wrapper for 'taobao.wlb.tmsorder.query'."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Query parameters, to be filled in by the caller before execution.
        self.order_code = None
        self.page_no = None
        self.page_size = None

    def getapiname(self):
        """Return the TOP API method name this request maps to."""
        return 'taobao.wlb.tmsorder.query'
|
AnderEnder/ansible-modules-extras | refs/heads/devel | cloud/cloudstack/cs_portforward.py | 22 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the port forwarding rule for.
- Required if C(state=present).
required: false
default: null
state:
description:
- State of the port forwarding rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the port forwarding rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified equal C(public_port).
required: false
default: null
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified equal C(private_port).
required: false
default: null
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
required: false
default: false
domain:
description:
- Domain the C(vm) is related to.
required: false
default: null
account:
description:
- Account the C(vm) is related to.
required: false
default: null
project:
description:
- Name of the project the C(vm) is located in.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# 1.2.3.4:80 -> web01:8080
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
# forward SSH and open firewall
- local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
# forward DNS traffic, but do not open firewall
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
# remove ssh port forwarding
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
    """Create, update and remove CloudStack port forwarding rules."""

    def __init__(self, module):
        super(AnsibleCloudStackPortforwarding, self).__init__(module)
        # Mapping of API result keys -> keys of the Ansible result dict.
        self.returns = {
            'virtualmachinedisplayname': 'vm_display_name',
            'virtualmachinename': 'vm_name',
            'ipaddress': 'ip_address',
            'vmguestip': 'vm_guest_ip',
            'publicip': 'public_ip',
            'protocol': 'protocol',
        }
        # these values will be casted to int
        self.returns_to_int = {
            'publicport': 'public_port',
            'publicendport': 'public_end_port',
            'privateport': 'private_port',
            'privateendport': 'private_end_port',
        }
        # Cached lookup result; see get_portforwarding_rule().
        self.portforwarding_rule = None

    def get_portforwarding_rule(self):
        """Return the existing rule matching protocol + public_port, or None.

        The first successful lookup is cached on the instance.
        """
        if not self.portforwarding_rule:
            protocol = self.module.params.get('protocol')
            public_port = self.module.params.get('public_port')
            args = {}
            args['ipaddressid'] = self.get_ip_address(key='id')
            args['projectid'] = self.get_project(key='id')
            portforwarding_rules = self.cs.listPortForwardingRules(**args)

            if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
                for rule in portforwarding_rules['portforwardingrule']:
                    if protocol == rule['protocol'] \
                            and public_port == int(rule['publicport']):
                        self.portforwarding_rule = rule
                        break
        return self.portforwarding_rule

    def present_portforwarding_rule(self):
        """Ensure the rule exists: update it if found, create it otherwise."""
        portforwarding_rule = self.get_portforwarding_rule()
        if portforwarding_rule:
            portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
        else:
            portforwarding_rule = self.create_portforwarding_rule()
        return portforwarding_rule

    def create_portforwarding_rule(self):
        """Create a new port forwarding rule from the module parameters."""
        args = {}
        args['protocol'] = self.module.params.get('protocol')
        args['publicport'] = self.module.params.get('public_port')
        args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
        args['privateport'] = self.module.params.get('private_port')
        args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
        args['openfirewall'] = self.module.params.get('open_firewall')
        args['vmguestip'] = self.get_vm_guest_ip()
        args['ipaddressid'] = self.get_ip_address(key='id')
        args['virtualmachineid'] = self.get_vm(key='id')

        portforwarding_rule = None
        self.result['changed'] = True
        if not self.module.check_mode:
            portforwarding_rule = self.cs.createPortForwardingRule(**args)
            poll_async = self.module.params.get('poll_async')
            if poll_async:
                portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
        return portforwarding_rule

    def update_portforwarding_rule(self, portforwarding_rule):
        """Update an existing rule if any relevant argument changed."""
        args = {}
        args['protocol'] = self.module.params.get('protocol')
        args['publicport'] = self.module.params.get('public_port')
        args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
        args['privateport'] = self.module.params.get('private_port')
        args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
        args['vmguestip'] = self.get_vm_guest_ip()
        args['ipaddressid'] = self.get_ip_address(key='id')
        args['virtualmachineid'] = self.get_vm(key='id')

        if self.has_changed(args, portforwarding_rule):
            self.result['changed'] = True
            if not self.module.check_mode:
                # API broken in 4.2.1?, workaround using remove/create instead of update
                # portforwarding_rule = self.cs.updatePortForwardingRule(**args)
                self.absent_portforwarding_rule()
                portforwarding_rule = self.cs.createPortForwardingRule(**args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
        return portforwarding_rule

    def absent_portforwarding_rule(self):
        """Remove the rule if present; return the rule that was removed."""
        portforwarding_rule = self.get_portforwarding_rule()

        if portforwarding_rule:
            self.result['changed'] = True
            args = {}
            args['id'] = portforwarding_rule['id']

            if not self.module.check_mode:
                res = self.cs.deletePortForwardingRule(**args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'portforwardingrule')
        return portforwarding_rule

    def get_result(self, portforwarding_rule):
        """Build the Ansible result dict, casting port fields to int."""
        super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
        if portforwarding_rule:
            # Bad bad API does not always return int when it should.
            # Use items() instead of the Python-2-only iteritems() so this
            # also runs on Python 3.
            for search_key, return_key in self.returns_to_int.items():
                if search_key in portforwarding_rule:
                    self.result[return_key] = int(portforwarding_rule[search_key])
        return self.result
def main():
    """Module entry point: build the argument spec, apply the requested
    state and exit with the resulting facts."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        ip_address=dict(required=True),
        protocol=dict(choices=['tcp', 'udp'], default='tcp'),
        public_port=dict(type='int', required=True),
        public_end_port=dict(type='int', default=None),
        private_port=dict(type='int', required=True),
        private_end_port=dict(type='int', default=None),
        state=dict(choices=['present', 'absent'], default='present'),
        open_firewall=dict(type='bool', default=False),
        vm_guest_ip=dict(default=None),
        vm=dict(default=None),
        zone=dict(default=None),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        acs_pf = AnsibleCloudStackPortforwarding(module)
        if module.params.get('state') == 'absent':
            pf_rule = acs_pf.absent_portforwarding_rule()
        else:
            pf_rule = acs_pf.present_portforwarding_rule()
        result = acs_pf.get_result(pf_rule)
    except CloudStackException as e:
        # fail_json() exits the module process with the error message.
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
dysonltd/gts | refs/heads/develop | app/lib/gtest-1.6.0/test/gtest_catch_exceptions_test.py | 414 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
  # pylint:disable-msg=C6302
  class CatchSehExceptionsTest(gtest_test_utils.TestCase):
    """Tests exception-catching behavior."""

    def TestSehExceptions(self, test_output):
      # Shared helper (not auto-discovered: name does not start with 'test').
      # Verifies each phase of the test lifecycle reports the SEH exception
      # with code 0x2a in the given binary output.
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s constructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown '
                   'in the test fixture\'s destructor'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in SetUp()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in TearDown()'
                   in test_output)
      self.assert_('SEH exception with code 0x2a thrown in the test body'
                   in test_output)

    def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
      # Binary built with C++ exceptions enabled.
      self.TestSehExceptions(EX_BINARY_OUTPUT)

    def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
      # Binary built with C++ exceptions disabled.
      self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
  """Tests C++ exception-catching behavior.

  Tests in this test case verify that:
  * C++ exceptions are caught and logged as C++ (not SEH) exceptions
  * Exception thrown affect the remainder of the test work flow in the
    expected manner.

  All assertions match against EX_BINARY_OUTPUT, the captured output of
  the test binary built with C++ exceptions enabled.
  """

  def testCatchesCxxExceptionsInFixtureConstructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s constructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInConstructorTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInFixtureDestructor(self):
    self.assert_('C++ exception with description '
                 '"Standard C++ exception" thrown '
                 'in the test fixture\'s destructor'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUpTestCase(self):
    # An exception in SetUpTestCase() must not prevent the rest of the
    # lifecycle (constructor/destructor/SetUp/TearDown/body) from running.
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUpTestCase()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTestCaseTest test body '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTearDownTestCase(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDownTestCase()'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInSetUp(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in SetUp()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInSetUpTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('unexpected' not in EX_BINARY_OUTPUT,
                 'This failure belongs in this test only if '
                 '"CxxExceptionInSetUpTest" (no quotes) '
                 'appears on the same line as words "called unexpectedly"')

  def testCatchesCxxExceptionsInTearDown(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in TearDown()'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTearDownTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesCxxExceptionsInTestBody(self):
    self.assert_('C++ exception with description "Standard C++ exception"'
                 ' thrown in the test body'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest destructor '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)
    self.assert_('CxxExceptionInTestBodyTest::TearDown() '
                 'called as expected.'
                 in EX_BINARY_OUTPUT)

  def testCatchesNonStdCxxExceptions(self):
    self.assert_('Unknown C++ exception thrown in the test body'
                 in EX_BINARY_OUTPUT)

  def testUnhandledCxxExceptionsAbortTheProgram(self):
    # Filters out SEH exception tests on Windows. Unhandled SEH exceptions
    # cause tests to show pop-up windows there.
    # NOTE(review): "FITLER" below is a typo for "FILTER" (harmless local
    # name, kept as-is).
    FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
    # By default, Google Test doesn't catch the exceptions.
    uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
        [EX_EXE_PATH,
         NO_CATCH_EXCEPTIONS_FLAG,
         FITLER_OUT_SEH_TESTS_FLAG]).output

    self.assert_('Unhandled C++ exception terminating the program'
                 in uncaught_exceptions_ex_binary_output)
    self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
|
lmprice/ansible | refs/heads/devel | lib/ansible/plugins/lookup/random_choice.py | 157 | # (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: random_choice
author: Michael DeHaan <michael.dehaan@gmail.com>
version_added: "1.1"
short_description: return random element from list
description:
- The 'random_choice' feature can be used to pick something at random. While it's not a load balancer (there are modules for those),
it can somewhat be used as a poor man's load balancer in a MacGyver like situation.
- At a more basic level, they can be used to add chaos and excitement to otherwise predictable automation environments.
"""
EXAMPLES = """
- name: Magic 8 ball for MUDs
debug:
msg: "{{ item }}"
with_random_choice:
- "go through the door"
- "drink from the goblet"
- "press the red button"
- "do nothing"
"""
RETURN = """
_raw:
description:
- random item
"""
import random
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Lookup plugin returning one randomly chosen element of its input list."""

    def run(self, terms, inject=None, **kwargs):
        """Pick a single random entry from ``terms``.

        An empty/falsy ``terms`` is passed through unchanged; otherwise a
        one-element list containing the chosen item is returned.
        """
        if not terms:
            # Nothing to choose from: hand the input back untouched.
            return terms
        try:
            return [random.choice(terms)]
        except Exception as e:
            raise AnsibleError("Unable to choose random term: %s" % to_native(e))
|
ceph/teuthology | refs/heads/master | teuthology/provision/cloud/base.py | 3 | import logging
from copy import deepcopy
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider as lc_Provider
import teuthology.orchestra.remote
import teuthology.provision.cloud
from teuthology.misc import canonicalize_hostname, decanonicalize_hostname
log = logging.getLogger(__name__)
class Provider(object):
    """Thin wrapper binding a named cloud config to a libcloud compute driver."""

    # Names of driver_args entries that must be passed positionally, in order.
    _driver_posargs = []

    def __init__(self, name, conf):
        """Record the provider's name, its config dict, and the driver name.

        ``conf`` must contain a 'driver' key naming a libcloud provider.
        """
        self.name = name
        self.conf = conf
        self.driver_name = conf['driver']

    def _get_driver(self):
        """Instantiate the libcloud driver described by the configuration."""
        driver_cls = get_driver(
            getattr(lc_Provider, self.driver_name.upper())
        )
        kwargs = self._get_driver_args()
        # Pull the positional arguments out of the kwargs dict, in order.
        posargs = [kwargs.pop(key) for key in self._driver_posargs]
        return driver_cls(*posargs, **kwargs)

    # Expose the driver as a (re-created on each access) read-only property.
    driver = property(fget=_get_driver)

    def _get_driver_args(self):
        """Return a private deep copy of the configured driver arguments."""
        return deepcopy(self.conf['driver_args'])
class Provisioner(object):
    """Base class for creating/destroying a single cloud node.

    Subclasses implement ``_create``/``_destroy``; the public
    ``create``/``destroy`` wrappers turn any exception into a logged
    ``False`` result.
    """

    def __init__(
        self, provider, name, os_type=None, os_version=None,
        conf=None, user='ubuntu',
    ):
        """Store node identity and OS info; resolve a provider given by name."""
        if isinstance(provider, str):
            # Allow callers to pass a provider name instead of an object.
            provider = teuthology.provision.cloud.get_provider(provider)
        self.provider = provider
        self.name = decanonicalize_hostname(name)
        self.hostname = canonicalize_hostname(name, user=None)
        self.os_type = os_type
        self.os_version = os_version
        self.user = user

    def create(self):
        """Create the node; return False (and log) on any failure."""
        try:
            return self._create()
        except Exception:
            log.exception("Failed to create %s", self.name)
            return False

    def _create(self):
        """Subclass hook performing the actual creation; default is a no-op."""

    def destroy(self):
        """Destroy the node; return False (and log) on any failure."""
        try:
            return self._destroy()
        except Exception:
            log.exception("Failed to destroy %s", self.name)
            return False

    def _destroy(self):
        """Subclass hook performing the actual teardown; default is a no-op."""

    @property
    def remote(self):
        """Lazily-built Remote handle for ``user@name`` (cached per instance)."""
        try:
            return self._remote
        except AttributeError:
            self._remote = teuthology.orchestra.remote.Remote(
                "%s@%s" % (self.user, self.name),
            )
            return self._remote

    def __repr__(self):
        return (
            "%s(provider='%s', name='%s', os_type='%s', os_version='%s')" % (
                self.__class__.__name__,
                self.provider.name,
                self.name,
                self.os_type,
                self.os_version,
            )
        )
|
Serag8/Bachelor | refs/heads/master | google_appengine/google/storage/speckle/python/django/management/commands/__init__.py | 1333 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
gojira/tensorflow | refs/heads/master | tensorflow/python/estimator/exporter_test.py | 13 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `Exporter`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
class BestExporterTest(test.TestCase):
  """Tests for `exporter_lib.BestExporter` (export-on-improvement behavior)."""

  def test_error_out_if_exports_to_keep_is_zero(self):
    """A non-positive `exports_to_keep` must raise ValueError at construction."""

    def _serving_input_receiver_fn():
      pass

    with self.assertRaisesRegexp(ValueError, "positive number"):
      exporter = exporter_lib.BestExporter(
          name="best_exporter",
          serving_input_receiver_fn=_serving_input_receiver_fn,
          exports_to_keep=0)
      self.assertEqual("best_exporter", exporter.name)

  def test_best_exporter(self):
    """`export` delegates to `Estimator.export_savedmodel` with its config."""

    def _serving_input_receiver_fn():
      pass

    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=5)
    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.export_savedmodel.return_value = "export_result_path"
    estimator.model_dir = export_dir_base

    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {}, False)

    self.assertEqual("export_result_path", export_result)
    estimator.export_savedmodel.assert_called_with(
        export_dir_base,
        _serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        checkpoint_path="checkpoint_path",
        strip_default_attrs=True)

  def test_best_export_is_saved(self):
    """Only evaluations that improve on the best loss trigger an export."""

    def _serving_input_receiver_fn():
      pass

    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)
    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.export_savedmodel.return_value = "export_result_path"
    estimator.model_dir = export_dir_base

    # First evaluation: loss 0.5 becomes the best so far, so an export happens.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 0.5}, False)
    self.assertTrue(estimator.export_savedmodel.called)
    self.assertEqual("export_result_path", export_result)

    # Worse loss (0.6 > 0.5): no export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 0.6}, False)
    self.assertEqual(None, export_result)

    # Better loss (0.4 < 0.5): export again.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 0.4}, False)
    self.assertEqual("export_result_path", export_result)

  def test_best_exporter_with_preemption(self):
    """Best metric is recovered from prior event files (e.g. after preemption)."""

    def _serving_input_receiver_fn():
      pass

    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")

    # Seed pre-existing eval summaries; the best recorded loss is 50.
    eval_dir_base = os.path.join(export_dir_base, "eval_continuous")
    estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 50}, 1)
    estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 60}, 2)

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        event_file_pattern="eval_continuous/*.tfevents.*",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)

    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base
    estimator.export_savedmodel.return_value = "export_result_path"

    # Loss 100 is worse than the recovered best (50): no export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 100}, False)
    self.assertEqual(None, export_result)

    # Loss 10 beats 50: export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 10}, False)
    self.assertEqual("export_result_path", export_result)

    # Loss 20 is worse than the new best (10): no export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 20}, False)
    self.assertEqual(None, export_result)

  def test_best_exporter_with_empty_event(self):
    """Event files with empty summaries don't break best-metric recovery."""

    def _serving_input_receiver_fn():
      pass

    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")

    # One empty summary plus one with loss 60; recovered best is 60.
    eval_dir_base = os.path.join(export_dir_base, "eval_continuous")
    estimator_lib._write_dict_to_summary(eval_dir_base, {}, 1)
    estimator_lib._write_dict_to_summary(eval_dir_base, {"loss": 60}, 2)

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        event_file_pattern="eval_continuous/*.tfevents.*",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=1)

    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base
    estimator.export_savedmodel.return_value = "export_result_path"

    # Loss 100 is worse than the recovered best (60): no export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 100}, False)
    self.assertEqual(None, export_result)

    # Loss 10 beats 60: export.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {"loss": 10}, False)
    self.assertEqual("export_result_path", export_result)

  def test_garbage_collect_exports(self):
    """Exports beyond `exports_to_keep` are deleted, oldest first."""
    export_dir_base = tempfile.mkdtemp()
    gfile.MkDir(export_dir_base)
    gfile.MkDir(export_dir_base + "/export")
    gfile.MkDir(export_dir_base + "/eval")

    export_dir_1 = _create_test_export_dir(export_dir_base)
    export_dir_2 = _create_test_export_dir(export_dir_base)
    export_dir_3 = _create_test_export_dir(export_dir_base)
    export_dir_4 = _create_test_export_dir(export_dir_base)

    self.assertTrue(gfile.Exists(export_dir_1))
    self.assertTrue(gfile.Exists(export_dir_2))
    self.assertTrue(gfile.Exists(export_dir_3))
    self.assertTrue(gfile.Exists(export_dir_4))

    def _serving_input_receiver_fn():
      return array_ops.constant([1]), None

    exporter = exporter_lib.BestExporter(
        name="best_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        exports_to_keep=2)
    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.model_dir = export_dir_base
    # Garbage collect all but the most recent 2 exports,
    # where recency is determined based on the timestamp directory names.
    exporter.export(estimator, export_dir_base, None, None, False)

    self.assertFalse(gfile.Exists(export_dir_1))
    self.assertFalse(gfile.Exists(export_dir_2))
    self.assertTrue(gfile.Exists(export_dir_3))
    self.assertTrue(gfile.Exists(export_dir_4))
class LatestExporterTest(test.TestCase):
  """Tests for `exporter_lib.LatestExporter` and `FinalExporter`."""

  def test_error_out_if_exports_to_keep_is_zero(self):
    """A non-positive `exports_to_keep` must raise ValueError at construction."""

    def _serving_input_receiver_fn():
      pass

    with self.assertRaisesRegexp(ValueError, "positive number"):
      exporter = exporter_lib.LatestExporter(
          name="latest_exporter",
          serving_input_receiver_fn=_serving_input_receiver_fn,
          exports_to_keep=0)
      self.assertEqual("latest_exporter", exporter.name)

  def test_latest_exporter(self):
    """`export` delegates to `Estimator.export_savedmodel` with its config."""

    def _serving_input_receiver_fn():
      pass

    export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(export_dir_base)

    exporter = exporter_lib.LatestExporter(
        name="latest_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=5)
    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.export_savedmodel.return_value = "export_result_path"

    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {}, False)

    self.assertEqual("export_result_path", export_result)
    estimator.export_savedmodel.assert_called_with(
        export_dir_base,
        _serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        checkpoint_path="checkpoint_path",
        strip_default_attrs=True)

  def test_only_the_last_export_is_saved(self):
    """`FinalExporter` only exports when `is_the_final_export` is True."""

    def _serving_input_receiver_fn():
      pass

    export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(export_dir_base)

    exporter = exporter_lib.FinalExporter(
        name="latest_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False)
    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    estimator.export_savedmodel.return_value = "export_result_path"

    # is_the_final_export=False: nothing should be exported.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {}, False)

    self.assertFalse(estimator.export_savedmodel.called)
    self.assertEqual(None, export_result)

    # is_the_final_export=True: the export proceeds.
    export_result = exporter.export(estimator, export_dir_base,
                                    "checkpoint_path", {}, True)

    self.assertEqual("export_result_path", export_result)
    estimator.export_savedmodel.assert_called_with(
        export_dir_base,
        _serving_input_receiver_fn,
        assets_extra={"from/path": "to/path"},
        as_text=False,
        checkpoint_path="checkpoint_path",
        strip_default_attrs=True)

  def test_garbage_collect_exports(self):
    """Exports beyond `exports_to_keep` are deleted, oldest first."""
    export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(export_dir_base)
    export_dir_1 = _create_test_export_dir(export_dir_base)
    export_dir_2 = _create_test_export_dir(export_dir_base)
    export_dir_3 = _create_test_export_dir(export_dir_base)
    export_dir_4 = _create_test_export_dir(export_dir_base)

    self.assertTrue(gfile.Exists(export_dir_1))
    self.assertTrue(gfile.Exists(export_dir_2))
    self.assertTrue(gfile.Exists(export_dir_3))
    self.assertTrue(gfile.Exists(export_dir_4))

    def _serving_input_receiver_fn():
      return array_ops.constant([1]), None

    exporter = exporter_lib.LatestExporter(
        name="latest_exporter",
        serving_input_receiver_fn=_serving_input_receiver_fn,
        exports_to_keep=2)
    estimator = test.mock.Mock(spec=estimator_lib.Estimator)
    # Garbage collect all but the most recent 2 exports,
    # where recency is determined based on the timestamp directory names.
    exporter.export(estimator, export_dir_base, None, None, False)

    self.assertFalse(gfile.Exists(export_dir_1))
    self.assertFalse(gfile.Exists(export_dir_2))
    self.assertTrue(gfile.Exists(export_dir_3))
    self.assertTrue(gfile.Exists(export_dir_4))
def _create_test_export_dir(export_dir_base):
  """Create one timestamped export directory under `export_dir_base`."""
  new_export_dir = _get_timestamped_export_dir(export_dir_base)
  gfile.MkDir(new_export_dir)
  # Wait so the next call lands on a different whole-second timestamp.
  time.sleep(2)
  return new_export_dir
def _get_timestamped_export_dir(export_dir_base):
  """Return a not-yet-existing, second-granularity timestamped export dir.

  When we create a timestamped directory, there is a small chance that the
  directory already exists because another worker is also writing exports.
  In this case we just wait one second to get a new timestamp and try again.
  If this fails several times in a row, then something is seriously wrong.
  """
  max_directory_creation_attempts = 10
  for attempt in range(1, max_directory_creation_attempts + 1):
    export_timestamp = int(time.time())
    export_dir = os.path.join(
        compat.as_bytes(export_dir_base), compat.as_bytes(
            str(export_timestamp)))
    if not gfile.Exists(export_dir):
      # Collisions are still possible (though extremely unlikely): this
      # directory is not actually created yet, but it will be almost
      # instantly on return from this function.
      return export_dir
    time.sleep(1)
    logging.warn(
        "Export directory {} already exists; retrying (attempt {}/{})".format(
            export_dir, attempt, max_directory_creation_attempts))
  raise RuntimeError("Failed to obtain a unique export directory name after "
                     "{} attempts.".format(max_directory_creation_attempts))
if __name__ == "__main__":
test.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.