| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null) |
|---|---|---|---|---|
chatcannon/numpy | refs/heads/master | numpy/matlib.py | 161 |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
"""Return a new matrix of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty matrix.
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
See Also
--------
empty_like, zeros
Notes
-----
`empty`, unlike `zeros`, does not set the matrix values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.empty((2, 2)) # filled with random data
matrix([[ 6.76425276e-320, 9.79033856e-307],
[ 7.39337286e-309, 3.22135945e-309]]) #random
>>> np.matlib.empty((2, 2), dtype=int)
matrix([[ 6600475, 0],
[ 6586976, 22740995]]) #random
"""
return ndarray.__new__(matrix, shape, dtype, order=order)
def ones(shape, dtype=None, order='C'):
"""
Matrix of ones.
Return a matrix of given shape and type, filled with ones.
Parameters
----------
shape : {sequence of ints, int}
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is np.float64.
order : {'C', 'F'}, optional
Whether to store matrix in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Matrix of ones of given shape, dtype, and order.
See Also
--------
ones : Array of ones.
matlib.zeros : Zero matrix.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> np.matlib.ones((2,3))
matrix([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> np.matlib.ones(2)
matrix([[ 1., 1.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
a.fill(1)
return a
def zeros(shape, dtype=None, order='C'):
"""
Return a matrix of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the matrix
dtype : data-type, optional
The desired data-type for the matrix, default is float.
order : {'C', 'F'}, optional
Whether to store the result in C- or Fortran-contiguous order,
default is 'C'.
Returns
-------
out : matrix
Zero matrix of given shape, dtype, and order.
See Also
--------
numpy.zeros : Equivalent array function.
matlib.ones : Return a matrix of ones.
Notes
-----
If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
`out` becomes a single row matrix of shape ``(1,N)``.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.zeros((2, 3))
matrix([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> np.matlib.zeros(2)
matrix([[ 0., 0.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
a.fill(0)
return a
def identity(n,dtype=None):
"""
Returns the square identity matrix of given size.
Parameters
----------
n : int
Size of the returned identity matrix.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : matrix
`n` x `n` matrix with its main diagonal set to one,
and all other elements zero.
See Also
--------
numpy.identity : Equivalent array function.
matlib.eye : More general matrix identity function.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.identity(3, dtype=int)
matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
a = array([1]+n*[0], dtype=dtype)
b = empty((n, n), dtype=dtype)
b.flat = a
return b
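# Note on the implementation above (explanatory comment, not in the original
# source): assigning the length n+1 array [1, 0, ..., 0] to ``b.flat`` repeats
# it cyclically across all n*n entries, so the leading 1 lands once per row,
# each time one column further right - i.e. on the main diagonal. For n = 3
# the flattened result is [1,0,0,0, 1,0,0,0, 1], read back as a 3x3 matrix.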
def eye(n,M=None, k=0, dtype=float):
"""
Return a matrix with ones on the diagonal and zeros elsewhere.
Parameters
----------
n : int
Number of rows in the output.
M : int, optional
Number of columns in the output, defaults to `n`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned matrix.
Returns
-------
I : matrix
A `n` x `M` matrix where all elements are equal to zero,
except for the `k`-th diagonal, whose values are equal to one.
See Also
--------
numpy.eye : Equivalent array function.
identity : Square identity matrix.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.eye(3, k=1, dtype=float)
matrix([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
return asmatrix(np.eye(n, M, k, dtype))
def rand(*args):
"""
Return a matrix of random values with given shape.
Create a matrix of the given shape and populate it with
random samples from a uniform distribution over ``[0, 1)``.
Parameters
----------
\\*args : Arguments
Shape of the output.
If given as N integers, each integer specifies the size of one
dimension.
If given as a tuple, this tuple gives the complete shape.
Returns
-------
out : ndarray
The matrix of random values with shape given by `\\*args`.
See Also
--------
randn, numpy.random.rand
Examples
--------
>>> import numpy.matlib
>>> np.matlib.rand(2, 3)
matrix([[ 0.68340382, 0.67926887, 0.83271405],
[ 0.00793551, 0.20468222, 0.95253525]]) #random
>>> np.matlib.rand((2, 3))
matrix([[ 0.84682055, 0.73626594, 0.11308016],
[ 0.85429008, 0.3294825 , 0.89139555]]) #random
If the first argument is a tuple, other arguments are ignored:
>>> np.matlib.rand((2, 3), 4)
matrix([[ 0.46898646, 0.15163588, 0.95188261],
[ 0.59208621, 0.09561818, 0.00583606]]) #random
"""
if isinstance(args[0], tuple):
args = args[0]
return asmatrix(np.random.rand(*args))
def randn(*args):
"""
Return a random matrix with data from the "standard normal" distribution.
`randn` generates a matrix filled with random floats sampled from a
univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
Parameters
----------
\\*args : Arguments
Shape of the output.
If given as N integers, each integer specifies the size of one
dimension. If given as a tuple, this tuple gives the complete shape.
Returns
-------
Z : matrix of floats
A matrix of floating-point samples drawn from the standard normal
distribution.
See Also
--------
rand, random.randn
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use:
``sigma * np.matlib.randn(...) + mu``
Examples
--------
>>> import numpy.matlib
>>> np.matlib.randn(1)
matrix([[-0.09542833]]) #random
>>> np.matlib.randn(2, 3)
matrix([[ 0.16198284, 0.0194571 , 0.18312985],
[-0.7509172 , 1.61055 , 0.45298599]]) #random
Two-by-four matrix of samples from :math:`N(3, 6.25)`:
>>> 2.5 * np.matlib.randn((2, 4)) + 3
matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922],
[ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random
"""
if isinstance(args[0], tuple):
args = args[0]
return asmatrix(np.random.randn(*args))
def repmat(a, m, n):
"""
Repeat a 0-D to 2-D array or matrix MxN times.
Parameters
----------
a : array_like
The array or matrix to be repeated.
m, n : int
The number of times `a` is repeated along the first and second axes.
Returns
-------
out : ndarray
The result of repeating `a`.
Examples
--------
>>> import numpy.matlib
>>> a0 = np.array(1)
>>> np.matlib.repmat(a0, 2, 3)
array([[1, 1, 1],
[1, 1, 1]])
>>> a1 = np.arange(4)
>>> np.matlib.repmat(a1, 2, 2)
array([[0, 1, 2, 3, 0, 1, 2, 3],
[0, 1, 2, 3, 0, 1, 2, 3]])
>>> a2 = np.asmatrix(np.arange(6).reshape(2, 3))
>>> np.matlib.repmat(a2, 2, 3)
matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5]])
"""
a = asanyarray(a)
ndim = a.ndim
if ndim == 0:
origrows, origcols = (1, 1)
elif ndim == 1:
origrows, origcols = (1, a.shape[0])
else:
origrows, origcols = a.shape
rows = origrows * m
cols = origcols * n
c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0)
return c.reshape(rows, cols)
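# Illustrative usage sketch (added for clarity; not part of the original
# module). Assumes a NumPy version that still ships numpy.matlib and the
# legacy matrix class:
#
#     >>> import numpy.matlib as M
#     >>> A = M.repmat(M.eye(2), 2, 3)   # tile a 2x2 identity into 4x6
#     >>> A.shape
#     (4, 6)
#     >>> noise = 3 + 2 * M.randn(2, 2)  # N(3, 4) samples as a matrix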
|
viswimmer1/PythonGenerator | refs/heads/master | data/python_files/32935198/browseProjects.py | 18 |
import hierlist, string, regutil, os
import win32con, win32ui, win32api
import commctrl
from pywin.mfc import dialog
import glob
import pyclbr
import pywin.framework.scriptutils
import afxres
class HLIErrorItem(hierlist.HierListItem):
def __init__(self, text):
self.text = text
hierlist.HierListItem.__init__(self)
def GetText(self):
return self.text
class HLICLBRItem(hierlist.HierListItem):
def __init__(self, name, file, lineno, suffix = ""):
# If the 'name' object itself has a .name, use it. Not sure
# how this happens, but seems pyclbr related.
# See PyWin32 bug 817035
self.name = getattr(name, "name", name)
self.file = file
self.lineno = lineno
self.suffix = suffix
def __cmp__(self, other):
return cmp(self.name, other.name)
def GetText(self):
return self.name + self.suffix
def TakeDefaultAction(self):
if self.file:
pywin.framework.scriptutils.JumpToDocument(self.file, self.lineno, bScrollToTop=1)
else:
win32ui.SetStatusText("The source of this object is unknown")
def PerformItemSelected(self):
if self.file is None:
msg = "%s - source can not be located." % (self.name, )
else:
msg = "%s defined at line %d of %s" % (self.name, self.lineno, self.file)
win32ui.SetStatusText(msg)
class HLICLBRClass(HLICLBRItem):
def __init__(self, clbrclass, suffix = ""):
try:
name = clbrclass.name
file = clbrclass.file
lineno = clbrclass.lineno
self.super = clbrclass.super
self.methods = clbrclass.methods
except AttributeError:
name = clbrclass
file = lineno = None
self.super = []; self.methods = {}
HLICLBRItem.__init__(self, name, file, lineno, suffix)
def GetSubList(self):
ret = []
for c in self.super:
ret.append(HLICLBRClass(c, " (Parent class)"))
for meth, lineno in self.methods.items():
ret.append(HLICLBRMethod(meth, self.file, lineno, " (method)"))
return ret
def IsExpandable(self):
return len(self.methods) + len(self.super)
def GetBitmapColumn(self):
return 21
class HLICLBRFunction(HLICLBRClass):
def GetBitmapColumn(self):
return 22
class HLICLBRMethod(HLICLBRItem):
def GetBitmapColumn(self):
return 22
class HLIModuleItem(hierlist.HierListItem):
def __init__(self, path):
hierlist.HierListItem.__init__(self)
self.path = path
def GetText(self):
return os.path.split(self.path)[1] + " (module)"
def IsExpandable(self):
return 1
def TakeDefaultAction(self):
win32ui.GetApp().OpenDocumentFile( self.path )
def GetBitmapColumn(self):
col = 4 # Default
try:
if win32api.GetFileAttributes(self.path) & win32con.FILE_ATTRIBUTE_READONLY:
col = 5
except win32api.error:
pass
return col
def GetSubList(self):
mod, path = pywin.framework.scriptutils.GetPackageModuleName(self.path)
win32ui.SetStatusText("Building class list - please wait...", 1)
win32ui.DoWaitCursor(1)
try:
try:
reader = pyclbr.readmodule_ex # Post 1.5.2 interface.
extra_msg = " or functions"
except AttributeError:
reader = pyclbr.readmodule
extra_msg = ""
data = reader(mod, [path])
if data:
ret = []
for item in data.values():
if item.__class__ != pyclbr.Class: # ie, it is a pyclbr Function instance (only introduced post 1.5.2)
ret.append(HLICLBRFunction( item, " (function)" ) )
else:
ret.append(HLICLBRClass( item, " (class)") )
ret.sort()
return ret
else:
return [HLIErrorItem("No Python classes%s in module." % (extra_msg,))]
finally:
win32ui.DoWaitCursor(0)
win32ui.SetStatusText(win32ui.LoadString(afxres.AFX_IDS_IDLEMESSAGE))
def MakePathSubList(path):
ret = []
for filename in glob.glob(os.path.join(path,'*')):
if os.path.isdir(filename) and os.path.isfile(os.path.join(filename, "__init__.py")):
ret.append(HLIDirectoryItem(filename, os.path.split(filename)[1]))
else:
if string.lower(os.path.splitext(filename)[1]) in ['.py', '.pyw']:
ret.append(HLIModuleItem(filename))
return ret
class HLIDirectoryItem(hierlist.HierListItem):
def __init__(self, path, displayName = None, bSubDirs = 0):
hierlist.HierListItem.__init__(self)
self.path = path
self.bSubDirs = bSubDirs
if displayName:
self.displayName = displayName
else:
self.displayName = path
def IsExpandable(self):
return 1
def GetText(self):
return self.displayName
def GetSubList(self):
ret = MakePathSubList(self.path)
if os.path.split(self.path)[1] == "win32com": # Complete and utter hack for win32com.
try:
path = win32api.GetFullPathName(os.path.join(self.path, "..\\win32comext"))
ret = ret + MakePathSubList(path)
except win32ui.error:
pass
return ret
class HLIProjectRoot(hierlist.HierListItem):
def __init__(self, projectName, displayName = None):
hierlist.HierListItem.__init__(self)
self.projectName = projectName
self.displayName = displayName or projectName
def GetText(self):
return self.displayName
def IsExpandable(self):
return 1
def GetSubList(self):
paths = regutil.GetRegisteredNamedPath(self.projectName)
pathList = string.split(paths,";")
if len(pathList)==1: # Single dir - don't bother putting the dir in
ret = MakePathSubList(pathList[0])
else:
ret = map( HLIDirectoryItem, pathList )
return ret
class HLIRoot(hierlist.HierListItem):
def __init__(self):
hierlist.HierListItem.__init__(self)
def IsExpandable(self):
return 1
def GetSubList(self):
keyStr = regutil.BuildDefaultPythonKey() + "\\PythonPath"
hKey = win32api.RegOpenKey(regutil.GetRootKey(), keyStr)
try:
ret = []
ret.append(HLIProjectRoot("", "Standard Python Library")) # The core path.
index = 0
while 1:
try:
ret.append(HLIProjectRoot(win32api.RegEnumKey(hKey, index)))
index = index + 1
except win32api.error:
break
return ret
finally:
win32api.RegCloseKey(hKey)
class dynamic_browser (dialog.Dialog):
style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
cs = (
win32con.WS_CHILD |
win32con.WS_VISIBLE |
commctrl.TVS_HASLINES |
commctrl.TVS_LINESATROOT |
commctrl.TVS_HASBUTTONS
)
dt = [
["Python Projects", (0, 0, 200, 200), style, None, (8, "MS Sans Serif")],
["SysTreeView32", None, win32ui.IDC_LIST1, (0, 0, 200, 200), cs]
]
def __init__ (self, hli_root):
dialog.Dialog.__init__ (self, self.dt)
self.hier_list = hierlist.HierListWithItems (
hli_root,
win32ui.IDB_BROWSER_HIER
)
self.HookMessage (self.on_size, win32con.WM_SIZE)
def OnInitDialog (self):
self.hier_list.HierInit (self)
return dialog.Dialog.OnInitDialog (self)
def on_size (self, params):
lparam = params[3]
w = win32api.LOWORD(lparam)
h = win32api.HIWORD(lparam)
self.GetDlgItem (win32ui.IDC_LIST1).MoveWindow((0,0,w,h))
def BrowseDialog():
root = HLIRoot()
if not root.IsExpandable():
raise TypeError, "Browse() argument must have __dict__ attribute, or be a Browser supported type"
dlg = dynamic_browser (root)
dlg.CreateWindow()
def DockableBrowserCreator(parent):
root = HLIRoot()
hl = hierlist.HierListWithItems (
root,
win32ui.IDB_BROWSER_HIER
)
style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS
control = win32ui.CreateTreeCtrl()
control.CreateWindow(style, (0, 0, 150, 300), parent, win32ui.IDC_LIST1)
list = hl.HierInit (parent, control)
return control
def DockablePathBrowser():
import pywin.docking.DockingBar
bar = pywin.docking.DockingBar.DockingBar()
bar.CreateWindow(win32ui.GetMainFrame(), DockableBrowserCreator, "Path Browser", 0x8e0a)
bar.SetBarStyle( bar.GetBarStyle()|afxres.CBRS_TOOLTIPS|afxres.CBRS_FLYBY|afxres.CBRS_SIZE_DYNAMIC)
bar.EnableDocking(afxres.CBRS_ALIGN_ANY)
win32ui.GetMainFrame().DockControlBar(bar)
# The "default" entry point
Browse = DockablePathBrowser
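# Usage sketch (illustrative; the exact module path inside Pythonwin is an
# assumption, not confirmed by this file):
#
#     from pywin.tools import browseProjects
#     browseProjects.Browse()   # docks the "Path Browser" bar in the main frame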
|
t3dev/odoo | refs/heads/master | addons/sale_coupon_delivery/models/__init__.py | 14 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import sale_order
from . import sale_coupon_program
from . import sale_coupon_reward
|
pombredanne/teamwork | refs/heads/master | wsgi/static/Brython2.1.0-20140419-113919/Lib/threading.py | 730 |
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import sleep as _sleep
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
being deprecated (even for Py3k), so this module provides them as
aliases for the PEP 8 compliant names.
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
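# Minimal usage sketch (explanatory comment, not part of this module): a
# reentrant lock can be re-acquired by the thread that owns it, and must be
# released once per acquisition:
#
#     lock = RLock()
#     with lock:
#         with lock:      # same thread: no deadlock, recursion level -> 2
#             pass        # each 'with' exit releases one level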
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self._count)
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count = self._count + 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = []
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if _lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
return gotit
finally:
self._acquire_restore(saved_state)
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
predicate should be a callable whose result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
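# Typical calling pattern for wait_for() (illustrative sketch; 'cond' and
# 'items' are hypothetical names for a Condition and the shared state its
# lock protects):
#
#     with cond:
#         if cond.wait_for(lambda: len(items) > 0, timeout=1.0):
#             item = items.pop()
#         else:
#             ...  # timed out with the predicate still false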
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self._waiters
waiters = __waiters[:n]
if not waiters:
return
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value = self._value + 1
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._cond:
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += 1
self._cond.notify()
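# Usage sketch (illustrative, not part of this module): a bounded semaphore
# guarding a pool of at most 3 concurrent users; a surplus release() raises:
#
#     pool = BoundedSemaphore(3)
#     with pool:
#         ...             # at most 3 threads inside this block at once
#     pool.release()      # ValueError: semaphore released too many times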
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self._cond.__init__()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
self._cond.acquire()
try:
self._flag = True
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
self._cond.acquire()
try:
self._flag = False
finally:
self._cond.release()
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
self._cond.acquire()
try:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
finally:
self._cond.release()
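# Usage sketch (illustrative): one thread signals completion, another waits
# with a timeout and checks the returned flag:
#
#     done = Event()
#     # worker thread:    done.set()
#     # waiting thread:   if not done.wait(timeout=5.0): ...  # timed out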
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and are simultaneously awoken once
they have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state, and all parties are awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
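# Usage sketch (illustrative, not part of this module): N worker threads
# rendezvous at each round; one of them (index 0 here) does per-round setup:
#
#     barrier = Barrier(3, timeout=10)
#     def worker():
#         while True:
#             i = barrier.wait()        # blocks until all 3 arrive
#             if i == 0:
#                 prepare_next_round()  # hypothetical per-round hook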
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# For debug and leak testing
_dangling = WeakSet()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
#XXX __exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
self._started = Event()
self._stopped = False
self._block = Condition(Lock())
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
_dangling.add(self)
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_block'): # DummyThread deletes _block
self._block.__init__()
self._started._reset_internal_locks()
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
if self._stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic thread encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
def _bootstrap_inner(self):
try:
self._set_ident()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
pass
except:
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
print((
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):"), file=self._stderr)
print((
"Traceback (most recent call last):"), file=self._stderr)
while exc_tb:
print((
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
exc_tb = exc_tb.tb_next
print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
#XXX self.__exc_clear()
pass
finally:
with _active_limbo_lock:
self._stop()
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
self._block.acquire()
self._stopped = True
self._block.notify_all()
self._block.release()
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started, and attempts to do so raise the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
self._block.acquire()
try:
if timeout is None:
while not self._stopped:
self._block.wait()
else:
deadline = _time() + timeout
while not self._stopped:
delay = deadline - _time()
if delay <= 0:
break
self._block.wait(delay)
finally:
self._block.release()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
return self._started.is_set() and not self._stopped
isAlive = is_alive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
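# Usage sketch (illustrative): run a callable once after a delay, unless
# cancelled first:
#
#     t = Timer(30.0, print, args=["timed out"])
#     t.start()
#     ...
#     t.cancel()   # harmless if the function has already run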
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _exitfunc(self):
self._stop()
t = _pickSomeNonDaemonThread()
while t:
t.join()
t = _pickSomeNonDaemonThread()
self._delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
# Thread._block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._block
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _enumerate():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
|
blaedd/logsnarf | refs/heads/master | src/logsnarf/test/__init__.py | 12133432 | |
LinuxIsCool/unearthed | refs/heads/master | Unearthed/__init__.py | 12133432 | |
Lektorium-LLC/edx-platform | refs/heads/master | lms/djangoapps/badges/events/__init__.py | 12133432 | |
robovm/robovm-studio | refs/heads/master | python/lib/Lib/site-packages/django/contrib/localflavor/de/__init__.py | 12133432 | |
jtg-gg/blink | refs/heads/dev12-m41 | Tools/Scripts/webkitpy/formatter/__init__.py | 12133432 | |
marc-sensenich/ansible | refs/heads/devel | lib/ansible/module_utils/network/aireos/__init__.py | 12133432 | |
MatthewWilkes/django | refs/heads/master | tests/http_utils/__init__.py | 12133432 | |
awyrough/make-the-country | refs/heads/master | make-the-country/population_client.py | 1 |
"""
BigQuery Client for Recreating US Census Data, census block by census block.
Alexander Penn Hill Wyrough
3/17/2015
https://github.com/awyrough/make-the-country
"""
# SYSTEM IMPORTS
import logging, os, httplib2
# GOOGLE IMPORTS
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
from googleapiclient.errors import HttpError
# SECRET IMPORTS
from secret import GOOGLE_PRIVATE_KEY_FILE, GOOGLE_DEVELOPER_PROJECT_NUMBER, GOOGLE_SERVICE_EMAIL, GOOGLE_DEVELOPER_PROJECT_ID
from upload_census_data.googlebigqueryclient import GoogleBigQueryClient, loadTable
from output_schema import population_output_schema
# LOGGING DECLARATION
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(levelname)s %(filename)s: %(message)s",
level=logging.WARNING)
class PopulationClient():
"""
Google BigQuery API client for US Census data access and processing.
"""
def __init__(self):
"""
Build authorized Google Big Query API Client from project_id and project_number given project google service email.
"""
SCOPE = 'https://www.googleapis.com/auth/bigquery'
self.get_private_key()
credentials = SignedJwtAssertionCredentials(GOOGLE_SERVICE_EMAIL, self.private_key, SCOPE)
http = httplib2.Http()
self.http = credentials.authorize(http)
self.client = build('bigquery', 'v2', http=self.http)
def get_private_key(self):
"""
Read from *.pem private key file. File path specified in secret.py.
"""
BASE = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(BASE, GOOGLE_PRIVATE_KEY_FILE), 'rb')
self.private_key = f.read()
f.close()
def test_access(self):
"""
Small response test to test authorized client. Working as of 02/25/15.
"""
datasets = self.client.datasets()
response = datasets.list(projectId=GOOGLE_DEVELOPER_PROJECT_NUMBER, all="true").execute()
print(response)
print('dataset list: \n')
for d in response['datasets']:
print("%s\n" % d['id'])
def run_query(self, query_str):
"""
Execute query_str and return results as list of lists.
"""
query_body = {'query': query_str}
# EXECUTE THE QUERY, KICK OFF JOB
try:
response = self.client.jobs().query(projectId=GOOGLE_DEVELOPER_PROJECT_NUMBER, body=query_body).execute()
except HttpError as e:
logger.error("[BigQuery SQL Error]: %s", e)
return []
# GATHER QUERY AND JOB METADATA
try:
numRows = response['totalRows']
jobId = response['jobReference']['jobId']
projectId = response['jobReference']['projectId']
jobComplete = response['jobComplete']
except KeyError as e:
logger.error("[The result didn't return anything]: ERROR: %s", e)
print(response)
return []
# ACCESS THE JOB OF THE QUERY, TO POLL FOR RESULTS
try:
job = self.client.jobs().get(projectId=GOOGLE_DEVELOPER_PROJECT_NUMBER, jobId=jobId).execute()
except HttpError as e:
logger.error("[BigQuery SQL Execute Job Error]: %s", e)
return []
getQueryResultsParams = {
"projectId": GOOGLE_DEVELOPER_PROJECT_NUMBER,
"jobId": jobId,
"maxResults": 1000, #ARBITRARY
}
data_results = []
while True:
try:
results = self.client.jobs().getQueryResults(**getQueryResultsParams).execute()
except HttpError as e:
logger.error("[Big Query SQL Query Polling Error]: %s", e)
return []
try:
data_results.extend(results["rows"])
except KeyError:
logger.error("[No rows returned]")
break
if 'pageToken' in results: getQueryResultsParams["pageToken"] = results["pageToken"]
else: break
return data_results
def get_query_string(self, db, state, county, tract):
"""
Return query string for given database and state.
"""
tract_query = "SELECT * FROM [%s.%s_%s] WHERE TRACT IN (\"%s\") AND COUNTY IN (\"%s\")" % (state, state, db, tract, county)
return tract_query
def get_income_query(self, state, county, tract):
"""
Return query string for income database of state.
"""
income_query = "SELECT * FROM [%s.%s_income] WHERE GEO_id2 LIKE \"%%%s%s\"" % (state, state, county, tract)
return income_query
def get_tract_data(self, db_type, county_id, tract_id, state_name):
"""
        Return all rows from the given database of the state, corresponding to county_id and tract_id.
"""
if db_type != "income":
query_str = self.get_query_string(db_type, state_name, county_id, tract_id)
else:
query_str = self.get_income_query(state_name, county_id, tract_id)
return self.run_query(query_str)
def get_all_tracts_in_county(self, state, county):
"""
Return a list of strings for all tracts within the county within the state.
        Note: BigQuery does not support DISTINCT; use GROUP BY instead.
"""
if county:
query_str = "SELECT TRACT FROM [%s.%s_demo] WHERE COUNTY IN (\"%s\") GROUP BY TRACT ORDER BY TRACT ASC" % (state, state, county)
else:
query_str = "SELECT TRACT FROM [%s.%s_demo] GROUP BY TRACT ORDER BY TRACT ASC" % (state, state)
# Extract only the TRACT ID from the field: value = census tract
# EX: [{"f": [{"v": "000100"}]}, ... ]
return [x["f"][0]["v"] for x in self.run_query(query_str)]
def get_counties_in_state(self, state):
"""
Return a list of strings for all counties within the state.
"""
query_str = "SELECT COUNTY FROM [%s.%s_demo] GROUP BY COUNTY ORDER BY COUNTY ASC" % (state, state)
return [x["f"][0]["v"] for x in self.run_query(query_str)]
def get_demo_data(self, state, county, tract):
"""
Get all Census data for given state, county, and tract.
This should return a row for every census block within the tract/county/state combo.
This should return an identical number of rows for corresponding group/family queries.
"""
demo = self.get_tract_data("demo", county, tract, state)
return demo
def get_group_data(self, state, county, tract):
"""
Get group data for a given state, county, and tract.
This should return a row for every census block within the tract/county/state combo.
This should return an identical number of rows for corresponding demo/family queries.
"""
group = self.get_tract_data("group", county, tract, state)
return group
def get_family_data(self, state, county, tract):
"""
Get family data for a given state, county, and tract.
This should return a row for every census block within the tract/county/state combo.
This should return an identical number of rows for corresponding demo/group queries.
"""
family = self.get_tract_data("family", county, tract, state)
return family
def get_income_data(self, state, county, tract):
"""
Get income data for a given state, county, and tract.
This should return 1 row/list of data.
"""
income = self.get_tract_data("income", county, tract, state)
return income
def get_all_tracts(self, state, county):
"""
Return all tracts within a county.
"""
tracts = self.get_all_tracts_in_county(state, county)
return tracts
def get_all_counties(self, state):
"""
Return all counties within a state.
"""
counties = self.get_counties_in_state(state)
return counties
def get_population_count(self, state, county):
"""
Return the population in a given county.
"""
query_str = "SELECT SUM(POP100) FROM [%s.%s_demo] WHERE COUNTY IN (\"%s\")" % (state, state, county)
return int(self.run_query(query_str)[0]["f"][0]["v"])
#################################################################################################
"""
Below are standalone methods that do not require the caller to construct a Google BigQuery client.
Cleaner, simpler methods that are better for one-off use.
My attempt at overloading.
"""
#################################################################################################
def get_demo_data(state, county, tract):
"""
Get all Census data for given state, county, and tract.
This should return a row for every census block within the tract/county/state combo.
This should return an identical number of rows for corresponding group/family queries.
"""
client = PopulationClient()
demo = client.get_tract_data("demo", county, tract, state)
return demo
def get_group_data(state, county, tract):
"""
Get group data for a given state, county, and tract.
This should return a row for every census block within the tract/county/state combo.
This should return an identical number of rows for corresponding demo/family queries.
"""
client = PopulationClient()
group = client.get_tract_data("group", county, tract, state)
return group
def get_family_data(state, county, tract):
"""
Get family data for a given state, county, and tract.
This should return a row for every census block within the tract/county/state combo.
This should return an identical number of rows for corresponding demo/group queries.
"""
client = PopulationClient()
family = client.get_tract_data("family", county, tract, state)
return family
def get_income_data(state, county, tract):
"""
Get income data for a given state, county, and tract.
This should return 1 row/list of data.
"""
client = PopulationClient()
income = client.get_tract_data("income", county, tract, state)
return income
def get_all_tracts(state, county):
"""
Return all tracts within a county.
"""
client = PopulationClient()
tracts = client.get_all_tracts_in_county(state, county)
return tracts
def get_all_counties(state):
"""
Return all counties within a state.
"""
client = PopulationClient()
counties = client.get_counties_in_state(state)
return counties
def unpack_bigquery_row(data):
return [x["v"] for x in data["f"]]
def load_population_result(filename):
"""
    Create a table in the population_output dataset, named as prescribed by filename.
"""
actual_name = filename.split("/")[2]
state = filename.split("/")[1]
gs_location = "gs://population-output/" + state + "/" + actual_name
dataset = "population_output"
table = actual_name.split(".")[0]
client = GoogleBigQueryClient()
loadTable(client.client, GOOGLE_DEVELOPER_PROJECT_ID, dataset, table, gs_location, population_output_schema)
def main():
    filename = "population-output/new_jersey/new_jersey_BHETWX_salem.csv"
load_population_result(filename)
if __name__ == "__main__":
main()
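# Hedged usage sketch of the standalone helpers above. The state is
# illustrative and every call below hits live BigQuery, so they are left
# commented out.
# counties = get_all_counties("new_jersey")
# tracts = get_all_tracts("new_jersey", counties[0])
# demo_rows = get_demo_data("new_jersey", counties[0], tracts[0])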
|
jameshiew/mws
|
refs/heads/master
|
mws/future_utils/__init__.py
|
1
|
from .collections import unique_list_order_preserved
from .crypto import calc_md5
from .params import (
clean_bool,
clean_date,
clean_string,
clean_value,
dict_keyed_param,
enumerate_keyed_param,
enumerate_param,
enumerate_params,
flat_param_dict,
)
from .parsers import (
DataWrapper,
DictWrapper,
ObjectDict,
XML2Dict,
)
from .timezone import mws_utc_now
__all__ = [
"calc_md5",
"clean_bool",
"clean_date",
"clean_string",
"clean_value",
"DataWrapper",
"dict_keyed_param",
"DictWrapper",
"enumerate_keyed_param",
"enumerate_param",
"enumerate_params",
"flat_param_dict",
"mws_utc_now",
"ObjectDict",
"unique_list_order_preserved",
"XML2Dict",
]
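# Hedged illustration (not documentation from the package itself): the
# enumerate_param helper expands a list into the numbered request parameters
# that MWS expects, roughly:
#   enumerate_param('MarketplaceId.Id.', ['A', 'B'])
#   -> {'MarketplaceId.Id.1': 'A', 'MarketplaceId.Id.2': 'B'}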
|
affo/nova
|
refs/heads/master
|
nova/cmd/serialproxy.py
|
4
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Websocket proxy that is compatible with OpenStack Nova
Serial consoles. Leverages websockify.py by Joel Martin.
Based on nova-novncproxy.
"""
from oslo_config import cfg
from nova.cmd import baseproxy
opts = [
cfg.StrOpt('serialproxy_host',
default='0.0.0.0',
help='Host on which to listen for incoming requests'),
cfg.IntOpt('serialproxy_port',
default=6083,
help='Port on which to listen for incoming requests'),
]
CONF = cfg.CONF
CONF.register_cli_opts(opts, group="serial_console")
def main():
# set default web flag option
CONF.set_default('web', None)
baseproxy.proxy(
host=CONF.serial_console.serialproxy_host,
port=CONF.serial_console.serialproxy_port)
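# Hedged sketch (not part of this module): the listen address and port can
# also be overridden programmatically before calling main(); the values here
# are illustrative.
# CONF.set_override('serialproxy_host', '127.0.0.1', group='serial_console')
# CONF.set_override('serialproxy_port', 6084, group='serial_console')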
|
veasy/easy-remote-control
|
refs/heads/master
|
erc-server/erc_server/sockets/keyboard_socket.py
|
1
|
from pyunicon.UCScreen import UCScreen
from erc_server import socketio
from flask_socketio import emit
from pyunicon.UCMouse import UCMouse
from erc_server.erc_util import get_key_by_text, press_key
__author__ = 'cansik'
DEFAULT_NAMESPACE = '/text'
__mouse = UCMouse()
__screen = UCScreen()
@socketio.on('connect', namespace=DEFAULT_NAMESPACE)
def connect():
print('Client connected')
@socketio.on('char', namespace=DEFAULT_NAMESPACE)
def char(key):
key_code = get_key_by_text(key.upper())
press_key(key_code)
    print('Received char: %s' % key)
@socketio.on('disconnect', namespace=DEFAULT_NAMESPACE)
def disconnect():
print('Client disconnected')
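# A hedged sketch (not part of the server) of driving this namespace with the
# flask_socketio test client; `app` is assumed to be the Flask app that
# `socketio` was initialised with.
# from erc_server import app
# client = socketio.test_client(app, namespace=DEFAULT_NAMESPACE)
# client.emit('char', 'a', namespace=DEFAULT_NAMESPACE)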
|
KiCad/kicad-python
|
refs/heads/master
|
doc/source/conf.py
|
4
|
# -*- coding: utf-8 -*-
#
# KiCad Python API documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 23 20:40:38 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ytemplates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'KiCad Python API'
copyright = u'2015, KiCad Community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['ystatic']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'KiCadPythonAPIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'KiCadPythonAPI.tex', u'KiCad Python API Documentation',
u'KiCad Community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kicadpythonapi', u'KiCad Python API Documentation',
[u'KiCad Community'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KiCadPythonAPI', u'KiCad Python API Documentation',
u'KiCad Community', 'KiCadPythonAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'KiCad Python API'
epub_author = u'KiCad Community'
epub_publisher = u'KiCad Community'
epub_copyright = u'2015, KiCad Community'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'KiCad Python API'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
sourcesimian/pySkype
|
refs/heads/master
|
skype/__init__.py
|
12133432
| |
RevelSystems/django
|
refs/heads/master
|
tests/m2m_signals/__init__.py
|
12133432
| |
b0ttl3z/SickRage
|
refs/heads/master
|
sickbeard/providers/kat.py
|
5
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
# URL: http://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import validators
from requests.compat import urljoin
import sickbeard
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class KatProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "KickAssTorrents")
self.public = True
self.confirmed = True
self.minseed = None
self.minleech = None
self.url = "https://kat.cr"
self.urls = {"search": urljoin(self.url, "%s/")}
self.custom_url = None
self.cache = tvcache.TVCache(self, search_params={"RSS": ["tv", "anime"]})
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-branches, too-many-locals, too-many-statements
results = []
anime = (self.show and self.show.anime) or (ep_obj and ep_obj.show and ep_obj.show.anime) or False
search_params = {
"q": "",
"field": "seeders",
"sorder": "desc",
"rss": 1,
"category": ("tv", "anime")[anime]
}
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
search_params["q"] = search_string if mode != "RSS" else ""
search_params["field"] = "seeders" if mode != "RSS" else "time_add"
if mode != "RSS":
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_url = self.urls["search"] % ("usearch" if mode != "RSS" else search_string)
if self.custom_url:
if not validators.url(self.custom_url):
logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
return results
search_url = urljoin(self.custom_url, search_url.split(self.url)[1])
data = self.get_url(search_url, params=search_params, returns="text")
if not data:
logger.log("URL did not return results/data, if the results are on the site maybe try a custom url, or a different one", logger.DEBUG)
continue
if not data.startswith("<?xml"):
logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
continue
with BS4Parser(data, "html5lib") as html:
for item in html("item"):
try:
title = item.title.get_text(strip=True)
# Use the torcache link kat provides,
# unless it is not torcache or we are not using blackhole
# because we want to use magnets if connecting direct to client
# so that proxies work.
download_url = item.enclosure["url"]
if sickbeard.TORRENT_METHOD != "blackhole" or "torcache" not in download_url:
download_url = item.find("torrent:magneturi").next.replace("CDATA", "").strip("[!]") + self._custom_trackers
if not (title and download_url):
continue
seeders = try_int(item.find("torrent:seeds").get_text(strip=True))
leechers = try_int(item.find("torrent:peers").get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
verified = bool(try_int(item.find("torrent:verified").get_text(strip=True)))
if self.confirmed and not verified:
if mode != "RSS":
logger.log("Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
continue
torrent_size = item.find("torrent:contentlength").get_text(strip=True)
size = convert_size(torrent_size) or -1
info_hash = item.find("torrent:infohash").get_text(strip=True)
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
if mode != "RSS":
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError):
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = KatProvider()
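# Hedged usage sketch (not part of the provider): mode keys such as "RSS" or
# "Episode" map to lists of search strings; the value below is hypothetical
# and a real call performs network I/O, so it is left commented out.
# results = provider.search({"Episode": ["Some Show S01E01"]})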
|
kressi/erpnext
|
refs/heads/develop
|
erpnext/patches/v7_1/add_account_user_role_for_timesheet.py
|
52
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if not frappe.db.get_value('DocPerm', {'parent': 'Timesheet', 'role': 'Accounts User', 'permlevel': 1}):
doc = frappe.get_doc('DocType', 'Timesheet')
doc.append('permissions', {
'role': "Accounts User",
'permlevel': 0,
'read': 1,
'write': 1,
'create': 1,
'delete': 1,
'submit': 1,
'cancel': 1,
'amend': 1,
'report': 1,
'email': 1
})
doc.append('permissions', {
'role': "Accounts User",
'permlevel': 1,
'read': 1,
'write': 1
})
doc.save(ignore_permissions=True)
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/multiLine/IndentInnerFunction.after.py
|
83
|
class C:
def foo(self):
def foo(self):
x = 1
y = 2
y = 2
|
billonahill/heron
|
refs/heads/master
|
integration-test/src/python/integration_test/topology/all_grouping/__init__.py
|
8
|
"""All grouping integration test topology"""
__all__ = ['all_grouping']
from .all_grouping import all_grouping_builder
|
h4r5h1t/django-hauthy
|
refs/heads/hauthy
|
django/utils/itercompat.py
|
712
|
"""
Provides iterator functions that are not in all versions of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
def is_iterable(x):
"A implementation independent way of checking for iterables"
try:
iter(x)
except TypeError:
return False
else:
return True
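
# Minimal illustration (not part of the original module):
def _is_iterable_examples():
    assert is_iterable([1, 2]) and is_iterable("abc")
    assert not is_iterable(42)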
|
vicaya/hypertable
|
refs/heads/master
|
src/py/ThriftClient/client_test.py
|
2
|
import sys
from hypertable.thriftclient import *
from hyperthrift.gen.ttypes import *
try:
client = ThriftClient("localhost", 38080)
print "HQL examples"
res = client.hql_query("show tables")
print res
res = client.hql_query("select * from thrift_test")
print res
print "mutator examples";
mutator = client.open_mutator("thrift_test", 0, 0);
client.set_cell(mutator, Cell("py-k1", "col", None, "py-v1"))
client.flush_mutator(mutator);
print "scanner examples";
scanner = client.open_scanner("thrift_test",
ScanSpec(None, None, None, 1), True);
while True:
cells = client.next_cells(scanner)
if (len(cells) == 0):
break
print cells
except:
print sys.exc_info()
raise
|
zchking/odoo
|
refs/heads/8.0
|
addons/l10n_in_hr_payroll/report/report_payslip_details.py
|
374
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.osv import osv
from openerp.addons.hr_payroll import report
class payslip_details_report_in(report.report_payslip_details.payslip_details_report):
def __init__(self, cr, uid, name, context):
super(payslip_details_report_in, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_details_by_rule_category': self.get_details_by_rule_category,
})
class wrapped_report_payslipdetailsin(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_payslipdetails'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_payslipdetails'
_wrapped_report_class = payslip_details_report_in
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
40223250/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/support.py
|
829
|
import sys
from warnings import catch_warnings
def is_instance(obj, klass):
"""Version of is_instance that doesn't access __class__"""
return issubclass(type(obj), klass)
class SomeClass(object):
class_attribute = None
def wibble(self):
pass
class X(object):
pass
def examine_warnings(func):
def wrapper():
with catch_warnings(record=True) as ws:
func(ws)
return wrapper
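
# Small illustration (not part of the original helpers): examine_warnings
# hands the wrapped function the list of warnings recorded while it runs.
def _examine_warnings_example():
    @examine_warnings
    def check(ws):
        assert ws == []  # nothing in this block raises a warning
    check()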
|
ESS-LLP/erpnext
|
refs/heads/develop
|
erpnext/manufacturing/doctype/manufacturing_settings/__init__.py
|
12133432
| |
edevil/django
|
refs/heads/master
|
tests/absolute_url_overrides/__init__.py
|
12133432
| |
sveinugu/gtrackcore
|
refs/heads/master
|
gtrackcore/test/track/random/__init__.py
|
12133432
| |
ormnv/os_final_project
|
refs/heads/master
|
django/contrib/gis/db/backends/spatialite/__init__.py
|
12133432
| |
yasoob/PythonRSSReader
|
refs/heads/master
|
venv/lib/python2.7/dist-packages/reportlab/platypus/tableofcontents.py
|
1
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tableofcontents.py
__version__=''' $Id$ '''
__doc__="""Experimental class to generate Tables of Contents easily
This module defines a single TableOfContents() class that can be used to
automatically create a table of contents for Platypus documents like
this:
story = []
toc = TableOfContents()
story.append(toc)
# some heading paragraphs here...
doc = MyTemplate(path)
doc.multiBuild(story)
The data needed to create the table is a list of (level, text, pageNum)
triplets, plus some paragraph styles for each level of the table itself.
The triplets will usually be created in a document template's method
like afterFlowable(), making notification calls using the notify()
method with appropriate data like this:
(level, text, pageNum) = ...
self.notify('TOCEntry', (level, text, pageNum))
Optionally the list can contain four items in which case the last item
is a destination key which the entry should point to. A bookmark
with this key needs to be created first like this:
key = 'ch%s' % self.seq.nextf('chapter')
self.canv.bookmarkPage(key)
self.notify('TOCEntry', (level, text, pageNum, key))
The table of contents needs at least two passes over the Platypus
story, which is why the multiBuild() method must be called.
The level<NUMBER>ParaStyle variables are the paragraph styles used
to format the entries in the table of contents. Their indentation
is calculated like this: each entry starts at a multiple of some
constant named delta. If one entry spans more than one line, all
lines after the first are indented by the same constant named
epsilon.
"""
from reportlab.lib import enums
from reportlab.lib.units import cm
from reportlab.lib.utils import commasplit, escapeOnce, encode_label, decode_label
from reportlab.lib.styles import ParagraphStyle, _baseFontName
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import IndexingFlowable
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.flowables import Spacer, Flowable
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.pdfgen import canvas
def unquote(txt):
from xml.sax.saxutils import unescape
    return unescape(txt, {"&apos;": "'", "&quot;": '"'})
try:
set
except:
class set(list):
def add(self,x):
if x not in self:
list.append(self,x)
def drawPageNumbers(canvas, style, pages, availWidth, availHeight, dot=' . '):
'''
Draws pagestr on the canvas using the given style.
If dot is None, pagestr is drawn at the current position in the canvas.
If dot is a string, pagestr is drawn right-aligned. If the string is not empty,
the gap is filled with it.
'''
pages.sort()
pagestr = ', '.join([str(p) for p, _ in pages])
x, y = canvas._curr_tx_info['cur_x'], canvas._curr_tx_info['cur_y']
fontSize = style.fontSize
pagestrw = stringWidth(pagestr, style.fontName, fontSize)
#if it's too long to fit, we need to shrink to fit in 10% increments.
#it would be very hard to output multiline entries.
#however, we impose a minimum size of 1 point as we don't want an
#infinite loop. Ultimately we should allow a TOC entry to spill
#over onto a second line if needed.
freeWidth = availWidth-x
while pagestrw > freeWidth and fontSize >= 1.0:
fontSize = 0.9 * fontSize
pagestrw = stringWidth(pagestr, style.fontName, fontSize)
if isinstance(dot, str):
if dot:
dotw = stringWidth(dot, style.fontName, fontSize)
dotsn = int((availWidth-x-pagestrw)/dotw)
else:
dotsn = dotw = 0
text = '%s%s' % (dotsn * dot, pagestr)
newx = availWidth - dotsn*dotw - pagestrw
pagex = availWidth - pagestrw
elif dot is None:
text = ', ' + pagestr
newx = x
pagex = newx
else:
raise TypeError('Argument dot should either be None or an instance of basestring.')
tx = canvas.beginText(newx, y)
tx.setFont(style.fontName, fontSize)
tx.setFillColor(style.textColor)
tx.textLine(text)
canvas.drawText(tx)
commaw = stringWidth(', ', style.fontName, fontSize)
for p, key in pages:
if not key:
continue
w = stringWidth(str(p), style.fontName, fontSize)
canvas.linkRect('', key, (pagex, y, pagex+w, y+style.leading), relative=1)
pagex += w + commaw
# Default paragraph styles for tables of contents.
# (This could also be generated automatically or even
# on-demand if it is not known how many levels the
# TOC will finally need to display...)
delta = 1*cm
epsilon = 0.5*cm
defaultLevelStyles = [
ParagraphStyle(
name='Level 0',
fontName=_baseFontName,
fontSize=10,
leading=11,
firstLineIndent = 0,
leftIndent = epsilon)]
defaultTableStyle = \
TableStyle([
('VALIGN', (0,0), (-1,-1), 'TOP'),
('RIGHTPADDING', (0,0), (-1,-1), 0),
('LEFTPADDING', (0,0), (-1,-1), 0),
])
class TableOfContents(IndexingFlowable):
"""This creates a formatted table of contents.
It presumes a correct block of data is passed in.
The data block contains a list of (level, text, pageNumber)
triplets. You can supply a paragraph style for each level
(starting at zero).
Set dotsMinLevel to determine from which level on a line of
dots should be drawn between the text and the page number.
If dotsMinLevel is set to a negative value, no dotted lines are drawn.
"""
def __init__(self):
self.rightColumnWidth = 72
self.levelStyles = defaultLevelStyles
self.tableStyle = defaultTableStyle
self.dotsMinLevel = 1
self._table = None
self._entries = []
self._lastEntries = []
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries[:]
self.clearEntries()
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'TOCEntry' events only.
"""
if kind == 'TOCEntry':
self.addEntry(*stuff)
def clearEntries(self):
self._entries = []
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
try:
return self.levelStyles[n]
except IndexError:
prevstyle = self.getLevelStyle(n-1)
self.levelStyles.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+delta,
leftIndent = prevstyle.leftIndent+delta))
return self.levelStyles[n]
def addEntry(self, level, text, pageNum, key=None):
"""Adds one entry to the table of contents.
This allows incremental buildup by a doctemplate.
Requires that enough styles are defined."""
assert type(level) == type(1), "Level must be an integer"
self._entries.append((level, text, pageNum, key))
def addEntries(self, listOfEntries):
"""Bulk creation of entries in the table of contents.
If you knew the titles but not the page numbers, you could
supply them to get sensible output on the first run."""
for entryargs in listOfEntries:
self.addEntry(*entryargs)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0,'Placeholder for table of contents',0,None)]
else:
_tempEntries = self._lastEntries
def drawTOCEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
label = label.split(',')
page, level, key = int(label[0]), int(label[1]), eval(label[2],{})
style = self.getLevelStyle(level)
if self.dotsMinLevel >= 0 and level >= self.dotsMinLevel:
dot = ' . '
else:
dot = ''
drawPageNumbers(canvas, style, [(page, key)], availWidth, availHeight, dot)
self.canv.drawTOCEntryEnd = drawTOCEntryEnd
tableData = []
for (level, text, pageNum, key) in _tempEntries:
style = self.getLevelStyle(level)
if key:
text = '<a href="#%s">%s</a>' % (key, text)
keyVal = repr(key).replace(',','\\x2c').replace('"','\\x2c')
else:
keyVal = None
para = Paragraph('%s<onDraw name="drawTOCEntryEnd" label="%d,%d,%s"/>' % (text, pageNum, level, keyVal), style)
if style.spaceBefore:
tableData.append([Spacer(1, style.spaceBefore),])
tableData.append([para,])
self._table = Table(tableData, colWidths=(availWidth,), style=self.tableStyle)
self.width, self.height = self._table.wrapOn(self.canv,availWidth, availHeight)
return (self.width, self.height)
def split(self, availWidth, availHeight):
"""At this stage we do not care about splitting the entries,
we will just return a list of platypus tables. Presumably the
calling app has a pointer to the original TableOfContents object;
Platypus just sees tables.
"""
return self._table.splitOn(self.canv,availWidth, availHeight)
def drawOn(self, canvas, x, y, _sW=0):
"""Don't do this at home! The standard calls for implementing
draw(); we are hooking this in order to delegate ALL the drawing
work to the embedded table object.
"""
self._table.drawOn(canvas, x, y, _sW)
def makeTuple(x):
if hasattr(x, '__iter__'):
return tuple(x)
return (x,)
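
# Quick illustration (not part of the original source): makeTuple normalises
# scalars and iterables to tuples.
def _makeTuple_examples():
    assert makeTuple([1, 2]) == (1, 2)
    assert makeTuple(3) == (3,)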
class SimpleIndex(IndexingFlowable):
"""Creates multi level indexes.
    The styling can be customized and alphabetic headers turned on and off.
"""
def __init__(self, **kwargs):
"""
Constructor of SimpleIndex.
Accepts the same arguments as the setup method.
"""
#keep stuff in a dictionary while building
self._entries = {}
self._lastEntries = {}
self._flowable = None
self.setup(**kwargs)
def getFormatFunc(self,format):
try:
D = {}
exec('from reportlab.lib.sequencer import _format_%s as formatFunc' % format, D)
return D['formatFunc']
except ImportError:
raise ValueError('Unknown format %r' % format)
def setup(self, style=None, dot=None, tableStyle=None, headers=True, name=None, format='123', offset=0):
"""
This method makes it possible to change styling and other parameters on an existing object.
style is the paragraph style to use for index entries.
        dot can either be None or a string. If it's None, entries are immediately followed by their
corresponding page numbers. If it's a string, page numbers are aligned on the right side
of the document and the gap filled with a repeating sequence of the string.
tableStyle is the style used by the table which the index uses to draw itself. Use this to
change properties like spacing between elements.
headers is a boolean. If it is True, alphabetic headers are displayed in the Index when the first
letter changes. If False, we just output some extra space before the next item
name makes it possible to use several indexes in one document. If you want this use this
        parameter to give each index a unique name. You can then index a term by referring to the
name of the index which it should appear in:
<index item="term" name="myindex" />
format can be 'I', 'i', '123', 'ABC', 'abc'
"""
if style is None:
style = ParagraphStyle(name='index',
fontName=_baseFontName,
fontSize=11)
self.textStyle = style
self.tableStyle = tableStyle or defaultTableStyle
self.dot = dot
self.headers = headers
if name is None:
from reportlab.platypus.paraparser import DEFAULT_INDEX_NAME as name
self.name = name
self.formatFunc = self.getFormatFunc(format)
self.offset = offset
def __call__(self,canv,kind,label):
try:
terms, format, offset = decode_label(label)
except:
terms = label
format = offset = None
if format is None:
formatFunc = self.formatFunc
else:
formatFunc = self.getFormatFunc(format)
if offset is None:
offset = self.offset
terms = commasplit(terms)
pns = formatFunc(canv.getPageNumber()-offset)
key = 'ix_%s_%s_p_%s' % (self.name, label, pns)
info = canv._curr_tx_info
canv.bookmarkHorizontal(key, info['cur_x'], info['cur_y'] + info['leading'])
self.addEntry(terms, pns, key)
def getCanvasMaker(self, canvasmaker=canvas.Canvas):
def newcanvasmaker(*args, **kwargs):
from reportlab.pdfgen import canvas
c = canvasmaker(*args, **kwargs)
setattr(c,self.name,self)
return c
return newcanvasmaker
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries.copy()
self.clearEntries()
def clearEntries(self):
self._entries = {}
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'IndexEntry' events only.
"""
if kind == 'IndexEntry':
(text, pageNum) = stuff
self.addEntry(text, pageNum)
def addEntry(self, text, pageNum, key=None):
"""Allows incremental buildup"""
self._entries.setdefault(makeTuple(text),set([])).add((pageNum, key))
def split(self, availWidth, availHeight):
"""At this stage we do not care about splitting the entries,
we will just return a list of platypus tables. Presumably the
calling app has a pointer to the original TableOfContents object;
Platypus just sees tables.
"""
return self._flowable.splitOn(self.canv,availWidth, availHeight)
def _getlastEntries(self, dummy=[(['Placeholder for index'],enumerate((None,)*3))]):
'''Return the last run's entries! If there are none, returns dummy.'''
if not self._lastEntries:
if self._entries:
return list(self._entries.items())
return dummy
return list(self._lastEntries.items())
def _build(self,availWidth,availHeight):
_tempEntries = self._getlastEntries()
def getkey(seq):
return [x.upper() for x in seq[0]]
_tempEntries.sort(key=getkey)
leveloffset = self.headers and 1 or 0
def drawIndexEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
style = self.getLevelStyle(leveloffset)
pages = decode_label(label)
drawPageNumbers(canvas, style, pages, availWidth, availHeight, self.dot)
self.canv.drawIndexEntryEnd = drawIndexEntryEnd
alpha = ''
tableData = []
lastTexts = []
alphaStyle = self.getLevelStyle(0)
for texts, pageNumbers in _tempEntries:
texts = list(texts)
#track when the first character changes; either output some extra
#space, or the first letter on a row of its own. We cannot do
#widow/orphan control, sadly.
nalpha = texts[0][0].upper()
if alpha != nalpha:
alpha = nalpha
if self.headers:
header = alpha
else:
header = ' '
tableData.append([Spacer(1, alphaStyle.spaceBefore),])
tableData.append([Paragraph(header, alphaStyle),])
tableData.append([Spacer(1, alphaStyle.spaceAfter),])
i, diff = listdiff(lastTexts, texts)
if diff:
lastTexts = texts
texts = texts[i:]
label = encode_label(list(pageNumbers))
texts[-1] = '%s<onDraw name="drawIndexEntryEnd" label="%s"/>' % (texts[-1], label)
for text in texts:
#Platypus and RML differ on how parsed XML attributes are escaped.
#e.g. <index item="M&S"/>. The only place this seems to bite us is in
#the index entries so work around it here.
text = escapeOnce(text)
style = self.getLevelStyle(i+leveloffset)
para = Paragraph(text, style)
if style.spaceBefore:
tableData.append([Spacer(1, style.spaceBefore),])
tableData.append([para,])
i += 1
self._flowable = Table(tableData, colWidths=[availWidth], style=self.tableStyle)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
self._build(availWidth,availHeight)
self.width, self.height = self._flowable.wrapOn(self.canv,availWidth, availHeight)
return self.width, self.height
def drawOn(self, canvas, x, y, _sW=0):
"""Don't do this at home! The standard calls for implementing
draw(); we are hooking this in order to delegate ALL the drawing
work to the embedded table object.
"""
self._flowable.drawOn(canvas, x, y, _sW)
def draw(self):
t = self._flowable
ocanv = getattr(t,'canv',None)
if not ocanv:
t.canv = self.canv
try:
t.draw()
finally:
if not ocanv:
del t.canv
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
if not hasattr(self.textStyle, '__iter__'):
self.textStyle = [self.textStyle]
try:
return self.textStyle[n]
except IndexError:
self.textStyle = list(self.textStyle)
prevstyle = self.getLevelStyle(n-1)
self.textStyle.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+.2*cm,
leftIndent = prevstyle.leftIndent+.2*cm))
return self.textStyle[n]
AlphabeticIndex = SimpleIndex
def listdiff(l1, l2):
m = min(len(l1), len(l2))
for i in range(m):
if l1[i] != l2[i]:
return i, l2[i:]
return m, l2[m:]
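
# Small illustration (not part of the original source): listdiff returns the
# length of the common prefix and the remainder of the second list.
def _listdiff_example():
    assert listdiff(['A', 'x'], ['A', 'y', 'z']) == (1, ['y', 'z'])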
class ReferenceText(IndexingFlowable):
"""Fakery to illustrate how a reference would work if we could
put it in a paragraph."""
def __init__(self, textPattern, targetKey):
self.textPattern = textPattern
self.target = targetKey
self.paraStyle = ParagraphStyle('tmp')
self._lastPageNum = None
self._pageNum = -999
self._para = None
def beforeBuild(self):
self._lastPageNum = self._pageNum
def notify(self, kind, stuff):
if kind == 'Target':
(key, pageNum) = stuff
if key == self.target:
self._pageNum = pageNum
def wrap(self, availWidth, availHeight):
text = self.textPattern % self._lastPageNum
self._para = Paragraph(text, self.paraStyle)
return self._para.wrap(availWidth, availHeight)
def drawOn(self, canvas, x, y, _sW=0):
self._para.drawOn(canvas, x, y, _sW)
|
lheagy/casingResearch
|
refs/heads/master
|
tests/test_SimulationRun.py
|
2
|
import unittest
import numpy as np
import os
import shutil
import casingSimulations
plotIt = False
TOL = 1e-4
ZERO = 1e-7
class ForwardSimulationTestCyl2D(unittest.TestCase):
dir2D = './sim2D'
def setUp(self):
sigma_back = 1e-1 # wholespace
modelParameters = casingSimulations.model.CasingInWholespace(
src_a = np.r_[0., np.pi, 0.], # the source fcts will take care of coupling it to the casing
src_b = np.r_[1e3, np.pi, 0.], # return electrode
freqs = np.r_[0.5],
sigma_back = sigma_back, # wholespace
)
npadx, npadz = 8, 19
dx2 = 200.
csz = 0.25
meshGenerator = casingSimulations.CasingMeshGenerator(
modelParameters=modelParameters, npadx=npadx, npadz=npadz, csz=csz
)
self.modelParameters = modelParameters
self.meshGenerator = meshGenerator
def runSimulation(self, src):
simulation = casingSimulations.run.SimulationFDEM(
modelParameters=self.modelParameters,
meshGenerator=self.meshGenerator,
src=src,
directory=self.dir2D
)
fields2D = simulation.run()
loadedFields = np.load('/'.join([self.dir2D, 'fields.npy']))
self.assertTrue(np.all(fields2D[:, 'h'] == loadedFields))
def test_simulation2DTopCasing(self):
src = casingSimulations.sources.TopCasingSrc(
modelParameters=self.modelParameters,
meshGenerator=self.meshGenerator,
)
src.validate()
self.runSimulation(src)
def test_simulation2DDownHoleCasingSrc(self):
src = casingSimulations.sources.DownHoleCasingSrc(
modelParameters=self.modelParameters,
meshGenerator=self.meshGenerator,
)
src.validate()
self.runSimulation(src)
def test_simulation2DDownHoleTerminatingSrc(self):
src = casingSimulations.sources.DownHoleTerminatingSrc(
modelParameters=self.modelParameters,
meshGenerator=self.meshGenerator,
)
src.validate()
self.runSimulation(src)
def tearDown(self):
for d in [self.dir2D]:
if os.path.isdir(d):
shutil.rmtree(d)
if __name__ == '__main__':
unittest.main()
|
amaringarcia/AliPhysics
|
refs/heads/master
|
PWGMM/MC/aligenqa/aligenqa/plotting.py
|
41
|
from pprint import pprint
from rootpy import asrootpy, log, collection
from rootpy.plotting import Hist2D
from rootpy.io import root_open
from data_extractors import \
get_dNdeta_in_classifier_bin_interval,\
get_identified_vs_mult,\
get_correlation_histogram,\
get_PNch_vs_estmult,\
get_meanpt_vs_estmult,\
get_pT_distribution,\
get_mean_nMPI,\
get_graphs_particle_ratios_vs_refmult
from utils import \
gen_random_name,\
get_est_dirs,\
make_estimator_title,\
remap_x_values,\
remove_zero_value_points,\
remove_points_with_equal_x,\
remove_points_with_x_err_gt_1NchRef,\
percentile_bin_to_binidx_bin
from .roofie import Figure, Styles
kPROTON = str(2212)
kANTIPROTON = str(-2212)
kLAMBDA = str(3122)
kANTILAMBDA = str(-3122)
kK0S = str(310)
kKPLUS = str(321)
kKMINUS = str(-321)
kPIPLUS = str(211)
kPIMINUS = str(-211)
kPI0 = str(111)
kXI = str(3312)
kANTIXI = str(-3312)
kOMEGAMINUS = str(3334)
kOMEGAPLUS = str(-3334)
class Plotting(object):
def __init__(self, f_name, sums_dir_name, results_dir_name, percentile_bins, considered_ests):
self.f_name = f_name
self.sums_dir_name = sums_dir_name
self.results_dir_name = results_dir_name
        # make the last mult bin start at a multiplicity x times larger than the mean in this estimator
# self.mean_mult_cutoff_factor = 4
self.ref_ests = ['EtaLt05', ]
self.considered_ests = considered_ests
self.perc_bins = percentile_bins
# figure out the nch edges corresponding to the percentile edges, depends on P(Nch)
self.delete_results_dir()
self.make_results_dir()
self.plot_event_counters() # needed for calculations of the edges
self.nch_edges = self._find_nch_edges_from_percentile_edges()
pprint(self.nch_edges)
        # set the default style for all figures created from here on forward:
Figure.style = Styles.Presentation_half
def _io_decorator(func):
"""
        Open and close the file before and after the execution of the decorated function.
        The purpose is to clean up memory and to force an update of the file
        before the next function call. The wrapper adds the file, sums and results_post to `self`.
"""
def wrapper(self, **kwargs):
with root_open(self.f_name, 'update') as self.f:
self.sums = self.f.MultEstimators.__getattr__(self.sums_dir_name)
try:
self.results_post = self.f.MultEstimators.__getattr__(self.results_dir_name)
except AttributeError:
# results dir does not exists (yet)
pass
return_value = func(self, **kwargs)
# Delete all TLists in sums since we own them and they would be left in memory otherwise
for obj in self.sums:
if isinstance(obj, collection.List):
obj.Delete()
self.sums.Delete()
return return_value
return wrapper
@_io_decorator
def _find_nch_edges_from_percentile_edges(self):
nch_edges = {}
estimators_to_be_removed = []
for est_dir in get_est_dirs(self.results_post, self.considered_ests):
event_counter = est_dir.event_counter
try:
nch_edges[est_dir.GetName()] = [percentile_bin_to_binidx_bin(perc_bin, event_counter)
for perc_bin in self.perc_bins[est_dir.GetName()]]
            except ValueError as e:
print "Error occured for classifier " + est_dir.GetName()
print e
print self.perc_bins[est_dir.GetName()]
print "You can change the percentile bins in the beginning of this script"
print "For the following, this estimator is removed"
estimators_to_be_removed.append(est_dir.GetName())
print "Bin edges for given percentile bins"
print nch_edges
for est in estimators_to_be_removed:
del self.perc_bins[est]
del self.considered_ests[self.considered_ests.index(est)]
return nch_edges
@_io_decorator
def delete_results_dir(self):
# delete old result directory
self.f.rm('MultEstimators/' + self.results_dir_name)
self.f.Write()
@_io_decorator
def make_results_dir(self):
self.f.mkdir('MultEstimators/' + self.results_dir_name, recurse=True)
for est_dir in get_est_dirs(self.sums, self.considered_ests):
try:
resdir = self.f.MultEstimators.__getattr__(self.results_dir_name).mkdir(est_dir.GetName())
resdir.Write()
except:
pass
@_io_decorator
def plot_particle_ratios_vs_estmult(self, pids1, pids2, scale=None, ytitle=''):
ratio_vs_estmult_dir = (self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
+ '/pid_ratios_vs_estmult')
fig = Figure()
if not ytitle:
fig.ytitle = ", ".join(pids1) + " / " + ", ".join(pids2)
else:
fig.ytitle = ytitle
for est_dir in get_est_dirs(self.sums, self.considered_ests):
h3d = asrootpy(est_dir.FindObject("fNch_pT_pid"))
pids1hists = [get_identified_vs_mult(h3d, pdg) for pdg in pids1]
pids2hists = [get_identified_vs_mult(h3d, pdg) for pdg in pids2]
pids1_px = sum(pids1hists)
pids2_px = sum(pids2hists)
ratio1d = pids1_px / pids2_px
fig.xtitle = "N_{ch}|_{" + make_estimator_title(est_dir.GetName()) + "}"
if scale:
ratio1d.Scale(scale)
fig.add_plottable(ratio1d, legend_title=make_estimator_title(est_dir.GetName()))
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratio_vs_estmult_dir)
@_io_decorator
def plot_event_counters(self):
log.info("Creating event counters")
for est_dir in get_est_dirs(self.sums, self.considered_ests):
results_est_dir = self.results_post.__getattr__(est_dir.GetName())
# Nasty, but just use a reference estimator here...
corr = get_correlation_histogram(self.sums, est_dir.GetName(), "EtaLt05")
counter = asrootpy(corr.ProjectionX())
counter.name = "event_counter"
path = results_est_dir.GetPath().split(":")[1] # file.root:/internal/root/path
self.f.cd(path)
results_est_dir.WriteTObject(counter)
@_io_decorator
def plot_dNdetas(self, ratio_to_mb):
# Loop over all estimators in the Sums list:
log.info("Creating dN/deta bin in multiplicity")
figs = []
for est_dir in get_est_dirs(self.sums, self.considered_ests):
# does this estimator have several multiplicity bins?
# Q2, for example only works with pythia and makes no sense to plot
# on Dipsy as it would only be the MB line
if len(self.nch_edges[est_dir.GetName()]) == 1:
continue
results_est_dir = self.results_post.Get(est_dir.GetName())
event_counter = asrootpy(results_est_dir.Get("event_counter"))
fig = Figure()
fig.plot.palette = 'colorblind'
fig.xtitle = '#eta'
fig.ytitle = 'Ratio of dN_{ch}/d#eta over MB result' if ratio_to_mb else '1/N #times dN_{ch}/d#eta'
fig.legend.title = make_estimator_title(est_dir.GetName())
fig.plot.ymin = 0
dNdeta_mb = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter,
[1, event_counter.GetXaxis().GetNbins()])
for cls_bin, perc_bin in zip(self.nch_edges[est_dir.GetName()], self.perc_bins[est_dir.GetName()]):
title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
dNdeta_in_interval = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter, cls_bin)
if ratio_to_mb:
fig.add_plottable(dNdeta_in_interval / dNdeta_mb, legend_title=title)
else:
fig.add_plottable(dNdeta_in_interval, legend_title=title)
# add MB as well, if it is not the ratio plots we are making
if not ratio_to_mb:
title = "MB"
fig.add_plottable(dNdeta_mb, legend_title=title)
path = results_est_dir.GetPath().split(":")[1] # file.root:/internal/root/path
if ratio_to_mb:
fig.save_to_root_file(self.f, "dNdeta_MB_ratio_summary", path=path)
else:
fig.save_to_root_file(self.f, "dNdeta_summary", path=path)
figs.append(fig)
return figs
@_io_decorator
def plot_pt_distribution_ratios(self):
# create particle ratio vs pT plots
log.info("Computing histograms vs pt")
results_path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
# Loop over all estimators in the Sums list:
figs = []
def get_new_figure():
fig = Figure()
fig.xtitle = 'p_{T} (GeV)'
fig.plot.ymin = 0
fig.plot.xmax = 10
fig.plot.palette = 'colorblind'
# fig.plot.palette_ncolors = len(nch_edges) - 1
fig.legend.position = 'br'
return fig
for est_dir in get_est_dirs(self.results_post, self.considered_ests):
dirname = '{0}/{1}/pid_ratios/'.format(results_path, est_dir.GetName())
mult_binned_pt_dists = {}
mult_binned_pt_dists['proton'] = [
get_pT_distribution(est_dir, [kANTIPROTON, kPROTON], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['pi_ch'] = [
get_pT_distribution(est_dir, [kPIMINUS, kPIPLUS], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['xi'] = [
get_pT_distribution(est_dir, [kANTIXI, kXI], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['omega'] = [
get_pT_distribution(est_dir, [kOMEGAMINUS, kOMEGAPLUS], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['lambda'] = [
get_pT_distribution(est_dir, [kANTILAMBDA, kLAMBDA], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['k0s'] = [
get_pT_distribution(est_dir, [kK0S], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['k_ch'] = [
get_pT_distribution(est_dir, [kKPLUS, kKMINUS], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
mult_binned_pt_dists['pi0'] = [
get_pT_distribution(est_dir, [kPI0], classifier_bin_interval)
for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
]
perc_titles = ["{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
for perc_bin in self.perc_bins[est_dir.GetName()]]
fig = get_new_figure()
name = "proton_over_pich__vs__pt"
fig.ytitle = "(p+#bar{p})/#pi^{+-}"
fig.plot.ymax = .3
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Xi_over_pich__vs__pt"
fig.plot.ymax = .06
fig.legend.position = 'tl'
fig.ytitle = "#Xi/#pi^{+-}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "OmegaCh_over_pich__vs__pt"
fig.plot.ymax = .005
fig.legend.position = 'tl'
fig.ytitle = "#Omega_{ch}/#pi^{+-} "
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
# Ratios to pi0
fig = get_new_figure()
name = "pich_over_pi0__vs__pt"
fig.plot.ymax = 2.5
fig.legend.position = 'bl'
fig.ytitle = "#pi^{+-}/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['pi_ch'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "proton_over_pi0__vs__pt"
fig.plot.ymax = 1
fig.legend.position = 'tr'
fig.ytitle = "p/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "K0S_over_pi0__vs__pt"
fig.plot.ymax = 1.4
fig.legend.position = 'tl'
fig.ytitle = "K^{0}_{S}/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['k0s'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Lambda_over_pi0__vs__pt"
fig.plot.ymax = .9
fig.legend.position = 'tl'
fig.ytitle = "#Lambda/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Xi_over_pi0__vs__pt"
fig.plot.ymax = .08
fig.legend.position = 'tl'
fig.ytitle = "#Xi/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "OmegaCh_over_pi0__vs__pt"
fig.plot.ymax = .005
fig.legend.position = 'tl'
fig.ytitle = "#Omega_{ch}/#pi^{0}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi0'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
# Ratios to K0S
fig = get_new_figure()
name = "proton_over_K0S__vs__pt"
fig.plot.ymax = 2.6
fig.legend.position = 'tr'
fig.ytitle = "p/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Lambda_over_K0S__vs__pt"
fig.plot.ymax = 1
fig.legend.position = 'bl'
fig.ytitle = "#Lambda/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Xi_over_K0S__vs__pt"
fig.plot.ymax = .2
fig.legend.position = 'tl'
fig.ytitle = "#Xi/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "OmegaCh_over_K0S__vs__pt"
fig.plot.ymax = .012
fig.legend.position = 'tl'
fig.ytitle = "#Omega_{ch}/K^{0}_{S}"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['k0s'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
fig = get_new_figure()
name = "Kaon_over_pich__vs__pt"
fig.plot.ymax = 1
fig.legend.position = 'tl'
fig.ytitle = "(K^{+} + K^{-}) / (#pi^{+} +#pi^{-})"
fig.legend.title = make_estimator_title(est_dir.GetName())
[
fig.add_plottable(h1 / h2, legend_title=title)
for h1, h2, title in zip(mult_binned_pt_dists['k_ch'], mult_binned_pt_dists['pi_ch'], perc_titles)
]
fig.save_to_root_file(self.f, name, dirname)
figs.append(fig)
return figs
@_io_decorator
def plot_PNch_summary(self):
log.info("Creating P(Nch) summary plot")
summary_fig = Figure()
summary_fig.xtitle = "N_{ch}^{est}"
summary_fig.ytitle = "P(N_{ch}^{est})"
summary_fig.legend.position = 'tr'
summary_fig.plot.logy = True
for est_dir in get_est_dirs(self.sums, self.considered_ests):
est_name = est_dir.GetName()
h_tmp = get_PNch_vs_estmult(self.sums, est_name)
if h_tmp.Integral() > 0:
h_tmp.Scale(1.0 / h_tmp.Integral())
summary_fig.add_plottable(h_tmp, make_estimator_title(est_name))
path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
summary_fig.save_to_root_file(self.f, "PNch_summary", path=path)
        # a list as return type is expected for making the pdf
return [summary_fig]
@_io_decorator
def plot_PNch(self):
log.info("Creating P(Nch_est) and P(Nch_refest) histograms")
# mult_bin_size = 10
figs = []
for ref_est_name in self.ref_ests:
for res_est_dir in get_est_dirs(self.results_post, self.considered_ests):
est_name = res_est_dir.GetName()
# Figure properties:
fig_vs_estmult = Figure()
fig_vs_refmult = Figure()
fig_vs_estmult.plot.logy = True
fig_vs_refmult.plot.logy = True
fig_vs_estmult.plot.palette = 'colorblind'
fig_vs_refmult.plot.palette = 'colorblind'
fig_vs_estmult.legend.position = 'tr'
fig_vs_refmult.legend.position = 'tr'
fig_vs_estmult.xtitle = "N_{{ch}}^{{{0}}}".format(est_name)
fig_vs_refmult.xtitle = "N_{{ch}}^{{{0}}}".format(ref_est_name)
fig_vs_estmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(est_name)
fig_vs_refmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(ref_est_name)
corr_hist = get_correlation_histogram(self.sums, est_name, ref_est_name)
# logic when dealing with fixed bins given in Nch:
# ------------------------------------------------
# mean_nch_est = corr_hist.GetMean(1) # mean of x axis
# nch_max = corr_hist.xaxis.GetNbins()
# nch_cutoff = mean_nch_est * mean_mult_cutoff_factor
# nch_bins = [(low, low + mult_bin_size) for low in range(0, int(nch_cutoff), mult_bin_size)]
# # a large last bin covering the rest:
                # nch_bins += [(nch_bins[-1][1], nch_max)]
# legend_tmpl = "{} < N_{ch} < {}"
# logic when dealing with percentile bins:
# ----------------------------------------
# event_counter_est = asrootpy(getattr(res_est_dir, "event_counter"))
legend_tmpl = "{0}% - {1}%"
fig_vs_estmult.legend.title = "Selected in {0}".format(make_estimator_title(ref_est_name))
fig_vs_refmult.legend.title = "Selected in {0}".format(make_estimator_title(est_name))
                # WARNING: the following needs tweaking when going back to fixed N_ch bins!
for nch_bin, perc_bin in zip(self.nch_edges[ref_est_name], self.perc_bins[ref_est_name]):
# vs est_mult:
corr_hist.xaxis.SetRange(0, 0) # reset x axis
corr_hist.yaxis.SetRange(nch_bin[0], nch_bin[1])
h_vs_est = asrootpy(corr_hist.ProjectionX(gen_random_name()))
if h_vs_est.Integral() > 0:
h_vs_est.Scale(1.0 / h_vs_est.Integral())
fig_vs_estmult.add_plottable(h_vs_est, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100))
else:
log.info("No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen".
format(perc_bin, ref_est_name))
for nch_bin, perc_bin in zip(self.nch_edges[est_name], self.perc_bins[est_name]):
# vs ref_mult:
corr_hist.yaxis.SetRange(0, 0) # reset y axis
corr_hist.xaxis.SetRange(*nch_bin)
h_vs_ref = asrootpy(corr_hist.ProjectionY(gen_random_name()))
if h_vs_ref.Integral() > 0:
h_vs_ref.Scale(1.0 / h_vs_ref.Integral())
fig_vs_refmult.add_plottable(h_vs_ref, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100))
else:
log.info(
"No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen".
format(perc_bin, est_name))
path = res_est_dir.GetPath().split(":")[1]
# vs est_mult
fig_vs_estmult.save_to_root_file(self.f, "PNchEst_binned_in_Nch{0}".format(ref_est_name), path)
                # vs ref_mult
fig_vs_refmult.save_to_root_file(self.f, "PNch{0}_binned_in_NchEst".format(ref_est_name), path)
figs.append(fig_vs_estmult)
figs.append(fig_vs_refmult)
return figs
@_io_decorator
def plot_mult_vs_pt(self):
log.info("Makeing 2D pt plots for each particle kind")
for est_dir in get_est_dirs(self.sums, self.considered_ests):
path = (self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
+ "/" + est_dir.GetName()
+ "/mult_pt")
try:
self.f.mkdir(path, recurse=True)
except ValueError:
pass
self.f.cd(path)
h3d = asrootpy(est_dir.FindObject('classifier_pT_PID_{0}'.format(est_dir.GetName())))
# loop through all particle kinds:
nPIDs = h3d.zaxis.GetNbins()
for ibin in range(1, nPIDs + 1):
h3d.zaxis.SetRange(ibin, ibin)
mult_pt = asrootpy(h3d.Project3D("yx"))
mult_pt.name = h3d.zaxis.GetBinLabel(ibin)
mult_pt.Write()
@_io_decorator
def plot_correlation(self):
# Make correlations between estimators
log.info("Correlating N_ch of each estimator")
corr_dir = self.results_post.GetPath().split(":")[1] + '/correlations'
try:
self.f.mkdir(corr_dir, recurse=True)
        except ValueError:
            pass
# Take ntuple from the first estimator and then add friends to this one
nt0 = self.sums[0].FindObject("fEventTuple")
nt0.SetAlias(self.sums[0].GetName(), "fEventTuple")
# build ntuple
for est_dir in self.sums[1:]:
nt0.AddFriend(est_dir.FindObject("fEventTuple"), est_dir.GetName())
for ref_est in self.considered_ests:
for est_dir in self.sums:
log.info("Correlating {0} with {1}".format(ref_est, est_dir.GetName()))
corr_hist = Hist2D(400, 0, 400,
400, 0, 400,
name="corr_hist_{0}_vs_{1}".format(ref_est, est_dir.GetName()))
                # Labels are deliberately swapped, see Projection below!
corr_hist.title = ("Correlation N_{{ch}} in {0} and {1};N_{{ch}} {1};N_{{ch}} {0}"
.format(ref_est, est_dir.GetName()))
# this projects onto y:x, to make coding more adventurous
nt0.Project(corr_hist.name, "{0}.nch:{1}.nch".format(ref_est, est_dir.GetName()),
"ev_weight")
corr_hist.drawstyle = 'colz'
self.f.cd(corr_dir)
corr_hist.write()
@_io_decorator
def plot_pid_ratio_vs_refmult(self):
log.info("Creating plots vs refmult")
ratios_dir = self.results_post.GetPath().split(":")[1] + '/pid_ratios_vs_refmult'
def get_new_figure():
fig = Figure()
fig.plot.ncolors = len(self.considered_ests)
fig.xtitle = "N_{ch}|_{" + make_estimator_title('EtaLt05') + "}"
fig.plot.xmin = 0
fig.plot.xmax = 60
return fig
figs = []
# Proton / pi_ch
fig = get_new_figure()
pids1, pids2 = ['-2212', '2212'], ['-211', '211']
fig.ytitle = "p/#pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.04, 0.13
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, )
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K / pi_ch
fig = get_new_figure()
pids1, pids2 = ['310', '321', '-321'], ['-211', '211']
fig.ytitle = "K^{*}/#pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.09, 0.30
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Lambda / pi_ch
fig = get_new_figure()
pids1, pids2 = ['3122'], ['-211', '211']
fig.ytitle = "#Lambda / #pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.005, 0.035
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Xi / pi_ch
fig = get_new_figure()
pids1, pids2 = ['3312'], ['-211', '211']
fig.ytitle = "#Xi / #pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.0004, 0.003
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Omega / pi_ch
fig = get_new_figure()
pids1, pids2 = ['3334', '-3334'], ['-211', '211']
fig.ytitle = "#Omega / #pi^{+-}"
fig.plot.ymin, fig.plot.ymax = 0.00001, 0.0005
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# pi_ch/pi0
fig = get_new_figure()
pids1, pids2 = ['-211', '211'], ['111']
fig.ytitle = "#pi^{+-}/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 1.5, 2.2
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# proton / pi0
fig = get_new_figure()
pids1, pids2 = ['-2212', '2212'], ['111']
fig.ytitle = "p/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.09, 0.30
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K / pi0
fig = get_new_figure()
pids1, pids2 = ['310', '321', '-321'], ['111']
fig.ytitle = "K^{*}/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.15, 0.50
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Lambda / pi0
fig = get_new_figure()
pids1, pids2 = ['3122'], ['111']
fig.ytitle = "#Lambda/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.014, 0.045
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Xi / pi0
fig = get_new_figure()
pids1, pids2 = ['3312'], ['111']
fig.ytitle = "#Xi/#pi^{0}"
fig.plot.ymin, fig.plot.ymax = 0.0010, 0.005
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# Omega / pi0
fig = get_new_figure()
pids1, pids2 = ['3334', '-3334'], ['111']
fig.ytitle = "#Omega/#pi^{0}"
fig.legend.position = 'tl'
fig.plot.ymin, fig.plot.ymax = 0.00002, 0.0008
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K_ch / K0_S
fig = get_new_figure()
pids1, pids2 = ['321', '-321'], ['310']
fig.ytitle = "(K^{+}+K^{-}) / (2#timesK^{0}_{S})"
fig.plot.ymin, fig.plot.ymax = 0.4, 1.5
fig.legend.position = 'tl'
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, scale=.5)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K0_S / Lambda
fig = get_new_figure()
pids1, pids2 = ['310'], ['-3122', '3122']
fig.ytitle = "K^{0}_{S} / #Lambda"
fig.plot.ymin, fig.plot.ymax = 1.3, 3.7
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
# K0_S / Xi
fig = get_new_figure()
pids1, pids2 = ['310'], ['3312']
fig.ytitle = "K^{0}_{S} / #Xi"
fig.plot.ymin, fig.plot.ymax = 15, 80
graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
[fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
name = "_".join(pids1) + "_div_" + "_".join(pids2)
fig.save_to_root_file(self.f, name, ratios_dir)
figs.append(fig)
return figs
# ######################################################################################
# # vs Est mult
    # _plot_particle_ratios_vs_estmult(self, ['321', '-321'], ['310'],
    #                                  scale=.5, ytitle="(K^{+} + K^{-}) / (2*K_{S}^{0})")
@_io_decorator
def plot_meanpt_vs_ref_mult_for_pids(self):
log.info("Creating mean pT plots")
figs = []
for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
get_est_dirs(self.results_post, self.considered_ests)):
if sums_est_dir.GetName() != res_est_dir.GetName():
raise IndexError("Order of estimator dirs is different in sums and results_post")
res_dir_str = res_est_dir.GetPath().split(":")[1]
corr_hist = get_correlation_histogram(self.sums, sums_est_dir.GetName(), "EtaLt05")
# Get the <pT> per classifier bin; then, re-map the classifier value to the reference classifier (eg EtaLt05)
# This might not make a lot of sense, actually. Maybe it would be much more telling if I were to
# put the percentile bins on the x-axis? As in the highest 1% of that classifier has a <pT> of ...
graphs = []
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPI0, kPIMINUS, kPIPLUS]), corr_hist))
graphs[-1].title = "#pi"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kKMINUS, kKPLUS]), corr_hist))
graphs[-1].title = "K^{#pm}"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPROTON, kANTIPROTON]), corr_hist))
graphs[-1].title = "p"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kK0S]), corr_hist))
graphs[-1].title = "K^{0}_{S}"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kLAMBDA, kANTILAMBDA]), corr_hist))
graphs[-1].title = "#Lambda"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kXI, kANTIXI]), corr_hist))
graphs[-1].title = "#Xi"
graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kOMEGAMINUS, kOMEGAPLUS]), corr_hist))
graphs[-1].title = "#Omega"
# sanitize graphs:
for g in graphs:
remove_zero_value_points(g)
remove_points_with_x_err_gt_1NchRef(g)
remove_points_with_equal_x(g)
fig = Figure()
fig.plot.palette = 'root'
fig.plot.ncolors = 7
fig.plot.xmin = 0
fig.plot.xmax = 40
fig.plot.ymin = 0.3
fig.plot.ymax = 2.1
fig.ytitle = "<p_{T}>"
fig.xtitle = "N_{ch}|_{|#eta|<0.5}"
fig.legend.title = make_estimator_title(sums_est_dir.GetName())
[fig.add_plottable(g, g.title) for g in graphs]
fig.save_to_root_file(self.f, "mean_pt", res_dir_str)
figs.append(fig)
return figs
# def _plot_event_counter_with_shaded_perc_areas(f, results_post):
# log.info("Broken: Root sucks! Creating shaded event counter with percentile regions")
# return
# for est_dir in get_est_dirs(results_post):
# event_counter = asrootpy(getattr(est_dir, "event_counter"))
# nch_edges = get_Nch_edges_for_percentile_edges(perc_edges, event_counter)
# c = Canvas(name="event_counter_with_perc")
# leg = Legend(len(nch_edges) - 1)
# copies = []
# colors = get_color_generator(ncolors=10)
# # Draw the hist once
# event_counter.Draw()
# for nch_low, nch_up in zip(nch_edges[:-1], nch_edges[1:]):
# copies.append(event_counter.Clone(gen_random_name()))
# copies[-1].xaxis.SetRangeUser(nch_low, nch_up)
# copies[-1].SetFillStyle(1001)
# copies[-1].color = next(colors)
# copies[-1].xaxis.title = "N_{ch}"
# copies[-1].yaxis.title = "counts"
# leg.AddEntry(copies[-1], "{}-{}%".format(str(nch_low), str(nch_up)))
# copies[-1].Draw('sameHist')
# break
# leg.Draw()
# est_dir.cd()
# c.Write()
@_io_decorator
def plot_dNdpT(self, pid_selection):
"""
        Plot dN/dpT for the particles in pid_selection
Parameters
----------
pid_selection : str
Either all charged particles ('ch') or 'pi', 'K' or 'p'
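
        Example (hypothetical usage; assumes a fully set-up instance named
        `plotter` with `sums` and `results_post` attached; not run as a doctest):

        >>> figs = plotter.plot_dNdpT('pi')  # doctest: +SKIP
        >>> len(figs) == len(plotter.considered_ests)  # doctest: +SKIP
        True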
"""
log.info("1/N_evts dN_ch/dpT plots")
figs = []
for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
get_est_dirs(self.results_post, self.considered_ests)):
if sums_est_dir.GetName() != res_est_dir.GetName():
raise IndexError("Order of estimator dirs is different in sums and results_post")
res_dir_str = res_est_dir.GetPath().split(":")[1]
fig = Figure()
fig.plot.palette = 'colorblind'
# fig.plot.ncolors = 5
fig.legend.position = 'tr'
fig.ytitle = "1/N_{evts} dN/dp_{T} (" + make_estimator_title(sums_est_dir.GetName()) + ")"
fig.xtitle = "p_{T} (GeV)"
fig.plot.logy = True
hists = []
if pid_selection == 'ch':
fig.legend.title = "#pi^{#pm}, K^{#pm}, p, #Lambda, #Xi, #Omega"
pid_numbers = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON,
kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS]
if pid_selection == 'pi':
fig.legend.title = "#pi^{#pm}"
pid_numbers = [kPIMINUS, kPIPLUS]
if pid_selection == 'K':
fig.legend.title = "K^{#pm}"
pid_numbers = [kKMINUS, kKPLUS]
if pid_selection == 'p':
fig.legend.title = "p, #bar{p}"
pid_numbers = [kPROTON, kANTIPROTON]
for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()], self.nch_edges[sums_est_dir.GetName()]):
hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin, normalized=False))
hists[-1].title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
            # add MB last to be consistent with colors in other plots; MB spans
            # from the very first to the very last bin we look at
classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0], self.nch_edges[sums_est_dir.GetName()][-1][-1])
hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin_mb, normalized=False))
hists[-1].title = "MB"
# scale by bin width
[h.Scale(1, "width") for h in hists]
[fig.add_plottable(p, p.title) for p in hists]
fig.save_to_root_file(self.f, "dN{0}dpT".format(pid_selection), res_dir_str)
figs.append(fig)
return figs
@_io_decorator
def plot_pT_HM_div_pt_MB(self, scale_nMPI):
log.info("Plot dN_{HM}/dpT / dN_{MB}/dpT ratios scaled with nMPI")
figs = []
for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
get_est_dirs(self.results_post, self.considered_ests)):
if sums_est_dir.GetName() != res_est_dir.GetName():
raise IndexError("Order of estimator dirs is different in sums and results_post")
res_dir_str = res_est_dir.GetPath().split(":")[1]
fig = Figure()
fig.plot.palette = 'root'
fig.plot.ncolors = 7
fig.xtitle = "p_{T} (GeV)"
fig.legend.title = make_estimator_title(sums_est_dir.GetName())
if scale_nMPI:
fig.ytitle = ("#left[ #frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}} #right] "
"#times #left[ #frac{<N_{MPI}^{MB}>}{<N_{MPI}^{HM}>} #right]")
else:
fig.ytitle = "#frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}}"
charged_particles = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON,
kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS]
            # get the MB distribution which will be used to divide the nch-binned distributions
classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0],
self.nch_edges[sums_est_dir.GetName()][-1][-1])
pt_dist_mb = get_pT_distribution(res_est_dir, charged_particles, classifier_bin_mb, normalized=False)
mean_nmpi_mb = get_mean_nMPI(sums_est_dir, classifier_bin_mb)
for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()],
self.nch_edges[sums_est_dir.GetName()]):
# get the pt distribution in this Nch interval
pt_dist_in_interval = get_pT_distribution(res_est_dir, charged_particles,
classifier_bin, normalized=False)
title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
if scale_nMPI:
mean_nmpi_hm = get_mean_nMPI(sums_est_dir, classifier_bin)
fig.add_plottable((pt_dist_in_interval / pt_dist_mb) * (mean_nmpi_mb / mean_nmpi_hm), title)
name = "pt_hm_div_pt_mb_scaled_nMPI"
else:
fig.add_plottable((pt_dist_in_interval / pt_dist_mb), title)
name = "pt_hm_div_pt_mb"
fig.save_to_root_file(self.f, name, res_dir_str)
figs.append(fig)
return figs
@_io_decorator
def plot_nMPI_vs_Nch(self):
log.info("Creating nMPI(Nch) summary plot")
summary_fig = Figure()
summary_fig.xtitle = "N_{ch}^{est}"
summary_fig.ytitle = "<N_{MPI}>"
summary_fig.plot.palette = 'root'
summary_fig.legend.position = 'br'
summary_fig.plot.logy = True
summary_fig.plot.ymin = 1
for est_dir in get_est_dirs(self.sums, self.considered_ests):
h_tmp = asrootpy(get_correlation_histogram(self.sums, est_dir.GetName(), "nMPI").ProfileX())
summary_fig.add_plottable(h_tmp, make_estimator_title(est_dir.GetName()))
path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path
summary_fig.save_to_root_file(self.f, "nMPI_summary", path=path)
return [summary_fig]
|
angr/angr
|
refs/heads/master
|
angr/engines/pcode/arch/ArchPcode_tricore_LE_32_default.py
|
1
|
###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_tricore_LE_32_default(ArchPcode):
name = 'tricore:LE:32:default'
pcode_arch = 'tricore:LE:32:default'
description = 'Siemens Tricore Embedded Processor'
bits = 32
ip_offset = 0xfe08
sp_offset = 0xffa8
bp_offset = sp_offset
instruction_endness = Endness.LE
register_list = [
Register('contextreg', 4, 0x0),
Register('task_asi', 4, 0x8004),
Register('pma0', 4, 0x8100),
Register('pma1', 4, 0x8104),
Register('pma2', 4, 0x8108),
Register('dcon2', 4, 0x9000),
Register('dcon1', 4, 0x9008),
Register('smacon', 4, 0x900c),
Register('dstr', 4, 0x9010),
Register('datr', 4, 0x9018),
Register('deadd', 4, 0x901c),
Register('diear', 4, 0x9020),
Register('dietr', 4, 0x9024),
Register('dcon0', 4, 0x9040),
Register('pstr', 4, 0x9200),
Register('pcon1', 4, 0x9204),
Register('pcon2', 4, 0x9208),
Register('pcon0', 4, 0x920c),
Register('piear', 4, 0x9210),
Register('pietr', 4, 0x9214),
Register('compat', 4, 0x9400),
Register('fpu_trap_con', 4, 0xa000),
Register('fpu_trap_pc', 4, 0xa004),
Register('fpu_trap_opc', 4, 0xa008),
Register('fpu_trap_src1', 4, 0xa010),
Register('fpu_trap_src2', 4, 0xa014),
Register('fpu_trap_src3', 4, 0xa018),
Register('dpr0_l', 4, 0xc000),
Register('dpr0_u', 4, 0xc004),
Register('dpr1_l', 4, 0xc008),
Register('dpr1_u', 4, 0xc00c),
Register('dpr2_l', 4, 0xc010),
Register('dpr2_u', 4, 0xc014),
Register('dpr3_l', 4, 0xc018),
Register('dpr3_u', 4, 0xc01c),
Register('dpr4_l', 4, 0xc020),
Register('dpr4_u', 4, 0xc024),
Register('dpr5_l', 4, 0xc028),
Register('dpr5_u', 4, 0xc02c),
Register('dpr6_l', 4, 0xc030),
Register('dpr6_u', 4, 0xc034),
Register('dpr7_l', 4, 0xc038),
Register('dpr7_u', 4, 0xc03c),
Register('dpr8_l', 4, 0xc040),
Register('dpr8_u', 4, 0xc044),
Register('dpr9_l', 4, 0xc048),
Register('dpr9_u', 4, 0xc04c),
Register('dpr10_l', 4, 0xc050),
Register('dpr10_u', 4, 0xc054),
Register('dpr11_l', 4, 0xc058),
Register('dpr11_u', 4, 0xc05c),
Register('dpr12_l', 4, 0xc060),
Register('dpr12_u', 4, 0xc064),
Register('dpr13_l', 4, 0xc068),
Register('dpr13_u', 4, 0xc06c),
Register('dpr14_l', 4, 0xc070),
Register('dpr14_u', 4, 0xc074),
Register('dpr15_l', 4, 0xc078),
Register('dpr15_u', 4, 0xc07c),
Register('cpr0_l', 4, 0xd000),
Register('cpr0_u', 4, 0xd004),
Register('cpr1_l', 4, 0xd008),
Register('cpr1_u', 4, 0xd00c),
Register('cpr2_l', 4, 0xd010),
Register('cpr2_u', 4, 0xd014),
Register('cpr3_l', 4, 0xd018),
Register('cpr3_u', 4, 0xd01c),
Register('cpr4_l', 4, 0xd020),
Register('cpr4_u', 4, 0xd024),
Register('cpr5_l', 4, 0xd028),
Register('cpr5_u', 4, 0xd02c),
Register('cpr6_l', 4, 0xd030),
Register('cpr6_u', 4, 0xd034),
Register('cpr7_l', 4, 0xd038),
Register('cpr7_u', 4, 0xd03c),
Register('cpr8_l', 4, 0xd040),
Register('cpr8_u', 4, 0xd044),
Register('cpr9_l', 4, 0xd048),
Register('cpr9_u', 4, 0xd04c),
Register('cpr10_l', 4, 0xd050),
Register('cpr10_u', 4, 0xd054),
Register('cpr11_l', 4, 0xd058),
Register('cpr11_u', 4, 0xd05c),
Register('cpr12_l', 4, 0xd060),
Register('cpr12_u', 4, 0xd064),
Register('cpr13_l', 4, 0xd068),
Register('cpr13_u', 4, 0xd06c),
Register('cpr14_l', 4, 0xd070),
Register('cpr14_u', 4, 0xd074),
Register('cpr15_l', 4, 0xd078),
Register('cpr15_u', 4, 0xd07c),
Register('cpxe_0', 4, 0xe000),
Register('cpxe_1', 4, 0xe004),
Register('cpxe_2', 4, 0xe008),
Register('cpxe_3', 4, 0xe00c),
Register('dpre_0', 4, 0xe010),
Register('dpre_1', 4, 0xe014),
Register('dpre_2', 4, 0xe018),
Register('dpre_3', 4, 0xe01c),
Register('dpwe_0', 4, 0xe020),
Register('dpwe_1', 4, 0xe024),
Register('dpwe_2', 4, 0xe028),
Register('dpwe_3', 4, 0xe02c),
Register('tps_con', 4, 0xe400),
Register('tps_timer0', 4, 0xe404),
Register('tps_timer1', 4, 0xe408),
Register('tps_timer2', 4, 0xe40c),
Register('tr0evt', 4, 0xf000),
Register('tr0adr', 4, 0xf004),
Register('tr1evt', 4, 0xf008),
Register('tr1adr', 4, 0xf00c),
Register('tr2evt', 4, 0xf010),
Register('tra2dr', 4, 0xf014),
Register('tr3evt', 4, 0xf018),
Register('tr3adr', 4, 0xf01c),
Register('tr4evt', 4, 0xf020),
Register('tr4adr', 4, 0xf024),
Register('tr5evt', 4, 0xf028),
Register('tr5adr', 4, 0xf02c),
Register('tr6evt', 4, 0xf030),
Register('tr6adr', 4, 0xf034),
Register('tr7evt', 4, 0xf038),
Register('tr7adr', 4, 0xf03c),
Register('cctrl', 4, 0xfc00),
Register('ccnt', 4, 0xfc04),
Register('icnt', 4, 0xfc08),
Register('m1cnt', 4, 0xfc0c),
Register('m2cnt', 4, 0xfc10),
Register('m3cnt', 4, 0xfc14),
Register('dbgsr', 4, 0xfd00),
Register('exevt', 4, 0xfd08),
Register('crevt', 4, 0xfd0c),
Register('swevt', 4, 0xfd10),
Register('trig_acc', 4, 0xfd30),
Register('dms', 4, 0xfd40),
Register('dcx', 4, 0xfd44),
Register('dbgtcr', 4, 0xfd48),
Register('pcxi', 4, 0xfe00),
Register('psw', 4, 0xfe04),
Register('pc', 4, 0xfe08, alias_names=('ip',)),
Register('syscon', 4, 0xfe14),
Register('cpu_id', 4, 0xfe18),
Register('core_id', 4, 0xfe1c),
Register('biv', 4, 0xfe20),
Register('btv', 4, 0xfe24),
Register('isp', 4, 0xfe28),
Register('icr', 4, 0xfe2c),
Register('pipn', 1, 0xfe2e),
Register('fcx', 4, 0xfe38),
Register('lcx', 4, 0xfe3c),
Register('e0', 8, 0xff00),
Register('d0', 4, 0xff00),
Register('d1', 4, 0xff04),
Register('e2', 8, 0xff08),
Register('d2', 4, 0xff08),
Register('d3', 4, 0xff0c),
Register('e4', 8, 0xff10),
Register('d4', 4, 0xff10),
Register('d5', 4, 0xff14),
Register('e6', 8, 0xff18),
Register('d6', 4, 0xff18),
Register('d7', 4, 0xff1c),
Register('e8', 8, 0xff20),
Register('d8', 4, 0xff20),
Register('d9', 4, 0xff24),
Register('e10', 8, 0xff28),
Register('d10', 4, 0xff28),
Register('d11', 4, 0xff2c),
Register('e12', 8, 0xff30),
Register('d12', 4, 0xff30),
Register('d13', 4, 0xff34),
Register('e14', 8, 0xff38),
Register('d14', 4, 0xff38),
Register('d15', 4, 0xff3c),
Register('p0', 8, 0xff80),
Register('a0', 4, 0xff80),
Register('a1', 4, 0xff84),
Register('p2', 8, 0xff88),
Register('a2', 4, 0xff88),
Register('a3', 4, 0xff8c),
Register('p4', 8, 0xff90),
Register('a4', 4, 0xff90),
Register('a5', 4, 0xff94),
Register('p6', 8, 0xff98),
Register('a6', 4, 0xff98),
Register('a7', 4, 0xff9c),
Register('p8', 8, 0xffa0),
Register('a8', 4, 0xffa0),
Register('a9', 4, 0xffa4),
Register('p10', 8, 0xffa8),
Register('a10', 4, 0xffa8),
Register('a11', 4, 0xffac),
Register('p12', 8, 0xffb0),
Register('a12', 4, 0xffb0),
Register('a13', 4, 0xffb4),
Register('p14', 8, 0xffb8),
Register('a14', 4, 0xffb8),
Register('a15', 4, 0xffbc),
Register('r0', 4, 0xf0043f00),
Register('r1', 4, 0xf0043f04),
Register('r2', 4, 0xf0043f08),
Register('r3', 4, 0xf0043f0c),
Register('r4', 4, 0xf0043f10),
Register('r5', 4, 0xf0043f14),
Register('r6', 4, 0xf0043f18),
Register('r7', 4, 0xf0043f1c)
]
register_arch(['tricore:le:32:default'], 32, Endness.LE, ArchPcode_tricore_LE_32_default)
|
okulikov/modeshape
|
refs/heads/master
|
bin/markdown2.py
|
17
|
#!/usr/bin/env python
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<http://code.google.com/p/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extras (see -x|--extras option below):
* code-friendly: Disable _ and __ for em and strong.
* code-color: Pygments-based syntax coloring of <code> sections.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
"""
# Dev Notes:
# - There is already a Python markdown processor
# (http://www.freewisdom.org/projects/python-markdown/).
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
#   not yet sure if there are implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (1, 0, 1, 18) # first three nums match Markdown.pl
__version__ = '1.0.1.18'
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
from urllib import quote
#---- Python version compat
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
def _unicode_decode(s, encoding, errors='xmlcharrefreplace'):
return unicode(s, encoding, errors)
else:
def _unicode_decode(s, encoding, errors='strict'):
return s.decode(encoding, errors)
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
try:
import uuid
except ImportError:
SECRET_SALT = str(randint(0, 1000000))
else:
SECRET_SALT = str(uuid.uuid4())
def _hash_ascii(s):
#return md5(s).hexdigest() # Markdown.pl effectively does this.
return 'md5-' + md5(SECRET_SALT + s).hexdigest()
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_ascii(ch))
for ch in '\\`*_{}[]()>#+-.!'])
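# For illustration (the exact digests differ on every run because of
# SECRET_SALT): each special character maps to a stable placeholder, e.g.
#   g_escape_table['*'] -> 'md5-...' (a 32-char hex digest)
# The placeholders are swapped back by _unescape_special_chars() at the
# end of convert().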
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
# Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and not "header-ids" in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
self._escape_table = g_escape_table.copy()
if "smarty-pants" in self.extras:
self._escape_table['"'] = _hash_ascii('"')
self._escape_table["'"] = _hash_ascii("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in emacs_vars.items():
            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
               or val.startswith("'") and val.endswith("'")):
emacs_vars[var] = val[1:-1]
return emacs_vars
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
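    # Worked example of the arithmetic above (assuming tab_width = 4): for a
    # match where group(1) == "ab", len("ab") % 4 == 2, so the tab expands to
    # 4 - 2 = 2 spaces; a group whose length is a multiple of 4 gets a full
    # 4 spaces. This reproduces standard tab-stop column alignment.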
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError, ex:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError, ex:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
                # - Must be followed by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
_hr_data = [
('*', re.compile(r"^[ ]{0,3}\*(.*?)$", re.M)),
('-', re.compile(r"^[ ]{0,3}\-(.*?)$", re.M)),
('_', re.compile(r"^[ ]{0,3}\_(.*?)$", re.M)),
]
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
text = self._do_headers(text)
# Do Horizontal Rules:
# On the number of spaces in horizontal rules: The spec is fuzzy: "If
# you wish, you may use spaces between the hyphens or asterisks."
# Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
# hr chars to one or two. We'll reproduce that limit here.
hr = "\n<hr"+self.empty_element_suffix+"\n"
for ch, regex in self._hr_data:
if ch in text:
for m in reversed(list(regex.finditer(text))):
tail = m.group(1).rstrip()
if not tail.strip(ch + ' ') and tail.count(" ") == 0:
start, end = m.span()
text = text[:start] + hr + text[end:]
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
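    # Illustrative example (not part of the original source): with the
    # "pyshell" extra enabled, an unindented session such as
    #     >>> 1 + 1
    #     2
    # (followed by a blank line) is re-indented by one tab-width so the
    # normal code-block logic in _do_code_blocks() picks it up.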
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
if "smarty-pants" in self.extras:
text = self._do_smart_punctuation(text)
# Do hard breaks:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
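    # Illustrative example (not part of the original source):
    # _sorta_html_tokenize_re.split() yields alternating text and markup
    # tokens, e.g. for 'x <img src="a_b.png"> y':
    #     ['x ', '<img src="a_b.png">', ' y']
    # The '_' inside the tag token is replaced with its hash value so it
    # cannot be mistaken for emphasis; the plain-text tokens get normal
    # backslash-escape encoding instead.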
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in self.html_spans.items():
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&'),
('<', '<'),
('>', '>'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_tail_of_inline_link_re = re.compile(r'''
# Match tail of: [text](/url/) or [text](/url/ "title")
\( # literal paren
[ \t]*
(?P<url> # \1
<.*?>
|
.*?
)
[ \t]*
( # \2
(['"]) # quote char = \3
(?P<title>.*?)
\3 # matching quote
)? # title is optional
\)
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
match = self._tail_of_inline_link_re.match(text, p)
if match:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
url, title = match.group("url"), match.group("title")
if url and url[0] == '<':
url = url[1:-1] # '<url>' -> 'url'
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '"'),
_xml_escape_attr(link_text),
title_str, self.empty_element_suffix)
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
result = '<img src="%s" alt="%s"%s%s' \
% (url.replace('"', '"'),
link_text.replace('"', '"'),
title_str, self.empty_element_suffix)
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
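    # Illustrative examples (not part of the original source):
    #     [text](http://example.com/ "Title")
    #         -> <a href="http://example.com/" title="Title">text</a>
    #     ![alt](/img.png)
    #         -> <img src="/img.png" alt="alt" ...> (with the configured
    #            empty_element_suffix)
    #     [text][id] resolves through self.urls['id'] / self.titles['id']
    #     as recorded by _strip_link_definitions().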
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, basestring):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
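    # Illustrative example (not part of the original source): with
    # extras={"header-ids": "doc"}, two headers both titled "Setup" yield
    # ids "doc-setup" and "doc-setup-2"; the counter suffix keeps the
    # generated ids unique within the document.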
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, name))
_setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)
def _setext_h_sub(self, match):
n = {"=": 1, "-": 2}[match.group(2)[0]]
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(1),
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(1))
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
_atx_h_re = re.compile(r'''
^(\#{1,6}) # \1 = string of #'s
[ \t]*
(.+?) # \2 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
''', re.X | re.M)
def _atx_h_sub(self, match):
n = len(match.group(1))
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(match.group(2),
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(match.group(2))
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
text = self._setext_h_re.sub(self._setext_h_sub, text)
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
text = self._atx_h_re.sub(self._atx_h_sub, text)
return text
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
        lst_type = "ul" if match.group(3) in self._marker_ul_chars else "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
for marker_pat in (self._marker_ul, self._marker_ol):
# Re-usable pattern to match any entire ul or ol list:
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
(?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
# We use a different prefix before nested lists than top-level lists.
# See extended comment in _process_list_items().
#
# Note: There's a bit of duplication here. My original implementation
# created a scalar regex pattern as the conditional result of the test on
# $g_list_level, and then only ran the $text =~ s{...}{...}egmx
# substitution once, using the scalar as the pattern. This worked,
# everywhere except when running under MT on my hosting account at Pair
# Networks. There, this caused all rebuilds to be killed by the reaper (or
# perhaps they crashed, but that seems incredibly unlikely given that the
            # same script on the same server ran fine *except* under MT). I've spent
            # more time trying to figure out why this is happening than I'd like to
            # admit. My only guess, backed up by the fact that this workaround works,
            # is that Perl optimizes the substitution when it can figure out that the
            # pattern will never change, and when this optimization isn't on, we run
            # afoul of the reaper. Thus, the slightly redundant code that uses two
# static s/// patterns rather than one conditional pattern.
if self.list_level:
sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
text = sub_list_re.sub(self._list_sub, text)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
text = list_re.sub(self._list_sub, text)
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter = HtmlCodeFormatter(cssclass="codehilite", **formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match):
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
lexer = self._get_pygments_lexer(lexer_name)
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
if lexer:
formatter_opts = self.extras['code-color'] or {}
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
    # - to include one backtick or a run of backticks, the delimiters must
    #   be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
        (.+?) # \2 = the code span
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
        #   can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
# Now, escape characters that are magic in Markdown:
('*', self._escape_table['*']),
('_', self._escape_table['_']),
('{', self._escape_table['{']),
('}', self._escape_table['}']),
('[', self._escape_table['[']),
(']', self._escape_table[']']),
('\\', self._escape_table['\\']),
]
for before, after in replacements:
text = text.replace(before, after)
return text
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
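    # Illustrative examples (not part of the original source):
    #     "quoted"  -> &#8220;quoted&#8221;
    #     it's      -> it&#8217;s
    #     '99       -> &#8217;99
    #     foo---bar -> foo&#8212;bar, foo--bar -> foo&#8211;bar
    #     wait...   -> wait&#8230;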
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'↩</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in self._escape_table.items():
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in link_from_hash.items():
text = text.replace(hash, link)
return text
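    # Illustrative example (not part of the original source): a hypothetical
    # link_patterns entry such as
    #     (re.compile(r'issue\s+#?(\d+)', re.I),
    #      r'http://example.com/issues/\1')
    # would turn "issue 42" into
    #     <a href="http://example.com/issues/42">issue 42</a>
    # The generated links are hashed immediately so a later pattern cannot
    # match inside an earlier replacement.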
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in self._escape_table.items():
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
_toc = None
@property
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append(u'%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
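    # Illustrative example (not part of the original source): headers
    # h1 "A", h2 "B", h1 "C" produce roughly
    #     <ul>
    #     <li><a href="#a">A</a>
    #       <ul>
    #       <li><a href="#b">B</a></li>
    #       </ul></li>
    #     <li><a href="#c">C</a></li>
    #     </ul>
    # (exact indentation comes from the h_stack depth above).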
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value)
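# Illustrative example (not part of the original source):
#     _slugify(u'Hello, World!') -> u'hello-world'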
## end of http://code.activestate.com/recipes/577257/ }}}
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(flag_from_char.keys())))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line)
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print "dedent: margin=%r" % margin
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print "dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin)
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
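# Illustrative example (not part of the original source):
#     _dedent("    foo\n      bar\n")  ->  "foo\n  bar\n"
# The common margin (here four spaces) is removed from every line; tabs
# are measured with `tabsize` but are not expanded to spaces.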
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
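# Illustrative usage (not part of the original source):
#     @_memoized
#     def fib(n):
#         return n if n < 2 else fib(n-1) + fib(n-2)
# Repeated calls with the same (hashable) arguments are served from
# self.cache; unhashable arguments fall back to calling the function.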
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
      ([^<>])*? # attributes, if any
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<http://code.google.com/p/python-markdown2/wiki/Extras>.")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
for path in paths:
if opts.compare:
print "==== Markdown.pl ===="
perl_cmd = 'perl %s "%s"' % (markdown_pl, path)
o = os.popen(perl_cmd)
perl_html = o.read()
o.close()
sys.stdout.write(perl_html)
print "==== markdown2.py ===="
html = markdown_path(path, encoding=opts.encoding,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
sys.stdout.write(
html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print "==== match? %r ====" % (norm_perl_html == norm_html)
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
jdfekete/progressivis
|
refs/heads/master
|
tests/test_03_join3.py
|
1
|
from . import ProgressiveTest, skip
from progressivis import Print, Every
from progressivis.stats import Stats
from progressivis.io import CSVLoader
from progressivis.datasets import get_dataset
from progressivis.table.bin_join import BinJoin
from progressivis.table.constant import Constant
from progressivis.table.table import Table
from progressivis.table.reduce import Reduce
from progressivis.core import aio
import pandas as pd
def print_len(x):
if x is not None:
print(len(x))
class TestJoin3(ProgressiveTest):
@skip("Need fixing")
def test_join(self):
s = self.scheduler()
csv = CSVLoader(get_dataset('bigfile'), index_col=False, header=None,
scheduler=s)
stat1 = Stats(1, reset_index=True, scheduler=s)
stat1.input[0] = csv.output.result
stat2 = Stats(2, reset_index=True, scheduler=s)
stat2.input[0] = csv.output.result
stat3 = Stats(3, reset_index=True, scheduler=s)
stat3.input[0] = csv.output.result
# join=Join(scheduler=s)
# import pdb;pdb.set_trace()
join = Reduce.expand(BinJoin, "first", "second", "result",
[stat1.output.stats,
stat2.output.stats,
stat3.output.stats],
scheduler=s)
# reduce_.input[0] = stat1.output.stats
# reduce_.input[0] = stat2.output.stats
# join = reduce_.expand()
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = join.output.result
prlen = Every(proc=self.terse, constant_time=True, scheduler=s)
prlen.input[0] = csv.output.result
aio.run(s.start())
res = join.trace_stats(max_runs=1)
print(res)
def test_join_simple(self):
s = self.scheduler()
cst1 = Constant(Table(name='test_join_simple_cst1',
data=pd.DataFrame({'xmin': [1], 'xmax': [2]}),
create=True), scheduler=s)
cst2 = Constant(Table(name='test_join_simple_cst2',
data=pd.DataFrame({'ymin': [3], 'ymax': [4]}),
create=True), scheduler=s)
cst3 = Constant(Table(name='test_join_simple_cst3',
data=pd.DataFrame({'zmin': [5], 'zmax': [6]}),
create=True), scheduler=s)
# join=Join(scheduler=s)
# reduce_ = Reduce(BinJoin, "first", "second", "table", scheduler=s)
# reduce_.input[0] = cst1.output.result
# reduce_.input[0] = cst2.output.result
# reduce_.input[0] = cst3.output.result
# join = reduce_.expand()
join = Reduce.expand(BinJoin, "first", "second", "result",
[cst1.output.result,
cst2.output.result,
cst3.output.result],
scheduler=s)
pr = Print(proc=self.terse, scheduler=s)
pr.input[0] = join.output.result
aio.run(s.start())
res = join.trace_stats(max_runs=1)
print(res)
df = join.result
last = df.loc[df.index[-1]]
self.assertTrue(last['xmin'] == 1 and last['xmax'] == 2 and
last['ymin'] == 3 and last['ymax'] == 4 and
last['zmin'] == 5 and last['zmax'] == 6)
if __name__ == '__main__':
ProgressiveTest.main()
|
minhphung171093/OpenERP_V8
|
refs/heads/master
|
openerp/addons/hr_timesheet_sheet/report/hr_timesheet_report.py
|
194
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class hr_timesheet_report(osv.osv):
_inherit = "hr.timesheet.report"
_columns = {
'to_invoice': fields.many2one('hr_timesheet_invoice.factor', 'Type of Invoicing',readonly=True),
'nbr': fields.integer('# Nbr Timesheet',readonly=True),
'total_diff': fields.float('# Total Diff',readonly=True),
'total_timesheet': fields.float('# Total Timesheet',readonly=True),
'total_attendance': fields.float('# Total Attendance',readonly=True),
'department_id':fields.many2one('hr.department','Department',readonly=True),
'date_from': fields.date('Date from',readonly=True,),
'date_to': fields.date('Date to',readonly=True),
'state' : fields.selection([
('new', 'New'),
('draft','Draft'),
('confirm','Confirmed'),
('done','Done')], 'Status', readonly=True),
}
def _select(self):
return """
WITH
totals AS (
SELECT
d.sheet_id,
d.name as date,
sum(total_difference) / coalesce(sum(j.count),1) as total_diff,
sum(total_timesheet) / coalesce(sum(j.count),1) as total_timesheet,
sum(total_attendance) / coalesce(sum(j.count),1) as total_attendance
FROM hr_timesheet_sheet_sheet_day d left join (
SELECT
h.sheet_id,
a.date,
count(*)
FROM account_analytic_line a inner join hr_analytic_timesheet h ON (h.line_id=a.id)
GROUP BY h.sheet_id, a.date
) j ON (d.sheet_id = j.sheet_id AND d.name = j.date)
GROUP BY d.sheet_id, d.name
)
""" + super(hr_timesheet_report, self)._select() + """,
htss.name,
htss.date_from,
htss.date_to,
count(*) as nbr,
sum(t.total_diff) as total_diff,
sum(t.total_timesheet) as total_timesheet,
sum(t.total_attendance) as total_attendance,
aal.to_invoice,
htss.department_id,
htss.state"""
def _from(self):
return super(hr_timesheet_report, self)._from() + "left join hr_timesheet_sheet_sheet as htss ON (hat.sheet_id=htss.id) left join totals as t on (t.sheet_id = hat.sheet_id and t.date = aal.date)"
def _group_by(self):
return super(hr_timesheet_report, self)._group_by() + """,
htss.date_from,
htss.date_to,
aal.unit_amount,
aal.amount,
aal.to_invoice,
htss.name,
htss.state,
htss.id,
htss.department_id"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
vcaen/personalwebsite
|
refs/heads/master
|
app/controller/__init__.py
|
2
|
__author__ = 'vcaen'
|
krisys/django
|
refs/heads/master
|
tests/view_tests/app0/__init__.py
|
12133432
| |
emakis/erpnext
|
refs/heads/develop
|
erpnext/schools/doctype/student_group/__init__.py
|
12133432
| |
CylonicRaider/Instant
|
refs/heads/master
|
script/deanimate.py
|
1
|
#!/usr/bin/env python3
# -*- coding: ascii -*-
"""
A script removing animations from SVG graphics.
"""
import sys, os, re
# etree fails utterly at producing nice-looking XML
from xml.dom import minidom
def process(inpt, outp):
def traverse(node):
for child in node.childNodes:
if child.nodeType != minidom.Node.ELEMENT_NODE:
continue
elif child.tagName in ('animate', 'animateTransform'):
node.removeChild(child)
elif child.tagName in ('style', 'script'):
if child.getAttribute('key') == 'animation':
node.removeChild(child)
else:
traverse(child)
node.normalize()
if len(node.childNodes) == 0: return
for child in (node.childNodes[0], node.childNodes[-1]):
if child.nodeType != minidom.Node.TEXT_NODE:
continue
if not child.data.isspace() or child.data.count('\n') <= 1:
continue
if len(node.childNodes) == 1:
node.removeChild(child)
return
child.data = re.sub(r'\n.*\n', r'\n', child.data)
document = minidom.parse(inpt)
traverse(document.documentElement)
outp.write('<?xml version="1.0" encoding="utf-8"?>\n')
document.documentElement.writexml(outp)
outp.write('\n')
def main():
if len(sys.argv) != 3:
sys.stderr.write('USAGE: %s input output\n' % sys.argv[0])
sys.stderr.flush()
sys.exit(0)
with open(sys.argv[1]) as inpt, open(sys.argv[2], 'w') as outp:
process(inpt, outp)
if __name__ == '__main__': main()
|
y12uc231/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/__init__.py
|
101
|
"""
Integration tests for xmodule.
Contains:
1. BaseTestXmodule, a class that provides a course and users
   for testing Xmodules with the mongo store.
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.test.client import Client
from edxmako.shortcuts import render_to_string
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE
from xblock.field_data import DictFieldData
from xmodule.tests import get_test_system, get_test_descriptor_system
from opaque_keys.edx.locations import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from lms.djangoapps.lms_xblock.runtime import quote_slashes
class BaseTestXmodule(ModuleStoreTestCase):
"""Base class for testing Xmodules with mongo store.
This class prepares course and users for tests:
1. create test course;
2. create, enroll and login users for this course;
Any xmodule should overwrite only next parameters for test:
1. CATEGORY
2. DATA or METADATA
3. MODEL_DATA
4. COURSE_DATA and USER_COUNT if needed
This class should not contain any tests, because CATEGORY
should be defined in child class.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
USER_COUNT = 2
COURSE_DATA = {}
# Data from YAML common/lib/xmodule/xmodule/templates/NAME/default.yaml
CATEGORY = "vertical"
DATA = ''
    # METADATA must be overridden by every instance that uses it. Otherwise,
    # if we change it in the tests, it will be changed for all other
    # instances of the parent class.
METADATA = {}
MODEL_DATA = {'data': '<some_module></some_module>'}
def new_module_runtime(self):
"""
Generate a new ModuleSystem that is minimally set up for testing
"""
return get_test_system(course_id=self.course.id)
def new_descriptor_runtime(self):
runtime = get_test_descriptor_system()
runtime.get_block = modulestore().get_item
return runtime
def initialize_module(self, **kwargs):
kwargs.update({
'parent_location': self.section.location,
'category': self.CATEGORY
})
self.item_descriptor = ItemFactory.create(**kwargs)
self.runtime = self.new_descriptor_runtime()
field_data = {}
field_data.update(self.MODEL_DATA)
student_data = DictFieldData(field_data)
self.item_descriptor._field_data = LmsFieldData(self.item_descriptor._field_data, student_data)
self.item_descriptor.xmodule_runtime = self.new_module_runtime()
#self.item_module = self.item_descriptor.xmodule_runtime.xmodule_instance
#self.item_module is None at this time
self.item_url = self.item_descriptor.location.to_deprecated_string()
def setup_course(self):
self.course = CourseFactory.create(data=self.COURSE_DATA)
# Turn off cache.
modulestore().request_cache = None
modulestore().metadata_inheritance_cache_subsystem = None
chapter = ItemFactory.create(
parent_location=self.course.location,
category="sequential",
)
self.section = ItemFactory.create(
parent_location=chapter.location,
category="sequential"
)
# username = robot{0}, password = 'test'
self.users = [
UserFactory.create()
for dummy0 in range(self.USER_COUNT)
]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
        # log in all users for access to the Xmodule
self.clients = {user.username: Client() for user in self.users}
self.login_statuses = [
self.clients[user.username].login(
username=user.username, password='test')
for user in self.users
]
self.assertTrue(all(self.login_statuses))
def setUp(self):
super(BaseTestXmodule, self).setUp()
self.setup_course()
self.initialize_module(metadata=self.METADATA, data=self.DATA)
def get_url(self, dispatch):
"""Return item url with dispatch."""
return reverse(
'xblock_handler',
args=(self.course.id.to_deprecated_string(), quote_slashes(self.item_url), 'xmodule_handler', dispatch)
)
class XModuleRenderingTestBase(BaseTestXmodule):
def new_module_runtime(self):
"""
Create a runtime that actually does html rendering
"""
runtime = super(XModuleRenderingTestBase, self).new_module_runtime()
runtime.render_template = render_to_string
return runtime
|
yxtj/Neuron
|
refs/heads/master
|
dataAnalysis/activation.py
|
2
|
import math
import process_base as base
import myalgorithm as alg
def cal_dif_pair(line_from,line_to,n_dig=4):
res=[]
length_f=len(line_from)
length_t=len(line_to)
if length_f==0 or length_t==0:
return res
j=0
start_p=0
while start_p<length_f and line_from[start_p]<line_to[0]:
start_p+=1
for i in range(start_p+1,length_f):
t=line_from[i]
v=round(t-line_from[i-1],n_dig)
while j+1<length_t and line_to[j+1]<t:
j+=1
#TODO: finish setting
if j==length_t:
break
res.append((v,round(t-line_to[j],n_dig)))
return res
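# Illustrative example (not part of the original source): for spike trains
#     line_from = [1.0, 2.0, 3.0]   line_to = [0.5, 2.5]
# cal_dif_pair() yields, for each usable line_from spike, a pair of
# (inter-spike interval, delay since the latest preceding line_to spike):
#     [(1.0, 1.5), (1.0, 0.5)]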
def activation_time_pair(line_from,line_to,n_dig=4):
lf=len(line_from)
lt=len(line_to)
res=[]
p=0
for i in range(lf):
v=line_from[i]
p=alg.lower_bound(line_to,p,lt,v)
if p==lt:
break
res.append(round(line_to[p]-v,n_dig))
return res
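# Illustrative example (not part of the original source): with
#     line_from = [1.0, 2.0]   line_to = [1.5, 3.0]
# each source spike is matched to the first target spike at or after it
# (assuming alg.lower_bound is a standard binary search), giving
#     [0.5, 1.0]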
def activation_time_one(line_from,data,n_dig=4):
lf=len(line_from)
n_nodes=len(data)
lt=[len(l) for l in data]
res=[]
    p=[0 for i in range(n_nodes)]  # search start per node (0, not None, so lower_bound gets a valid index)
for i in range(lf):
temp=[0 for j in range(n_nodes)]
v=line_from[i]
for j in range(n_nodes):
            p[j]=alg.lower_bound(data[j],p[j],lt[j],v)  # was `line_to`, which is undefined here
            if p[j]==lt[j]:
                continue
            temp[j]=round(data[j][p[j]]-v,n_dig)
res.append(temp)
return res
'''Calculate the nearest activation time from all neurons to a specific neuron.
'''
def cal_dif_one_table(line,data,rng,n_dig=4):
n=len(data)
if n!=len(rng):
        raise ValueError('Unmatched length of data ('+str(n)+') and rng ('+str(len(rng))+').')
length=len(line)
res=[]
if length==0:
return res
start_j=[rng[i][0] for i in range(n)]
end_j=[rng[i][1] for i in range(n)]
start_i=0
for i in range(n):
while start_i<length and line[start_i]<data[i][0]:
start_i+=1
for i in range(start_i+1,length):
t=line[i]
v=round(t-line[i-1],n_dig)
temp=[v]
for j in range(n):
while start_j[j]+1<end_j[j] and data[j][start_j[j]+1]<t:
start_j[j]+=1
            #when the loop finishes, data[j][start_j[j]] < t <= data[j][start_j[j]+1]
if start_j[j]==end_j[j]:
break
temp.append(round(t-data[j][start_j[j]],n_dig))
res.append(temp)
return res
def cal_all_dif_pair(data,n_dig=4):
n=len(data)
#res=[[[] for j in range(n)] for i in range(n)]
res=[]
for i in range(n):
temp=[]
for j in range(n):
l=cal_dif_pair(data[i],data[j],n_dig)
temp.append(l)
res.append(temp)
return res
def write_dif_pair_one(filename,pair_data):
f=open(filename,'w')
for x,y in pair_data:
f.write(str(x)+' '+str(y)+'\n')
f.close()
def write_dif_pair_all(filename_prefix,data):
n=len(data)
for i in range(n):
for j in range(n):
fn=filename_prefix+'_'+str(i+1)+'_'+str(j+1)+'.txt'
write_dif_pair_one(fn,data[i][j])
|
dcramer/django-compositepks
|
refs/heads/master
|
tests/modeltests/update/models.py
|
8
|
"""
Tests for the update() queryset method that allows in-place, multi-object
updates.
"""
from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=20)
value = models.CharField(max_length=20)
another_value = models.CharField(max_length=20, blank=True)
def __unicode__(self):
return unicode(self.name)
class RelatedPoint(models.Model):
name = models.CharField(max_length=20)
data = models.ForeignKey(DataPoint)
def __unicode__(self):
return unicode(self.name)
__test__ = {'API_TESTS': """
>>> DataPoint(name="d0", value="apple").save()
>>> DataPoint(name="d2", value="banana").save()
>>> d3 = DataPoint.objects.create(name="d3", value="banana")
>>> RelatedPoint(name="r1", data=d3).save()
Objects are updated by first filtering the candidates into a queryset and then
calling the update() method. It executes immediately and returns the number of
rows updated.
>>> DataPoint.objects.filter(value="apple").update(name="d1")
1
>>> DataPoint.objects.filter(value="apple")
[<DataPoint: d1>]
We can update multiple objects at once.
>>> DataPoint.objects.filter(value="banana").update(value="pineapple")
2
>>> DataPoint.objects.get(name="d2").value
u'pineapple'
Foreign key fields can also be updated, although you can only update the object
referred to, not anything inside the related object.
>>> d = DataPoint.objects.get(name="d1")
>>> RelatedPoint.objects.filter(name="r1").update(data=d)
1
>>> RelatedPoint.objects.filter(data__name="d1")
[<RelatedPoint: r1>]
Multiple fields can be updated at once.
>>> DataPoint.objects.filter(value="pineapple").update(value="fruit", another_value="peaches")
2
>>> d = DataPoint.objects.get(name="d2")
>>> d.value, d.another_value
(u'fruit', u'peaches')
In the rare case you want to update every instance of a model, update() is also
a manager method.
>>> DataPoint.objects.update(value='thing')
3
>>> DataPoint.objects.values('value').distinct()
[{'value': u'thing'}]
We do not support update on already sliced query sets.
>>> DataPoint.objects.all()[:2].update(another_value='another thing')
Traceback (most recent call last):
...
AssertionError: Cannot update a query once a slice has been taken.
"""
}
|
jazkarta/edx-platform
|
refs/heads/master
|
lms/djangoapps/class_dashboard/tests/test_views.py
|
133
|
"""
Tests for class dashboard (Metrics tab in instructor dashboard)
"""
import json
from django.test.client import RequestFactory
from mock import patch
from nose.plugins.attrib import attr
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from class_dashboard import views
from student.tests.factories import AdminFactory
@attr('shard_1')
class TestViews(ModuleStoreTestCase):
"""
Tests related to class_dashboard/views.py
"""
def setUp(self):
super(TestViews, self).setUp()
self.request_factory = RequestFactory()
self.request = self.request_factory.get('')
self.request.user = None
self.simple_data = {'error': 'error'}
@patch('class_dashboard.views.has_instructor_access_for_class')
def test_all_problem_grade_distribution_has_access(self, has_access):
"""
Test returns proper value when have proper access
"""
has_access.return_value = True
response = views.all_problem_grade_distribution(self.request, 'test/test/test')
self.assertEqual(json.dumps(self.simple_data), response.content)
@patch('class_dashboard.views.has_instructor_access_for_class')
def test_all_problem_grade_distribution_no_access(self, has_access):
"""
Test for no access
"""
has_access.return_value = False
response = views.all_problem_grade_distribution(self.request, 'test/test/test')
self.assertEqual("{\"error\": \"Access Denied: User does not have access to this course\'s data\"}", response.content)
@patch('class_dashboard.views.has_instructor_access_for_class')
def test_all_sequential_open_distribution_has_access(self, has_access):
"""
Test returns proper value when have proper access
"""
has_access.return_value = True
response = views.all_sequential_open_distrib(self.request, 'test/test/test')
self.assertEqual(json.dumps(self.simple_data), response.content)
@patch('class_dashboard.views.has_instructor_access_for_class')
def test_all_sequential_open_distribution_no_access(self, has_access):
"""
Test for no access
"""
has_access.return_value = False
response = views.all_sequential_open_distrib(self.request, 'test/test/test')
self.assertEqual("{\"error\": \"Access Denied: User does not have access to this course\'s data\"}", response.content)
@patch('class_dashboard.views.has_instructor_access_for_class')
def test_section_problem_grade_distribution_has_access(self, has_access):
"""
Test returns proper value when have proper access
"""
has_access.return_value = True
response = views.section_problem_grade_distrib(self.request, 'test/test/test', '1')
self.assertEqual(json.dumps(self.simple_data), response.content)
@patch('class_dashboard.views.has_instructor_access_for_class')
def test_section_problem_grade_distribution_no_access(self, has_access):
"""
Test for no access
"""
has_access.return_value = False
response = views.section_problem_grade_distrib(self.request, 'test/test/test', '1')
self.assertEqual("{\"error\": \"Access Denied: User does not have access to this course\'s data\"}", response.content)
def test_sending_deprecated_id(self):
course = CourseFactory.create()
instructor = AdminFactory.create()
self.request.user = instructor
response = views.all_sequential_open_distrib(self.request, course.id.to_deprecated_string())
self.assertEqual('[]', response.content)
response = views.all_problem_grade_distribution(self.request, course.id.to_deprecated_string())
self.assertEqual('[]', response.content)
response = views.section_problem_grade_distrib(self.request, course.id.to_deprecated_string(), 'no section')
self.assertEqual('{"error": "error"}', response.content)
|
orion1024/Sick-Beard
|
refs/heads/master
|
lib/hachoir_parser/game/__init__.py
|
90
|
from lib.hachoir_parser.game.zsnes import ZSNESFile
from lib.hachoir_parser.game.spider_man_video import SpiderManVideoFile
from lib.hachoir_parser.game.laf import LafFile
from lib.hachoir_parser.game.blp import BLP1File, BLP2File
|
cecep-edu/edx-platform
|
refs/heads/eucalyptus.2
|
common/djangoapps/microsite_configuration/tests/backends/test_base.py
|
38
|
"""
Test Microsite base backends.
"""
import logging
from mock import patch
from django.conf import settings
from django.test import TestCase
from microsite_configuration import microsite
from microsite_configuration.backends.base import (
AbstractBaseMicrositeBackend,
BaseMicrositeBackend
)
log = logging.getLogger(__name__)
class NullBackend(AbstractBaseMicrositeBackend):
"""
A class that does nothing but inherit from the base class.
We created this class to test the methods of the AbstractBaseMicrositeBackend class.
Since an abstract class cannot be instantiated, we created this wrapper class.
"""
def set_config_by_domain(self, domain):
"""
For a given request domain, find a match in our microsite configuration
and make it available to the complete django request process
"""
return super(NullBackend, self).set_config_by_domain(domain)
def get_template_path(self, relative_path, **kwargs):
"""
Returns a path (string) to a Mako template. The path can point to an
override, or it simply echoes back what was passed in, which is expected to be a string.
"""
return super(NullBackend, self).get_template_path(relative_path, **kwargs)
def get_value(self, val_name, default=None, **kwargs):
"""
Returns a value associated with the request's microsite, if present
"""
return super(NullBackend, self).get_value(val_name, default, **kwargs)
def get_dict(self, dict_name, default=None, **kwargs):
"""
Returns a dictionary produced by merging the request's microsite and
the default value.
This can be used, for example, to return a merged dictionary from the
settings.FEATURES dict, including values defined at the microsite
"""
return super(NullBackend, self).get_dict(dict_name, default, **kwargs)
def is_request_in_microsite(self):
"""
Returns True/False depending on whether the current request is within a microsite
"""
return super(NullBackend, self).is_request_in_microsite()
def has_override_value(self, val_name):
"""
Returns True/False whether a Microsite has a definition for the
specified named value
"""
return super(NullBackend, self).has_override_value(val_name)
def get_all_config(self):
"""
This returns the configuration of all microsites.
This can be used, for example, to do filtering
"""
return super(NullBackend, self).get_all_config()
def get_value_for_org(self, org, val_name, default=None):
"""
This returns a configuration value for a microsite which has an org_filter that matches
what is passed in
"""
return super(NullBackend, self).get_value_for_org(org, val_name, default)
def get_all_orgs(self):
"""
This returns a set of orgs that are considered within a microsite. This can be used,
for example, to do filtering
"""
return super(NullBackend, self).get_all_orgs()
def clear(self):
"""
Clears out any microsite configuration from the current request/thread
"""
return super(NullBackend, self).clear()
class AbstractBaseMicrositeBackendTests(TestCase):
"""
Go through and test the base abstract class
"""
def test_cant_create_instance(self):
"""
We shouldn't be able to create an instance of the base abstract class
"""
with self.assertRaises(TypeError):
AbstractBaseMicrositeBackend() # pylint: disable=abstract-class-instantiated
def test_not_yet_implemented(self):
"""
Make sure all base methods raise a NotImplementedError exception
"""
backend = NullBackend()
with self.assertRaises(NotImplementedError):
backend.set_config_by_domain(None)
with self.assertRaises(NotImplementedError):
backend.get_value(None, None)
with self.assertRaises(NotImplementedError):
backend.get_dict(None, None)
with self.assertRaises(NotImplementedError):
backend.is_request_in_microsite()
with self.assertRaises(NotImplementedError):
backend.has_override_value(None)
with self.assertRaises(NotImplementedError):
backend.get_all_config()
with self.assertRaises(NotImplementedError):
backend.clear()
with self.assertRaises(NotImplementedError):
backend.get_value_for_org(None, None, None)
with self.assertRaises(NotImplementedError):
backend.get_all_orgs()
@patch(
'microsite_configuration.microsite.BACKEND',
microsite.get_backend(
'microsite_configuration.backends.base.BaseMicrositeBackend', BaseMicrositeBackend
)
)
class BaseMicrositeBackendTests(TestCase):
"""
Go through and test the BaseMicrositeBackend class for behavior which is not
overridden in subclasses
"""
def test_enable_microsites_pre_startup(self):
"""
Tests that microsite.enable_microsites_pre_startup works as expected.
"""
# remove microsite root directory paths first
settings.DEFAULT_TEMPLATE_ENGINE['DIRS'] = [
path for path in settings.DEFAULT_TEMPLATE_ENGINE['DIRS']
if path != settings.MICROSITE_ROOT_DIR
]
with patch('microsite_configuration.backends.base.BaseMicrositeBackend.has_configuration_set',
return_value=False):
microsite.enable_microsites_pre_startup(log)
self.assertNotIn(settings.MICROSITE_ROOT_DIR,
settings.DEFAULT_TEMPLATE_ENGINE['DIRS'])
with patch('microsite_configuration.backends.base.BaseMicrositeBackend.has_configuration_set',
return_value=True):
microsite.enable_microsites_pre_startup(log)
self.assertIn(settings.MICROSITE_ROOT_DIR,
settings.DEFAULT_TEMPLATE_ENGINE['DIRS'])
|
hisato-kawaji/pyrowork-core
|
refs/heads/master
|
app/functions/sample/__init__.py
|
12133432
| |
centwave/jg82ksgvqkuan
|
refs/heads/master
|
django/conf/locale/es_AR/__init__.py
|
12133432
| |
richardcs/ansible
|
refs/heads/devel
|
test/units/modules/network/nxos/__init__.py
|
12133432
| |
ncliam/serverpos
|
refs/heads/master
|
openerp/addons/crm/report/report_businessopp.py
|
377
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os, time
import random
import StringIO
from openerp.report.render import render
from openerp.report.interface import report_int
from pychart import *
theme.use_color = 1
class external_pdf(render):
""" Generate External PDF """
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type = 'pdf'
def _render(self):
return self.pdf
class report_custom(report_int):
""" Create Custom Report """
def create(self, cr, uid, ids, datas, context=None):
""" @param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of IDs
@param context: A standard dictionary for contextual values """
assert len(ids), 'You should provide some ids!'
responsible_data = {}
responsible_names = {}
data = []
minbenef = 999999999999999999999
maxbenef = 0
cr.execute('select probability, planned_revenue, planned_cost, user_id,\
res_users.name as name from crm_case left join res_users on \
(crm_case.user_id=res_users.id) where crm_case.id IN %s order by user_id',(tuple(ids),))
res = cr.dictfetchall()
for row in res:
proba = (row['probability'] or 0) / 100.0
cost = row['planned_cost'] or 0
revenue = row['planned_revenue'] or 0
userid = row['user_id'] or 0
benefit = revenue - cost
if benefit > maxbenef:
maxbenef = benefit
if benefit < minbenef:
minbenef = benefit
tuple_benefit = (proba * 100, benefit)
responsible_data.setdefault(userid, [])
responsible_data[userid].append(tuple_benefit)
tuple_benefit = (proba * 100, cost, benefit)
data.append(tuple_benefit)
responsible_names[userid] = (row['name'] or '/').replace('/','//')
minbenef -= maxbenef * 0.05
maxbenef *= 1.2
ratio = 0.5
minmaxdiff2 = (maxbenef - minbenef)/2
for l in responsible_data.itervalues():
for i in range(len(l)):
percent, benef = l[i]
proba = percent/100
current_ratio = 1 + (ratio-1) * proba
newbenef = minmaxdiff2 + ((benef - minbenef - minmaxdiff2) * current_ratio)
l[i] = (percent, newbenef)
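# In effect each benefit is pulled toward the mid-point of the plotted
# range: with ratio = 0.5, current_ratio falls from 1 at 0% probability
# to 0.5 at 100%, so near-certain opportunities are compressed halfway
# toward minmaxdiff2, flattening the top of the "pipeline" funnel.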
#TODO:
#- group by probability category in the top graph
#- variable scale
pdf_string = StringIO.StringIO()
can = canvas.init(fname = pdf_string, format = 'pdf')
chart_object.set_defaults(line_plot.T, line_style=None)
xaxis = axis.X(label=None, format="%d%%", tic_interval=20)
yaxis = axis.Y()
x_range_a, x_range_b = (0, 100)
y_range_a, y_range_b = (minbenef, maxbenef)
if y_range_a == 0.0:
y_range_a += 0.0001
ar = area.T(
size = (300,200),
y_grid_interval = 10000,
y_grid_style = None,
x_range = (x_range_a, x_range_b),
y_range = (y_range_a, y_range_b),
x_axis = xaxis,
y_axis = None,
legend = legend.T()
)
#import pydb; pydb.debugger()
for k, d in responsible_data.iteritems():
fill = fill_style.Plain(bgcolor=color.T(r=random.random(), g=random.random(), b=random.random()))
tick = tick_mark.Square(size=6, fill_style=fill)
ar.add_plot(line_plot.T(label=responsible_names[k], data=d, tick_mark=tick))
ar.draw(can)
# second graph (top right)
ar = area.T(legend = legend.T(),
size = (200,100),
loc = (100,250),
x_grid_interval = lambda min, max: [40,60,80,100],
x_grid_style = line_style.gray70_dash1,
x_range = (33, 100),
x_axis = axis.X(label=None, minor_tic_interval = lambda min,max: [50, 70, 90],\
format=lambda x: ""),
y_axis = axis.Y(label="Planned amounts"))
bar_plot.fill_styles.reset()
plot1 = bar_plot.T(label="Cost", data=data, fill_style=fill_style.red)
plot2 = bar_plot.T(label="Revenue", data=data, hcol=2, stack_on = plot1, fill_style=fill_style.blue)
ar.add_plot(plot1, plot2)
ar.draw(can)
# diagonal "pipeline" lines
can.line(line_style.black, 0, 200, 300, 150)
can.line(line_style.black, 0, 0, 300, 50)
# vertical lines
ls = line_style.T(width=0.4, color=color.gray70, dash=(2, 2))
for x in range(120, 300, 60):
can.line(ls, x, 0, x, 250)
# draw arrows to the right
a = arrow.fat1
for y in range(60, 150, 10):
a.draw([(285, y), (315, y)], can=can)
# close canvas so that the file is written to "disk"
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return (self.obj.pdf, 'pdf')
report_custom('report.crm.case')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
fanout/django-eventstream
|
refs/heads/master
|
setup.py
|
1
|
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
readme = f.read()
install_requires = []
install_requires.extend(['PyJWT>=1.5,<3', 'gripcontrol>=4.0,<5', 'django_grip>=3.0,<4', 'Werkzeug>=0.12,<1', 'six>=1.10,<2'])
setup(
name='django-eventstream',
version='4.2.0',
description='Server-Sent Events for Django',
long_description=readme,
long_description_content_type='text/markdown',
author='Justin Karneges',
author_email='justin@fanout.io',
url='https://github.com/fanout/django-eventstream',
license='MIT',
zip_safe=False,
packages=['django_eventstream', 'django_eventstream.migrations', 'django_eventstream.management', 'django_eventstream.management.commands'],
package_data={'django_eventstream': ['static/django_eventstream/*']},
install_requires=install_requires,
tests_require=['Django>=2.0'],
test_suite='tests.runtests.runtests',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Framework :: Django',
]
)
|
ondra-novak/blink
|
refs/heads/nw
|
LayoutTests/http/tests/websocket/fragmented-binary-frames_wsh.py
|
53
|
from mod_pywebsocket import common
from mod_pywebsocket import stream
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
messages_to_send = [['Hello, ', 'world!'],
['', 'Hello, ', '', 'world!', ''],
['', '', ''],
[chr(i) for i in xrange(256)]]
for message_list in messages_to_send:
for index, message in enumerate(message_list):
# FIXME: Should use better API to send binary messages when pywebsocket supports it.
if index == 0:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_CONTINUATION
if index < len(message_list) - 1:
final = 0
else:
final = 1
header = stream.create_header(opcode, len(message), final, 0, 0, 0, 0)
request.connection.write(header + message)
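# For reference, the frames produced for ['Hello, ', 'world!'] are
# BINARY('Hello, ', fin=0) followed by CONTINUATION('world!', fin=1);
# only the last fragment of each message carries the final bit.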
|
topazproject/topaz
|
refs/heads/master
|
topaz/utils/cache.py
|
3
|
from rpython.rlib.objectmodel import specialize
class Cache(object):
def __init__(self, space):
self.space = space
self.contents = {}
@specialize.memo()
def getorbuild(self, key):
try:
return self.contents[key]
except KeyError:
builder = self._build(key)
self.contents[key] = builder.next()
try:
builder.next()
except StopIteration:
pass
else:
raise RuntimeError("generator didn't stop")
return self.contents[key]
def _freeze_(self):
return True
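# A minimal sketch of the builder contract (demo names only): _build(key)
# must be a generator that yields the cached value exactly once; a second
# yield makes getorbuild raise RuntimeError("generator didn't stop").
#
#     class DemoCache(Cache):
#         def _build(self, key):
#             yield key.upper()
#
#     DemoCache(space=None).getorbuild('abc')  # -> 'ABC', then memoized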
|
ninjaotoko/dynaform
|
refs/heads/master
|
dynaform/models.py
|
1
|
# -*- coding: utf-8 -*-
import StringIO
from django import forms
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.contrib.sites.models import Site
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.mail import EmailMultiAlternatives
from django.core.serializers.json import DjangoJSONEncoder
from django.template import Context, Template, RequestContext
from django.template.defaultfilters import slugify, force_escape, escape, safe
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_unicode, python_2_unicode_compatible
#from djblog.common.models import MultiSiteBaseModel, GenericRelationModel
from dynaform.forms.widgets import DYNAFORM_FIELDS, DYNAFORM_WIDGETS
#from django.contrib.postgres.fields import HStoreField
try:
import json
except ImportError:
from django.utils import simplejson as json
import logging
log = logging.getLogger(__name__)
class JsonField(models.Field):
"""
JSON Field
"""
#__metaclass__ = models.SubfieldBase
serialize_to_string = True
def get_internal_type(self):
return "TextField"
def value_to_string(self, obj):
return self.get_prep_value(self._get_val_from_obj(obj))
def get_prep_value(self, value):
if value:
if isinstance(value, StringIO.StringIO):
value = value.getvalue()
stream = StringIO.StringIO()
json.dump(value, stream, cls=DjangoJSONEncoder)
value = stream.getvalue()
stream.close()
return value
return None
def to_python(self, value):
if value == "":
return None
if isinstance(value, basestring):
value = json.loads(value)
elif isinstance(value, StringIO.StringIO):
value = json.load(value)
return value
class MultiSiteBaseManager(models.Manager):
def get_queryset(self, *args, **kwargs):
"""
Records for the current site, or with no site set
"""
qs = super(MultiSiteBaseManager, self).get_queryset(*args, **kwargs)
qs = qs.filter(models.Q(site__id__in=[settings.SITE_ID,]) | models.Q(site__isnull=True))
return qs
def get_for_lang(self, *args, **kwargs):
"""
Records for the current language, or with no language set
"""
qs = self.get_queryset(*args, **kwargs)
if 'django.middleware.locale.LocaleMiddleware' in getattr(settings, 'MIDDLEWARE_CLASSES', []):
return qs.filter(models.Q(lang__iexact=translation.get_language()) | models.Q(lang__exact=''))
else:
log.warning('NO get for lang %s', translation.get_language())
return qs
def get_for_site_or_none(self, *args, **kwargs):
"""
Records for the current site only
"""
qs = super(MultiSiteBaseManager, self).get_queryset(*args, **kwargs)
return qs.filter(site__id__in=[settings.SITE_ID,])
class MultiSiteBaseManagerAdmin(models.Manager):
pass
class MultiSiteBaseModel(models.Model):
"""
Base model for multi-site and multi-language support
"""
lang = models.CharField(max_length=20, blank=True, choices=settings.LANGUAGES)
site = models.ManyToManyField(Site, blank=True, null=True, related_name="%(app_label)s_%(class)s_related")
# the first manager declared is the one later used as _default_manager
objects_for_admin = MultiSiteBaseManagerAdmin()
objects = MultiSiteBaseManager()
class Meta:
abstract = True
class GenericRelationManager(models.Manager):
def for_model(self, model):
"""
Filter for the given model, accepting either an instance or a class
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_queryset().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_unicode(model._get_pk_val()))
return qs
class GenericRelationModel(models.Model):
content_type = models.ForeignKey(ContentType, blank=True, null=True, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
object_pk = models.PositiveIntegerField(_('object PK'), blank=True, null=True)
content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
objects = GenericRelationManager()
def __unicode__(self):
return u"%s" % self.content_object
class Meta:
abstract = True
class DynaFormTracking(models.Model):
pub_date = models.DateTimeField(auto_now=True, verbose_name=_(u"Fecha de creación"))
site = models.ForeignKey(Site)
lang = models.CharField(max_length=20, choices=settings.LANGUAGES, default=settings.LANGUAGE_CODE.lower())
sender = models.CharField(max_length=200)
utm_source = models.CharField(max_length=200, blank=True, null=True)
utm_medium = models.CharField(max_length=200, blank=True, null=True)
utm_campaign = models.CharField(max_length=200, blank=True, null=True)
data = JsonField()
#data = HStoreField()
object_form = models.ForeignKey('DynaFormForm', blank=True, null=True)
def __unicode__(self, *args, **kwargs):
return u"%s %s" % (self.pub_date, self.sender)
def save(self, *args, **kwargs):
super(DynaFormTracking, self).save(*args, **kwargs)
def get_data(self):
if isinstance(self.data, dict):
return self.data
elif isinstance(self.data, (list, tuple)):
return dict(zip(xrange(len(self.data)),self.data))
#from south.modelsinspector import add_introspection_rules
#add_introspection_rules([], ["^nebula\.dynaform\.models\.JsonField"])
################################################################################
# Base Model DynaForm
################################################################################
class DynaFormField(GenericRelationModel):
field_name = models.SlugField(_(u"Identificador del Campo"), max_length=200, help_text=_(u"nombre del campo solo letras minúsculas, guinobajo y numeros "))
field_label = models.CharField(_(u"Etiqueta del Campo"), max_length=200, help_text=_(u"nombre visible del campo"))
field_type = models.CharField(max_length=100, choices=DYNAFORM_FIELDS)
field_widget = models.CharField(max_length=100, choices=DYNAFORM_WIDGETS, blank=True, null=True)
field_help = models.CharField(_(u"Texto descripción"), max_length=200, blank=True)
is_required = models.BooleanField(_(u"Requerido"), default=True)
is_hidden = models.BooleanField(_(u"Oculto"), default=False)
default_value = models.CharField(_(u"Valor por defecto"), max_length=200, blank=True, help_text=_(u"Se pueden usar variables del contexto {{ object }}, {{ sites }}, etc"))
choices = models.TextField(_(u"Lista de valores"), blank=True, help_text=_(u"Lista de \"valor\", \"título\" separada por el delimitador y por línea"))
choices_delimiter = models.CharField(_(u"Delimitador de valores por defecto es |"), max_length=1, blank=True, default='|')
##########################################################################
# Choices por Modelo
##########################################################################
choices_queryset = models.ForeignKey(ContentType, verbose_name=_(u"Modelo de Datos"), blank=True, null=True)
choices_queryset_filter = models.CharField(_(u"Filtros"), max_length=200, blank=True)
choices_queryset_empty_label = models.CharField(_(u"Valor por defecto"), max_length=100, blank=True, default="------")
choices_queryset_label = models.CharField(_(u"Formato"), max_length=100, blank=True, help_text=_(u"puede ser cualquier campo del modelo en formato, \"%(nombre)s, %(apellido)s\""))
choices_related_field = models.ForeignKey('self', blank=True, null=True)
field_order = models.PositiveSmallIntegerField(default=1)
class Meta:
ordering = ['field_order', ]
def __unicode__(self):
return unicode(self.field_name)
def choices_queryset_queryset(self, *args, **kwargs):
"""
Resolve the model and build the queryset to filter on later
"""
import re
and_split = re.compile('(?:\s+AND\s+)')
qs = []
if self.choices_queryset and self.field_type in \
("ModelChoiceField","ModelMultipleChoiceField"):
qs = self.choices_queryset.get_all_objects_for_this_type()
if self.choices_queryset_filter:
filter_args = dict([f.split('=') for f in self.choices_queryset_filter.split(',')])
# testing AND y OR
# and_split.split("name__in=[1,2,4,5, 'AND', ' AND THEN...'] AND id__gt=2")
# ["name__in=[1,2,4,5, 'AND ']", ' AND ', 'id__gt=2]
# print and_split.split(self.choices_queryset_filter)
# filter_args = dict([f.split('=') for f in and_split.split(self.choices_queryset_filter)])
if filter_args:
qs = qs.filter(**filter_args)
return qs
@python_2_unicode_compatible
class DynaFormTemplate(MultiSiteBaseModel):
"""
Templates for dynamic forms: the subject, the body and the form itself
"""
TEMPLATE_TYPES_SUBJECT = "subject"
TEMPLATE_TYPES_BODY = "body"
TEMPLATE_TYPES_FORM = "form"
DYNAFORM_TEMPLATE_TYPES = (
(TEMPLATE_TYPES_SUBJECT, 'Email Subject'),
(TEMPLATE_TYPES_BODY, 'Email Body'),
(TEMPLATE_TYPES_FORM, 'Form Template'),
)
multisite_unique_together = ('slug',)
template_type = models.CharField(_(u"Tipo de template"), max_length=100,
choices=DYNAFORM_TEMPLATE_TYPES, default=TEMPLATE_TYPES_FORM)
name = models.CharField(_(u"Nombre del template"), max_length=100, help_text=_(u"ej: Subject Contacto"))
slug = models.SlugField(max_length=100)
html = models.TextField(_(u"HTML del Template"), help_text=_(u"parsea del contexto, y templatetags"))
is_plain = models.BooleanField(choices=[
(True, 'Texto Plano'),
(False, 'HTML')
], default=True)
def __str__(self):
return u"{name}".format(**self.__dict__)
def __unicode__(self):
return unicode(self.name)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(DynaFormTemplate, self).save(*args, **kwargs)
def get_template(self):
return Template(self.html)
def render(self, context):
return self.get_template().render(context)
class DynaFormForm(MultiSiteBaseModel):
"""
Create dynamic forms from the database
"""
multisite_unique_together = ('slug',)
name = models.CharField(_(u"Nombre del form"), max_length=100)
slug = models.SlugField(max_length=100)
is_active = models.BooleanField(_(u"Es activo"), default=True, help_text=_(u"activa para usar en el frontend"))
form_title = models.CharField(_(u"Título del form"), max_length=200)
form_template = models.ForeignKey(DynaFormTemplate, related_name="dynaform_form_template_related", blank=True, null=True)
##########################################################################
# Enviar mail al guardar
##########################################################################
send_email = models.BooleanField(_("Enviar mail"), default=True)
from_email = models.CharField(max_length=100, default=settings.DEFAULT_FROM_EMAIL, blank=True)
# DEPRECATED
#recipient_list = models.TextField(_(u"Lista de destinatarios"), default=settings.MANAGERS, blank=True,
# help_text=_(u"ej: lista separada por líneas y coma.<br>Juan Pérez, juanperez@dominio.com<br>Maria Gomez, mariagomez@dominio.com"))
subject_template = models.ForeignKey(DynaFormTemplate,
related_name="dynaform_subject_template_related",
limit_choices_to={
'template_type': DynaFormTemplate.TEMPLATE_TYPES_SUBJECT
},
blank=True, null=True
)
body_template = models.ForeignKey(DynaFormTemplate,
related_name="dynaform_body_template_related",
limit_choices_to={
'template_type': DynaFormTemplate.TEMPLATE_TYPES_BODY
},
blank=True, null=True
)
error_class = models.CharField(_(u"Clase CSS para error"), max_length=40, default="error")
required_css_class = models.CharField(_(u"Clase CSS para requerido"), max_length=40, default="required")
##########################################################################
# nuevo autorespondedor
##########################################################################
autorespond = models.BooleanField(_(u"Autoresponder"), default=False)
autorespond_subject_template = models.ForeignKey(DynaFormTemplate, blank=True, null=True, related_name="dynaform_autorespond_subject_template_related")
autorespond_body_template = models.ForeignKey(DynaFormTemplate, blank=True, null=True, related_name="dynaform_autorespond_body_template_related")
autorespond_email_field = models.CharField(_("Campo de email"), default='email', max_length=40)
def __unicode__(self):
return unicode(self.name)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(DynaFormForm, self).save(*args, **kwargs)
def get_fields(self):
return DynaFormField.objects.for_model(self)
@models.permalink
def get_absolute_url(self):
return ('dynaform_action', (), {'slug': self.slug, 'pk': self.pk})
def clone(self):
"""
Make a clone of the current instance
"""
# recreate the form instance
form_clone = DynaFormForm(
lang = self.lang,
name = "clon de %s" % self.name,
is_active = self.is_active,
form_title = self.form_title,
form_template = self.form_template,
send_email = self.send_email,
from_email = self.from_email,
#recipient_list = self.recipient_list,
subject_template = self.subject_template,
body_template = self.body_template,
error_class = self.error_class,
required_css_class = self.required_css_class,
autorespond = self.autorespond,
autorespond_subject_template = self.autorespond_subject_template,
autorespond_body_template = self.autorespond_body_template,
autorespond_email_field = self.autorespond_email_field
)
form_clone.save()
content_type = ContentType.objects.get_for_model(form_clone)
# recreate all the fields
for field in self.get_fields():
field_clone = DynaFormField(
content_type = content_type,
object_pk = form_clone.pk,
field_name = field.field_name,
field_label = field.field_label,
field_type = field.field_type,
field_widget = field.field_widget,
field_help = field.field_help,
is_required = field.is_required,
is_hidden = field.is_hidden,
default_value = field.default_value,
choices = field.choices,
choices_delimiter = field.choices_delimiter,
choices_queryset = field.choices_queryset,
choices_queryset_filter = field.choices_queryset_filter,
choices_queryset_empty_label = field.choices_queryset_empty_label,
choices_queryset_label = field.choices_queryset_label,
choices_related_field = field.choices_related_field,
field_order = field.field_order
)
field_clone.save()
@python_2_unicode_compatible
class DynaFormRecipientList(MultiSiteBaseModel):
ALTERNATE_SEND_ALWAYS = 'ALWAYS' # always send
ALTERNATE_SEND_AUTO = 'AUTO' # rotate among all recipients
ALTERNATE_SEND_IF = 'IF' # send only if a condition is met
ALTERNATE_SEND_CHOICES = [
(ALTERNATE_SEND_ALWAYS, u"Enviar siempre"),
(ALTERNATE_SEND_AUTO, u"Alternar (automático)"),
(ALTERNATE_SEND_IF, u"Si cumple la condición"),
]
object_form = models.ForeignKey('DynaFormForm')
name = models.CharField(_(u"Nombre"), max_length=100)
email = models.EmailField(_(u"Email"))
alternate_send = models.CharField(_(u"Enviar/Alternar"), max_length=100,
choices=ALTERNATE_SEND_CHOICES, default=ALTERNATE_SEND_ALWAYS)
alternate_condition = models.CharField(_(u"Condicional"), max_length=200,
blank=True, null=True,
help_text=_(u"Se pueden usar variables del contexto {{ object }}, \
{{ sites }}, etc. Ej: "))
alternate_index = models.PositiveIntegerField(default=0,
choices=[(i, i) for i in range(0, 10)])
subject_template = models.ForeignKey(DynaFormTemplate,
related_name="dynaformrecipient_subject_template_related",
limit_choices_to={
'template_type': DynaFormTemplate.TEMPLATE_TYPES_SUBJECT
},
blank=True, null=True
)
body_template = models.ForeignKey(DynaFormTemplate,
related_name="dynaformrecipient_body_template_related",
limit_choices_to={
'template_type': DynaFormTemplate.TEMPLATE_TYPES_BODY
},
blank=True, null=True
)
class Meta:
ordering = ('alternate_send', 'alternate_index')
def __str__(self):
return u"{name} <{email}>".format(**self.__dict__)
@classmethod
def send_to_recipient_list(cls, object_form, context, attachment=[], **kwargs):
"""
Build and order the recipient list
"""
recipient_list = cls.objects.filter(
Q(alternate_send=cls.ALTERNATE_SEND_ALWAYS) |
Q(alternate_send=cls.ALTERNATE_SEND_AUTO, alternate_index=0),
object_form=object_form)
for recipient in recipient_list:
if not settings.DEBUG:
recipient.send_email(context, attachment, **kwargs)
log.info("[%s] Envia el mail al destinatario %s", recipient.alternate_send, recipient.email)
# If the condition holds: field [==,<,>,!=,<=,>=] field/value [AND, OR, NOT]
def alternate_condition_match(value, context):
template = Template("{% if " + str(value) + "%}1{% endif %}")
return template.render(Context(context)) == u'1'
recipient_list = cls.objects.filter(object_form=object_form, alternate_send=cls.ALTERNATE_SEND_IF)
for recipient in recipient_list:
if alternate_condition_match(recipient.alternate_condition, context):
if not settings.DEBUG:
recipient.send_email(context, attachment, **kwargs)
log.info("Se cumple la condicion \"%s\", [%s] Envia el mail al destinatario %s",
recipient.alternate_condition, recipient.alternate_send, recipient.email)
log.debug("shift alternate")
cls.shift_alternate(object_form)
def send_email(self, context, attachment=[], **kwargs):
"""
Shortcut that builds the complete email: it creates an
EmailMultiAlternatives instance and sends it.
To improve sending throughput, reuse the same SMTP
connection across messages.
"""
connection = kwargs.get('connection')
context['recipient'] = self
subject = (self.subject_template or self.object_form.subject_template).render(context)
body = (self.body_template or self.object_form.body_template).render(context)
_template = (self.body_template or self.object_form.body_template)
log.debug("Envia el mail:\r\n=====================\r\n\r\nFrom: %s\r\nTo: %s\r\n%s\r\n%s",
self.object_form.from_email, [self.email], subject, body)
if _template.is_plain:
msg = EmailMultiAlternatives(subject, safe(body),
self.object_form.from_email, [self.email],
connection=connection)
else:
msg = EmailMultiAlternatives(subject, escape(body),
self.object_form.from_email, [self.email],
connection=connection)
msg.attach_alternative(body, "text/html")
if attachment:
for attach in attachment:
msg.attach_file(attach['content'], attach['mimetype'])
msg.send()
@classmethod
def shift_alternate(cls, object_form):
qs = cls.objects.filter(object_form=object_form,
alternate_send=cls.ALTERNATE_SEND_AUTO)
recipient_list = qs.values('alternate_index')\
.annotate(models.Max('alternate_index'))\
.order_by('-alternate_index')
total_recipient = len(recipient_list)
for i, recipient in enumerate(recipient_list):
srr = recipient['alternate_index__max']
srl = srr + 1
if i == 0:
srl = total_recipient + 1
log.debug("Shift register %s -> %s" , srr, srl)
qs.filter(alternate_index=srr).update(alternate_index=srl)
srr = total_recipient + 1
srl = 0
log.debug("Shift register %s -> %s" , srr, srl)
qs.filter(alternate_index=srr).update(alternate_index=srl)
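# Usage sketch (slug and context keys are hypothetical): stored templates
# render with a plain Django Context, so any saved form data can be
# interpolated into the subject or body.
#
#     tpl = DynaFormTemplate.objects.get(slug='contact-subject')
#     subject = tpl.render(Context({'form': saved_form_data}))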
|
neoascetic/cmsplugin-filer
|
refs/heads/develop
|
cmsplugin_filer_teaser/migrations/0006_fix_migration_mess.py
|
28
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'FilerTeaser.external_image'
db.delete_column('cmsplugin_filerteaser', 'external_image')
def backwards(self, orm):
# Adding field 'FilerTeaser.external_image'
db.add_column('cmsplugin_filerteaser', 'external_image', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_filer_teaser.filerteaser': {
'Meta': {'object_name': 'FilerTeaser', 'db_table': "'cmsplugin_filerteaser'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'free_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_blank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'use_autoscale': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_teaser']
|
charbeljc/project-service
|
refs/heads/8.0
|
__unported__/analytic_hours_block/__openerp__.py
|
21
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Vincent Renaville, ported by Joel Grand-Guillaume
# Copyright 2010-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Project Hours Blocks Management",
"version": "1.5",
"category": "Generic Modules/Projects & Services",
"description": """
Project Hours Blocks Management
===============================
This module lets you manage hours blocks, for example to
follow up on user support contracts.
You sell a product of type "hours block",
then record the hours spent against that block, and
you can track how much of it has been used.
""",
"author": "Camptocamp,Odoo Community Association (OCA)",
"license": 'AGPL-3',
"website": "http://www.camptocamp.com",
"depends": [
"account",
"hr_timesheet_invoice",
"analytic",
"project",
],
"data": [
"report.xml",
"hours_block_view.xml",
"hours_block_data.xml",
"hours_block_menu.xml",
"product_view.xml",
"project_view.xml",
"report.xml",
"security/hours_block_security.xml",
"security/ir.model.access.csv",
],
"active": False,
"installable": False
}
|
zack3241/incubator-airflow
|
refs/heads/master
|
tests/dags/test_retry_handling_job.py
|
38
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2016,10,5,19),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 4,
'retry_delay': timedelta(seconds=0),
}
dag = DAG('test_retry_handling_job', default_args=default_args, schedule_interval='@once')
task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
dag=dag)
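# With retries=4 and a zero retry_delay, the scheduler should attempt
# test_retry_handling_op up to five times in quick succession before
# marking the task instance failed.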
|
mshafiq9/django
|
refs/heads/master
|
django/conf/locale/sq/__init__.py
|
12133432
| |
ttsubo/ryu
|
refs/heads/master
|
ryu/app/gui_topology/__init__.py
|
12133432
| |
melon-li/openstack-dashboard
|
refs/heads/master
|
openstack_dashboard/management/commands/__init__.py
|
12133432
| |
DengueTim/linux-rockchip
|
refs/heads/master
|
scripts/gdb/linux/__init__.py
|
2010
|
# nothing to do for the initialization of this package
|
bohlian/frappe
|
refs/heads/develop
|
frappe/website/doctype/blogger/blogger.py
|
73
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class Blogger(Document):
def validate(self):
if self.user and not frappe.db.exists("User", self.user):
# for data import
frappe.get_doc({
"doctype":"User",
"email": self.user,
"first_name": self.user.split("@")[0]
}).insert()
def on_update(self):
"if user is set, then update all older blogs"
from frappe.website.doctype.blog_post.blog_post import clear_blog_cache
clear_blog_cache()
if self.user:
for blog in frappe.db.sql_list("""select name from `tabBlog Post` where owner=%s
and ifnull(blogger,'')=''""", self.user):
b = frappe.get_doc("Blog Post", blog)
b.blogger = self.name
b.save()
frappe.permissions.add_user_permission("Blogger", self.name, self.user)
|
talishte/ctigre
|
refs/heads/master
|
env/lib/python2.7/site-packages/mezzanine/blog/migrations/0005_auto__del_comment__add_field_blogpost_comments_count__chg_field_blogpo.py
|
8
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Comment'
db.delete_table('blog_comment')
# Adding field 'BlogPost.comments_count'
db.add_column('blog_blogpost', 'comments_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Changing field 'BlogPost.description'
db.alter_column('blog_blogpost', 'description', self.gf('django.db.models.fields.TextField')(blank=True))
def backwards(self, orm):
# Adding model 'Comment'
db.create_table('blog_comment', (
('blog_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['blog.BlogPost'])),
('body', self.gf('django.db.models.fields.TextField')()),
('by_author', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('email_hash', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('time_created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('approved', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('replied_to', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', null=True, to=orm['blog.Comment'], blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('blog', ['Comment'])
# Deleting field 'BlogPost.comments_count'
db.delete_column('blog_blogpost', 'comments_count')
# Changing field 'BlogPost.description'
db.alter_column('blog_blogpost', 'description', self.gf('mezzanine.core.fields.HtmlField')(blank=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blog.blogcategory': {
'Meta': {'object_name': 'BlogCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'blog.blogpost': {
'Meta': {'object_name': 'BlogPost'},
'_keywords': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'blogposts'", 'blank': 'True', 'to': "orm['blog.BlogCategory']"}),
#'comments': ('mezzanine.generic.fields.CommentsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.ThreadedComment']"}),
'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine.core.fields.HtmlField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Keyword']", 'symmetrical': 'False', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blogposts'", 'to': "orm['%s']" % user_orm_label})
},
'comments.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['%s']" % user_orm_label}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.threadedcomment': {
'Meta': {'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'email_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['blog']
|
ironbox360/django
|
refs/heads/master
|
django/forms/fields.py
|
63
|
"""
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import uuid
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.boundfield import BoundField
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats, six
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'DurationField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField',
)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
    # Add an 'invalid' entry to default_error_messages if you want a specific
    # field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False, disabled=False, label_suffix=None):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# disabled -- Boolean that specifies whether the field is disabled, that
# is its widget is shown in the form but not editable.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.disabled = disabled
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
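    # Illustrative sketch (not part of the original module), assuming a
    # configured Django environment; IntegerField is defined further below:
    #   f = IntegerField(min_value=1)
    #   f.clean('5')   # to_python('5') -> 5, then validate(5), run_validators(5)
    #   f.clean('')    # raises ValidationError with code 'required'
    #   f.clean('0')   # raises ValidationError from MinValueValidator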
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it w/ ''.
initial_value = initial if initial is not None else ''
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
initial_value = self._coerce(initial_value)
except ValidationError:
return True
data_value = data if data is not None else ''
return initial_value != data_value
def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name)
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, strip=True, *args, **kwargs):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
value = force_text(value)
if self.strip:
value = value.strip()
return value
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
re_decimal = re.compile(r'\.0*\s*$')
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub('', str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
self.validators.append(validators.DecimalValidator(max_digits, decimal_places))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
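                # e.g. decimal_places=2 -> step '0.01'; decimal_places=7 -> '1e-7'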
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class DurationField(Field):
default_error_messages = {
'invalid': _('Enter a valid duration.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
value = parse_duration(value)
if value is None:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
kwargs.setdefault('strip', False)
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
            # in self.empty_values; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception:
# Pillow doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
            Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain malformed URLs).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
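            # e.g. 'example.com/path' is rebuilt as 'http://example.com/path'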
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
        Explicitly checks for the strings 'True' and 'False', which is what a
        hidden field will submit for True and False; for 'true' and 'false',
        which are likely to be returned by JavaScript serializations of forms;
        and for '1' and '0', which is what a RadioField will submit. Unlike
        BooleanField, we need to explicitly check for True, because we are
        not using the bool() function.
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None
def validate(self, value):
pass
def has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
class CallableChoiceIterator(object):
def __init__(self, choices_func):
self.choices_func = choices_func
def __iter__(self):
for e in self.choices_func():
yield e
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
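# Illustrative sketch (not part of the original module), assuming a configured
# Django environment: coerce turns the submitted string back into its type.
#   f = TypedChoiceField(choices=[(1, '+1'), (-1, '-1')], coerce=int)
#   f.clean('1')   # -> 1 (an int, not the submitted string '1')
#   f.clean('2')   # raises ValidationError with code 'invalid_choice'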
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
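# Illustrative sketch (not part of the original module): a minimal compress()
# implementation, mirroring the DateField/TimeField example in the docstring
# above (the subclass name is hypothetical):
#   class DateTimeAggregateField(MultiValueField):
#       def __init__(self, *args, **kwargs):
#           fields = (DateField(), TimeField())
#           super(DateTimeAggregateField, self).__init__(fields, *args, **kwargs)
#       def compress(self, data_list):
#           if data_list:
#               return datetime.datetime.combine(*data_list)
#           return None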
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def __init__(self, *args, **kwargs):
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
class UUIDField(CharField):
default_error_messages = {
'invalid': _('Enter a valid UUID.'),
}
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def to_python(self, value):
value = super(UUIDField, self).to_python(value)
if value in self.empty_values:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
|
mensler/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/bigswitch_utils.py
|
73
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016, Ted Elhourani <ted@bigswitch.com>
#
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from ansible.module_utils.urls import fetch_url
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class Rest(object):
def __init__(self, module, headers, baseurl):
self.module = module
self.headers = headers
self.baseurl = baseurl
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
return Response(resp, info)
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def patch(self, path, data=None, headers=None):
return self.send('PATCH', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
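# Minimal usage sketch (not part of the original file), from inside a module
# that holds an AnsibleModule instance; the URL and path are hypothetical:
#   rest = Rest(module,
#               {'content-type': 'application/json'},
#               'https://bigswitch.example.com:8443/api/v1/data')
#   resp = rest.get('controller/core/healthy')
#   if resp.status_code == 200:
#       module.exit_json(changed=False, result=resp.json)
# Note that send() always forwards the headers given to the constructor; the
# per-call `headers` argument is accepted but not passed on to fetch_url().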
|
Johnzero/OE7
|
refs/heads/master
|
openerp/addons-modules/report_webkit/header.py
|
7
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import fields, osv
class HeaderHTML(osv.osv):
"""HTML Header allows you to define HTML CSS and Page format"""
_name = "ir.header_webkit"
_columns = {
'company_id' : fields.many2one('res.company', 'Company'),
'html' : fields.text('webkit header', help="Set Webkit Report Header"),
'footer_html' : fields.text('webkit footer', help="Set Webkit Report Footer."),
'css' : fields.text('Header CSS'),
'name' : fields.char('Name', size=128, required=True),
'margin_top' : fields.float('Top Margin (mm)'),
'margin_bottom' : fields.float('Bottom Margin (mm)'),
'margin_left' : fields.float('Left Margin (mm)'),
'margin_right' : fields.float('Right Margin (mm)'),
'orientation' : fields.selection(
[('Landscape','Landscape'),('Portrait', 'Portrait')],
'Orientation'
),
'format': fields.selection(
[
('A0' ,'A0 5 841 x 1189 mm'),
('A1' ,'A1 6 594 x 841 mm'),
('A2' ,'A2 7 420 x 594 mm'),
('A3' ,'A3 8 297 x 420 mm'),
('A4' ,'A4 0 210 x 297 mm, 8.26 x 11.69 inches'),
('A5' ,'A5 9 148 x 210 mm'),
('A6' ,'A6 10 105 x 148 mm'),
('A7' ,'A7 11 74 x 105 mm'),
('A8' ,'A8 12 52 x 74 mm'),
('A9' ,'A9 13 37 x 52 mm'),
('B0' ,'B0 14 1000 x 1414 mm'),
('B1' ,'B1 15 707 x 1000 mm'),
('B2' ,'B2 17 500 x 707 mm'),
('B3' ,'B3 18 353 x 500 mm'),
('B4' ,'B4 19 250 x 353 mm'),
('B5' ,'B5 1 176 x 250 mm, 6.93 x 9.84 inches'),
('B6' ,'B6 20 125 x 176 mm'),
('B7' ,'B7 21 88 x 125 mm'),
('B8' ,'B8 22 62 x 88 mm'),
('B9' ,'B9 23 33 x 62 mm'),
            ('B10','B10 16 31 x 44 mm'),
('C5E','C5E 24 163 x 229 mm'),
('Comm10E','Comm10E 25 105 x 241 mm, U.S. Common 10 Envelope'),
('DLE', 'DLE 26 110 x 220 mm'),
('Executive','Executive 4 7.5 x 10 inches, 190.5 x 254 mm'),
('Folio','Folio 27 210 x 330 mm'),
('Ledger', 'Ledger 28 431.8 x 279.4 mm'),
('Legal', 'Legal 3 8.5 x 14 inches, 215.9 x 355.6 mm'),
('Letter','Letter 2 8.5 x 11 inches, 215.9 x 279.4 mm'),
('Tabloid', 'Tabloid 29 279.4 x 431.8 mm'),
],
'Paper size',
required=True,
help="Select Proper Paper size"
)
}
HeaderHTML()
class HeaderImage(osv.osv):
"""Logo allows you to define multiple logo per company"""
_name = "ir.header_img"
_columns = {
'company_id' : fields.many2one('res.company', 'Company'),
'img' : fields.binary('Image'),
        'name' : fields.char('Name', size=128, required=True, help="Name of Image"),
        'type' : fields.char('Type', size=32, required=True, help="Image type (png, gif, jpeg)")
}
HeaderImage()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
strongswan/swidGenerator
|
refs/heads/master
|
swid_generator/environments/common.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import stat
import platform
import distro
from distutils.spawn import find_executable
from swid_generator.package_info import FileInfo
from swid_generator.exceptions import RequirementsNotInstalledError
from swid_generator.patches import unicode_patch
class CommonEnvironment(object):
"""
The common base for all environment classes.
"""
executable = None
conffile_file_name = None
control_archive = None
required_packages_for_package_file_method = None
required_packages_for_sign_method = None
@staticmethod
def get_architecture():
"""
        Return machine type, e.g. 'x86_64' or 'i386'.
"""
return platform.machine()
@staticmethod
def get_os_string():
"""
Return distribution string, e.g. 'Debian_7.4'.
"""
dist = [distro.id().capitalize(), distro.version()]
return '_'.join(filter(None, dist)) or 'unknown'
@staticmethod
def _is_file(path):
"""
Determine whether the specified path is an existing file.
This is needed because some package managers don't list only regular
files, but also directories and message strings.
It's also possible that the file/directory/symlink entries returned by
the package manager don't actually exist in the filesystem.
Args:
path (str):
The path to check.
Returns:
True or False
"""
if path[0] != '/':
return False
try:
mode = os.stat(path.encode('utf-8')).st_mode
except OSError:
return False
if stat.S_ISDIR(mode):
return False
return True
@classmethod
def is_installed(cls):
assert cls.executable is not None, 'Executable may not be None'
return find_executable(cls.executable)
@classmethod
def get_files_from_folder(cls, evidence_path, new_root_path):
"""
        Get all files from a path on the filesystem.
        :param evidence_path: Path on the filesystem
        :param new_root_path: Optional root to substitute for evidence_path in the reported file paths
        :return: Lexicographically sorted list of FileInfo() objects
"""
def get_fileinfo(path, base):
if new_root_path is None:
return FileInfo(path)
else:
path_for_tag = path.replace(unicode_patch(base), unicode_patch(new_root_path), 1)
path_for_tag = path_for_tag.replace('//', '/')
file_info = FileInfo(path_for_tag, actual_path=False)
file_info.set_actual_path(path)
return file_info
result_files = []
if os.path.isdir(evidence_path):
for dirpath, _, files in os.walk(evidence_path):
for file in files:
file_path = '/'.join([unicode_patch(dirpath), unicode_patch(file)])
result_files.append(get_fileinfo(file_path, evidence_path))
else:
file_path = os.path.realpath(evidence_path)
result_files.append(get_fileinfo(unicode_patch(file_path), os.path.dirname(file_path)))
return result_files
@classmethod
def check_package_installed(cls, package_name):
"""
        Check whether the package is installed on the system.
        :param package_name: Name of the package.
        :return: None if the package is not installed, or the path to its executable if it is.
"""
return find_executable(package_name)
@classmethod
def check_requirements(cls, package_file_execution=False, sign_tag_execution=False):
"""
        Check that all the command-line tools required for the requested
        operations (e.g. get_files_from_packagefile or signxml) are installed.
        If the requirements are not met, a RequirementsNotInstalledError is raised.
        :param package_file_execution: Default: False. Check the requirements for the get_files_from_packagefile operation.
        :param sign_tag_execution: Default: False. Check the requirements for the signxml operation.
"""
assert cls.required_packages_for_package_file_method is not None, 'List of required packages for package file execution may not be None'
        assert cls.required_packages_for_sign_method is not None, 'List of required packages for sign execution may not be None'
not_installed_packages = list()
required_packages = list()
if package_file_execution is True:
required_packages.extend(cls.required_packages_for_package_file_method)
if sign_tag_execution is True:
required_packages.extend(cls.required_packages_for_sign_method)
for package in required_packages:
is_installed = cls.check_package_installed(package)
if is_installed is None:
not_installed_packages.append(package)
if len(not_installed_packages) != 0:
raise RequirementsNotInstalledError("Please install following packages: " + ",".join(not_installed_packages))
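# Minimal usage sketch (not part of the original file): a concrete environment
# class fills in the class attributes and then guards an operation. The
# subclass below and its package lists are hypothetical:
#   class DebEnvironment(CommonEnvironment):
#       executable = 'dpkg-query'
#       required_packages_for_package_file_method = ['ar', 'tar']
#       required_packages_for_sign_method = ['xmlsec1']
#   DebEnvironment.check_requirements(sign_tag_execution=True)
#   # raises RequirementsNotInstalledError if e.g. xmlsec1 is missing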
|
iiman/mytardis
|
refs/heads/master
|
tardis/apps/oaipmh/provider/base.py
|
5
|
import oaipmh.error
import oaipmh.interfaces
class BaseProvider(oaipmh.interfaces.IOAI, object):
"""
A base provider which roughly implements the PyOAI interface for OAI-PMH
servers.
Extend this if you're writing your own provider for a new type or a
different metadata format.
"""
def __init__(self, site):
self._site = site
def getRecord(self, metadataPrefix, identifier):
"""Get a record for a metadataPrefix and identifier.
:param metadataPrefix: identifies metadata set to retrieve
:type metadataPrefix: string
:param identifier: - repository-unique identifier of record
:type identifier: string
:raises oaipmh.error.CannotDisseminateFormatError: if
``metadataPrefix`` is unknown or not supported by identifier.
:raises oaipmh.error.IdDoesNotExistError: if identifier is
unknown or illegal.
:returns: a ``header``, ``metadata``, ``about`` tuple describing
the record.
"""
raise oaipmh.error.IdDoesNotExistError
def identify(self):
raise NotImplementedError
def listIdentifiers(self, metadataPrefix, set=None, from_=None, until=None):
"""Get a list of header information on records.
:param metadataPrefix: identifies metadata set to retrieve
:type metadataPrefix: string
:param set: set identifier; only return headers in set
:type set: string
:param from_: only retrieve headers from from_ date forward
(in naive UTC)
:type from_: datetime
:param until: only retrieve headers with dates up to and including
until date (in naive UTC)
:type until: datetime
:raise error.CannotDisseminateFormatError: if metadataPrefix
is not supported by the repository.
:raises error.NoSetHierarchyError: if the repository does not
support sets.
:returns: an iterable of headers.
"""
raise oaipmh.error.CannotDisseminateFormatError
def listMetadataFormats(self, identifier=None):
"""List metadata formats supported by repository or record.
:param identifier: identify record for which we want to know all
supported metadata formats. If absent, list all metadata
formats supported by repository.
:type identifier: string
:raises error.IdDoesNotExistError: if record with
identifier does not exist.
:raises error.NoMetadataFormatsError: if no formats are
available for the indicated record.
:returns: an iterable of ``metadataPrefix``, ``schema``,
``metadataNamespace`` tuples (each entry in the tuple is a string).
"""
return []
def listRecords(self, metadataPrefix, set=None, from_=None, until=None):
"""
Get a list of header, metadata and about information on records.
:param metadataPrefix: identifies metadata set to retrieve
:type metadataPrefix: string
:param set: set identifier; only return records in set
:type set: string
:param from_: only retrieve records from ``from_`` date forward
(in naive UTC)
:type from_: datetime
:param until: only retrieve records with dates up to and including
until date (in naive UTC)
:type until: datetime
:raises oaipmh.error.CannotDisseminateFormatError: if ``metadataPrefix``
is not supported by the repository.
:raises oaipmh.error.NoSetHierarchyError: if the repository does not
support sets.
:returns: an iterable of ``header``, ``metadata``, ``about`` tuples.
"""
raise oaipmh.error.CannotDisseminateFormatError
def listSets(self):
"""
Get a list of sets in the repository.
:raises error.NoSetHierarchyError: if the repository does not support
sets.
:returns: an iterable of setSpec, setName tuples (strings).
"""
raise oaipmh.error.NoSetHierarchyError
def writeMetadata(self, element, metadata):
"""
Create XML elements under the given element, using the provided
metadata.
Should avoid doing any model-lookups, as they should be done when
creating the metadata.
:param element: element to put all content under (as SubElements)
:type element: lxml.etree.Element
:param metadata: metadata to turn into XML
:type metadata: oaipmh.common.Metadata
"""
raise NotImplementedError
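# Hedged sketch (not part of the original module): a concrete provider only
# overrides the methods it can actually serve; everything else keeps the base
# behaviour of raising the appropriate OAI-PMH error. Names are illustrative.
#
#   class DemoProvider(BaseProvider):
#       def listSets(self):
#           return iter([('demo', 'Demo set')])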
|
svendavison/mablog2
|
refs/heads/master
|
gen2.py
|
1
|
#!/usr/bin/python
#
# written by @eric_capuano
# https://github.com/ecapuano/web-traffic-generator
#
# published under MIT license :) do what you want.
#
import requests, re, time, random, config
def doRequest(url):
global dataMeter
global goodRequests
global badRequests
sleepTime = random.randrange(config.minWait,config.maxWait)
if config.debug:
print "requesting: %s" % url
headers = {'user-agent': config.userAgent}
try:
r = requests.get(url, headers=headers)
except:
time.sleep(30) # else we'll enter 100% CPU loop in a net down situation
return False
status = r.status_code
pageSize = len(r.content)
dataMeter = dataMeter + pageSize
if config.debug:
print "Page size: %s" % pageSize
if ( dataMeter > 1000000 ):
print "Data meter: %s MB" % (dataMeter / 1000000)
else:
print "Data meter: %s bytes" % dataMeter
if ( status != 200 ):
badRequests+=1
if config.debug:
print "Response status: %s" % r.status_code
if ( status == 429 ):
if config.debug:
print "We're making requests too frequently... sleeping longer..."
sleepTime+=30
else:
goodRequests+=1
# need to sleep for random number of seconds!
if config.debug:
print "Good requests: %s" % goodRequests
print "Bad reqeusts: %s" % badRequests
print "Sleeping for %s seconds..." % sleepTime
time.sleep(sleepTime)
return r
def getLinks(page):
links=[]
pattern=r"(?:href\=\")(https?:\/\/[^\"]+)(?:\")"
matches = re.findall(pattern,page.content)
for match in matches: # check all matches against config.blacklist
if any(bl in match for bl in config.blacklist):
pass
else:
links.insert(0,match)
return links
def browse(urls):
currURL = 1
for url in urls:
urlCount = len(urls)
page = doRequest(url) # hit current root URL
if page:
links = getLinks(page) # extract links from page
linkCount = len(links)
else:
if config.debug:
print "Error requesting %s" % url
continue
depth=0
while ( depth < config.clickDepth ):
if config.debug:
print "------------------------------------------------------"
print "config.blacklist: %s" % config.blacklist
# set the link count, which will change throughout the loop
linkCount = len(links)
if ( linkCount > 1): # make sure we have more than 1 link to use
if config.debug:
print "URL: %s / %s -- Depth: %s / %s" \
% (currURL,urlCount,depth,config.clickDepth)
print "Choosing random link from total: %s" % linkCount
                randomLink = random.randrange(0, linkCount)  # randrange excludes the stop value, so every index is reachable
if config.debug:
print "Link chosen: %s of %s" % (randomLink,linkCount)
clickLink = links[randomLink]
try:
# browse to random link on rootURL
sub_page = doRequest(clickLink)
if sub_page:
checkLinkCount = len(getLinks(sub_page))
else:
                        if config.debug:
                            print "Error requesting %s" % clickLink
                        break
# make sure we have more than 1 link to pick from
if ( checkLinkCount > 1 ):
# extract links from the new page
links = getLinks(sub_page)
else:
# else retry with current link list
if config.debug:
print "Not enough links found! Found: %s -- " \
"Going back up a level" % checkLinkCount
config.blacklist.insert(0,clickLink)
# remove the dead-end link from our list
del links[randomLink]
except:
if config.debug:
print "Exception on URL: %s -- " \
"removing from list and trying again!" % clickLink
# I need to expand more on exception type for config.debugging
config.blacklist.insert(0,clickLink)
# remove the dead-end link from our list
del links[randomLink]
pass
# increment counter whether request was successful or not
# so that we don't end up in an infinite failed request loop
depth+=1
else:
# we land here if we went down a path that dead-ends
# could implement logic to simply restart at same root
if config.debug:
print "Hit a dead end...Moving to next Root URL"
#config.blacklist.insert(0,clickLink)
                    # do not insert "" into the blacklist: "" is a substring of
                    # every URL, so it would blacklist all future links
depth = config.clickDepth
currURL+=1 # increase rootURL iteration
if config.debug:
print "Done."
# initialize our global variables
dataMeter = 0
goodRequests = 0
badRequests = 0
while True:
print "Traffic generator started..."
print "----------------------------"
print "https://github.com/ecapuano/web-traffic-generator"
print ""
print "Clicking %s links deep into %s different root URLs, " \
% (config.clickDepth,len(config.rootURLs))
print "waiting between %s and %s seconds between requests. " \
% (config.minWait,config.maxWait)
print ""
print "This script will run indefinitely. Ctrl+C to stop."
browse(config.rootURLs)
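# Hedged sketch of the config module this script imports; the attribute names
# are taken from the usages above, the values are purely illustrative:
#
#   # config.py
#   debug = True
#   userAgent = 'Mozilla/5.0'
#   minWait = 30                        # lower bound on seconds between requests
#   maxWait = 120                       # upper bound on seconds between requests
#   clickDepth = 5                      # links to follow per root URL
#   rootURLs = ['http://example.com']
#   blacklist = ['facebook.com']        # URL substrings to skip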
|
robiame/AndroidGeodata
|
refs/heads/master
|
pil/GribStubImagePlugin.py
|
1
|
#
# The Python Imaging Library
# $Id$
#
# GRIB stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
_handler = None
##
# Install application-specific GRIB image handler.
#
# @param handler Handler object.
def register_handler(handler):
global _handler
_handler = handler
# --------------------------------------------------------------------
# Image adapter
def _accept(prefix):
return prefix[0:4] == "GRIB" and prefix[7] == chr(1)
class GribStubImageFile(ImageFile.StubImageFile):
format = "GRIB"
format_description = "GRIB"
def _open(self):
offset = self.fp.tell()
if not _accept(self.fp.read(8)):
raise SyntaxError("Not a GRIB file")
self.fp.seek(offset)
# make something up
self.mode = "F"
self.size = 1, 1
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
raise IOError("GRIB save handler not installed")
_handler.save(im, fp, filename)
# --------------------------------------------------------------------
# Registry
Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept)
Image.register_save(GribStubImageFile.format, _save)
Image.register_extension(GribStubImageFile.format, ".grib")
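# Hedged usage sketch (not part of the original file): a registered handler is
# expected to provide open()/load() for reading (used via StubImageFile) and
# save() for writing (used by _save); all names below are illustrative.
#
#   class MyGribHandler:
#       def open(self, im):
#           pass        # inspect im.fp and set im.size / im.mode for real data
#       def load(self, im):
#           pass        # return a fully loaded image object
#       def save(self, im, fp, filename):
#           raise IOError("GRIB saving not implemented")
#
#   register_handler(MyGribHandler())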
|
emedinaa/contentbox
|
refs/heads/master
|
third_party/unidecode/x06f.py
|
4
|
data = (
'Qing ', # 0x00
'Yu ', # 0x01
'Piao ', # 0x02
'Ji ', # 0x03
'Ya ', # 0x04
'Jiao ', # 0x05
'Qi ', # 0x06
'Xi ', # 0x07
'Ji ', # 0x08
'Lu ', # 0x09
'Lu ', # 0x0a
'Long ', # 0x0b
'Jin ', # 0x0c
'Guo ', # 0x0d
'Cong ', # 0x0e
'Lou ', # 0x0f
'Zhi ', # 0x10
'Gai ', # 0x11
'Qiang ', # 0x12
'Li ', # 0x13
'Yan ', # 0x14
'Cao ', # 0x15
'Jiao ', # 0x16
'Cong ', # 0x17
'Qun ', # 0x18
'Tuan ', # 0x19
'Ou ', # 0x1a
'Teng ', # 0x1b
'Ye ', # 0x1c
'Xi ', # 0x1d
'Mi ', # 0x1e
'Tang ', # 0x1f
'Mo ', # 0x20
'Shang ', # 0x21
'Han ', # 0x22
'Lian ', # 0x23
'Lan ', # 0x24
'Wa ', # 0x25
'Li ', # 0x26
'Qian ', # 0x27
'Feng ', # 0x28
'Xuan ', # 0x29
'Yi ', # 0x2a
'Man ', # 0x2b
'Zi ', # 0x2c
'Mang ', # 0x2d
'Kang ', # 0x2e
'Lei ', # 0x2f
'Peng ', # 0x30
'Shu ', # 0x31
'Zhang ', # 0x32
'Zhang ', # 0x33
'Chong ', # 0x34
'Xu ', # 0x35
'Huan ', # 0x36
'Kuo ', # 0x37
'Jian ', # 0x38
'Yan ', # 0x39
'Chuang ', # 0x3a
'Liao ', # 0x3b
'Cui ', # 0x3c
'Ti ', # 0x3d
'Yang ', # 0x3e
'Jiang ', # 0x3f
'Cong ', # 0x40
'Ying ', # 0x41
'Hong ', # 0x42
'Xun ', # 0x43
'Shu ', # 0x44
'Guan ', # 0x45
'Ying ', # 0x46
'Xiao ', # 0x47
'[?] ', # 0x48
'[?] ', # 0x49
'Xu ', # 0x4a
'Lian ', # 0x4b
'Zhi ', # 0x4c
'Wei ', # 0x4d
'Pi ', # 0x4e
'Jue ', # 0x4f
'Jiao ', # 0x50
'Po ', # 0x51
'Dang ', # 0x52
'Hui ', # 0x53
'Jie ', # 0x54
'Wu ', # 0x55
'Pa ', # 0x56
'Ji ', # 0x57
'Pan ', # 0x58
'Gui ', # 0x59
'Xiao ', # 0x5a
'Qian ', # 0x5b
'Qian ', # 0x5c
'Xi ', # 0x5d
'Lu ', # 0x5e
'Xi ', # 0x5f
'Xuan ', # 0x60
'Dun ', # 0x61
'Huang ', # 0x62
'Min ', # 0x63
'Run ', # 0x64
'Su ', # 0x65
'Liao ', # 0x66
'Zhen ', # 0x67
'Zhong ', # 0x68
'Yi ', # 0x69
'Di ', # 0x6a
'Wan ', # 0x6b
'Dan ', # 0x6c
'Tan ', # 0x6d
'Chao ', # 0x6e
'Xun ', # 0x6f
'Kui ', # 0x70
'Yie ', # 0x71
'Shao ', # 0x72
'Tu ', # 0x73
'Zhu ', # 0x74
'San ', # 0x75
'Hei ', # 0x76
'Bi ', # 0x77
'Shan ', # 0x78
'Chan ', # 0x79
'Chan ', # 0x7a
'Shu ', # 0x7b
'Tong ', # 0x7c
'Pu ', # 0x7d
'Lin ', # 0x7e
'Wei ', # 0x7f
'Se ', # 0x80
'Se ', # 0x81
'Cheng ', # 0x82
'Jiong ', # 0x83
'Cheng ', # 0x84
'Hua ', # 0x85
'Jiao ', # 0x86
'Lao ', # 0x87
'Che ', # 0x88
'Gan ', # 0x89
'Cun ', # 0x8a
'Heng ', # 0x8b
'Si ', # 0x8c
'Shu ', # 0x8d
'Peng ', # 0x8e
'Han ', # 0x8f
'Yun ', # 0x90
'Liu ', # 0x91
'Hong ', # 0x92
'Fu ', # 0x93
'Hao ', # 0x94
'He ', # 0x95
'Xian ', # 0x96
'Jian ', # 0x97
'Shan ', # 0x98
'Xi ', # 0x99
'Oki ', # 0x9a
'[?] ', # 0x9b
'Lan ', # 0x9c
'[?] ', # 0x9d
'Yu ', # 0x9e
'Lin ', # 0x9f
'Min ', # 0xa0
'Zao ', # 0xa1
'Dang ', # 0xa2
'Wan ', # 0xa3
'Ze ', # 0xa4
'Xie ', # 0xa5
'Yu ', # 0xa6
'Li ', # 0xa7
'Shi ', # 0xa8
'Xue ', # 0xa9
'Ling ', # 0xaa
'Man ', # 0xab
'Zi ', # 0xac
'Yong ', # 0xad
'Kuai ', # 0xae
'Can ', # 0xaf
'Lian ', # 0xb0
'Dian ', # 0xb1
'Ye ', # 0xb2
'Ao ', # 0xb3
'Huan ', # 0xb4
'Zhen ', # 0xb5
'Chan ', # 0xb6
'Man ', # 0xb7
'Dan ', # 0xb8
'Dan ', # 0xb9
'Yi ', # 0xba
'Sui ', # 0xbb
'Pi ', # 0xbc
'Ju ', # 0xbd
'Ta ', # 0xbe
'Qin ', # 0xbf
'Ji ', # 0xc0
'Zhuo ', # 0xc1
'Lian ', # 0xc2
'Nong ', # 0xc3
'Guo ', # 0xc4
'Jin ', # 0xc5
'Fen ', # 0xc6
'Se ', # 0xc7
'Ji ', # 0xc8
'Sui ', # 0xc9
'Hui ', # 0xca
'Chu ', # 0xcb
'Ta ', # 0xcc
'Song ', # 0xcd
'Ding ', # 0xce
'[?] ', # 0xcf
'Zhu ', # 0xd0
'Lai ', # 0xd1
'Bin ', # 0xd2
'Lian ', # 0xd3
'Mi ', # 0xd4
'Shi ', # 0xd5
'Shu ', # 0xd6
'Mi ', # 0xd7
'Ning ', # 0xd8
'Ying ', # 0xd9
'Ying ', # 0xda
'Meng ', # 0xdb
'Jin ', # 0xdc
'Qi ', # 0xdd
'Pi ', # 0xde
'Ji ', # 0xdf
'Hao ', # 0xe0
'Ru ', # 0xe1
'Zui ', # 0xe2
'Wo ', # 0xe3
'Tao ', # 0xe4
'Yin ', # 0xe5
'Yin ', # 0xe6
'Dui ', # 0xe7
'Ci ', # 0xe8
'Huo ', # 0xe9
'Jing ', # 0xea
'Lan ', # 0xeb
'Jun ', # 0xec
'Ai ', # 0xed
'Pu ', # 0xee
'Zhuo ', # 0xef
'Wei ', # 0xf0
'Bin ', # 0xf1
'Gu ', # 0xf2
'Qian ', # 0xf3
'Xing ', # 0xf4
'Hama ', # 0xf5
'Kuo ', # 0xf6
'Fei ', # 0xf7
'[?] ', # 0xf8
'Boku ', # 0xf9
'Jian ', # 0xfa
'Wei ', # 0xfb
'Luo ', # 0xfc
'Zan ', # 0xfd
'Lu ', # 0xfe
'Li ', # 0xff
)
|
s3team/loop
|
refs/heads/master
|
libtracewrap/libtrace/protobuf/python/google/protobuf/internal/message_test.py
|
223
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
def IsPosInf(val):
return isinf(val) and (val > 0)
def IsNegInf(val):
return isinf(val) and (val < 0)
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenExtensions(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedExtensions(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testPickleSupport(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
def testPickleIncompleteProto(self):
golden_message = unittest_pb2.TestRequired(a=1)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
self.assertEquals(unpickled_message.a, 1)
# This is still an incomplete proto - so serializing should fail
self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)
def testPositiveInfinity(self):
golden_data = ('\x5D\x00\x00\x80\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
'\xCD\x02\x00\x00\x80\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self):
golden_data = ('\x5D\x00\x00\x80\xFF'
'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
'\xCD\x02\x00\x00\x80\xFF'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self):
golden_data = ('\x5D\x00\x00\xC0\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
'\xCD\x02\x00\x00\xC0\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
# The protocol buffer may serialize to any one of multiple different
# representations of a NaN. Rather than verify a specific representation,
# verify the serialized string can be converted into a correctly
# behaving protocol buffer.
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestAllTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.optional_float))
self.assertTrue(isnan(message.optional_double))
self.assertTrue(isnan(message.repeated_float[0]))
self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.packed_float[0]))
self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeDoubleValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append('a')
message.repeated_bytes.append('c')
message.repeated_bytes.append('b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], 'a')
self.assertEqual(message.repeated_bytes[1], 'b')
self.assertEqual(message.repeated_bytes[2], 'c')
def testSortingRepeatedScalarFieldsCustomComparator(self):
"""Check some different types with custom comparator."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self):
"""Check passing a custom comparator to sort a repeated composite field."""
message = unittest_pb2.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
def testRepeatedCompositeFieldSortArguments(self):
"""Check sorting a repeated composite field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
get_bb = operator.attrgetter('bb')
cmp_bb = lambda a, b: cmp(a.bb, b.bb)
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=get_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(key=get_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
message.repeated_nested_message.sort(sort_function=cmp_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self):
"""Check sorting a scalar field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
abs_cmp = lambda a, b: cmp(abs(a), abs(b))
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(key=abs, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
message.repeated_int32.sort(sort_function=abs_cmp)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
len_cmp = lambda a, b: cmp(len(a), len(b))
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(key=len, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
message.repeated_string.sort(sort_function=len_cmp)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(cmp=len_cmp, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testParsingMerge(self):
"""Check the merge behavior when a required or optional field appears
multiple times in the input."""
messages = [
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes() ]
messages[0].optional_int32 = 1
messages[1].optional_int64 = 2
messages[2].optional_int32 = 3
messages[2].optional_string = 'hello'
merged_message = unittest_pb2.TestAllTypes()
merged_message.optional_int32 = 3
merged_message.optional_int64 = 2
merged_message.optional_string = 'hello'
generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
generator.field1.extend(messages)
generator.field2.extend(messages)
generator.field3.extend(messages)
generator.ext1.extend(messages)
generator.ext2.extend(messages)
generator.group1.add().field1.MergeFrom(messages[0])
generator.group1.add().field1.MergeFrom(messages[1])
generator.group1.add().field1.MergeFrom(messages[2])
generator.group2.add().field1.MergeFrom(messages[0])
generator.group2.add().field1.MergeFrom(messages[1])
generator.group2.add().field1.MergeFrom(messages[2])
data = generator.SerializeToString()
parsing_merge = unittest_pb2.TestParsingMerge()
parsing_merge.ParseFromString(data)
# Required and optional fields should be merged.
self.assertEqual(parsing_merge.required_all_types, merged_message)
self.assertEqual(parsing_merge.optional_all_types, merged_message)
self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
merged_message)
self.assertEqual(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.optional_ext],
merged_message)
# Repeated fields should not be merged.
self.assertEqual(len(parsing_merge.repeated_all_types), 3)
self.assertEqual(len(parsing_merge.repeatedgroup), 3)
self.assertEqual(len(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.repeated_ext]), 3)
def testSortEmptyRepeatedCompositeContainer(self):
"""Exercise a scenario that has led to segfaults in the past.
"""
m = unittest_pb2.TestAllTypes()
m.repeated_nested_message.sort()
if __name__ == '__main__':
unittest.main()
|
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/editing/firstParamMetaClass.py
|
83
|
class A(type):
@classmethod
def f<caret>
|
serapio/kwaras
|
refs/heads/master
|
src/conf/__init__.py
|
2
|
__author__ = 'Lucien'
|
synergeticsedx/deployment-wipro
|
refs/heads/oxa/master.fic
|
lms/djangoapps/lms_xblock/migrations/0001_initial.py
|
87
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='XBlockAsidesConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('disabled_blocks', models.TextField(default=b'about course_info static_tab', help_text=b'Space-separated list of XBlocks on which XBlockAsides should never render.')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
hwstreaming/flink
|
refs/heads/master
|
flink-libraries/flink-python/src/main/python/org/apache/flink/python/api/flink/connection/Iterator.py
|
21
|
# ###############################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from struct import unpack
from collections import deque
try:
import _abcoll as defIter
except:
import _collections_abc as defIter
from flink.connection.Constants import Types
#=====Iterator==========================================================================================================
class ListIterator(defIter.Iterator):
def __init__(self, values):
super(ListIterator, self).__init__()
self._values = deque(values)
def __next__(self):
return self.next()
def next(self):
if self.has_next():
return self._values.popleft()
else:
raise StopIteration
def has_next(self):
return self._values
class GroupIterator(defIter.Iterator):
def __init__(self, iterator, keys=None):
super(GroupIterator, self).__init__()
self.iterator = iterator
self.key = None
self.keys = keys
if self.keys is None:
self._extract_keys = self._extract_keys_id
self.cur = None
self.empty = False
def _init(self):
if self.iterator.has_next():
self.empty = False
self.cur = self.iterator.next()
self.key = self._extract_keys(self.cur)
else:
self.empty = True
def __next__(self):
return self.next()
def next(self):
if self.has_next():
tmp = self.cur
if self.iterator.has_next():
self.cur = self.iterator.next()
if self.key != self._extract_keys(self.cur):
self.empty = True
else:
self.cur = None
self.empty = True
return tmp[1]
else:
raise StopIteration
def has_next(self):
if self.empty:
return False
return self.key == self._extract_keys(self.cur)
def has_group(self):
return self.cur is not None
def next_group(self):
self.key = self._extract_keys(self.cur)
self.empty = False
def _extract_keys(self, x):
return [x[0][k] for k in self.keys]
def _extract_keys_id(self, x):
return x
class CoGroupIterator(object):
NONE_REMAINED = 1
FIRST_REMAINED = 2
SECOND_REMAINED = 3
FIRST_EMPTY = 4
SECOND_EMPTY = 5
def __init__(self, c1, c2, k1, k2):
self.i1 = GroupIterator(c1, k1)
self.i2 = GroupIterator(c2, k2)
self.p1 = None
self.p2 = None
self.match = None
self.key = None
def _init(self):
self.i1._init()
self.i2._init()
def next(self):
first_empty = True
second_empty = True
if self.match != CoGroupIterator.FIRST_EMPTY:
if self.match == CoGroupIterator.FIRST_REMAINED:
first_empty = False
else:
if self.i1.has_group():
self.i1.next_group()
self.key = self.i1.key
first_empty = False
if self.match != CoGroupIterator.SECOND_EMPTY:
if self.match == CoGroupIterator.SECOND_REMAINED:
second_empty = False
else:
if self.i2.has_group():
self.i2.next_group()
second_empty = False
if first_empty and second_empty:
return False
elif first_empty and (not second_empty):
self.p1 = DummyIterator()
self.p2 = self.i2
self.match = CoGroupIterator.FIRST_EMPTY
return True
elif (not first_empty) and second_empty:
self.p1 = self.i1
self.p2 = DummyIterator()
self.match = CoGroupIterator.SECOND_EMPTY
return True
else:
if self.key == self.i2.key:
self.p1 = self.i1
self.p2 = self.i2
self.match = CoGroupIterator.NONE_REMAINED
elif self.key < self.i2.key:
self.p1 = self.i1
self.p2 = DummyIterator()
self.match = CoGroupIterator.SECOND_REMAINED
else:
self.p1 = DummyIterator()
self.p2 = self.i2
self.match = CoGroupIterator.FIRST_REMAINED
return True
class Iterator(defIter.Iterator):
def __init__(self, con, env, group=0):
super(Iterator, self).__init__()
self._connection = con
self._init = True
self._group = group
self._deserializer = None
self._env = env
self._size = 0
def __next__(self):
return self.next()
def _read(self, des_size):
return self._connection.read(des_size, self._group)
def next(self):
if self.has_next():
custom_types = self._env._types
read = self._read
if self._deserializer is None:
type = read(1)
if type == Types.TYPE_ARRAY:
key_des = _get_deserializer(read, custom_types)
self._deserializer = ArrayDeserializer(key_des)
return key_des.deserialize(read)
elif type == Types.TYPE_KEY_VALUE:
size = ord(read(1))
key_des = []
keys = []
for _ in range(size):
new_d = _get_deserializer(read, custom_types)
key_des.append(new_d)
keys.append(new_d.deserialize(read))
val_des = _get_deserializer(read, custom_types)
val = val_des.deserialize(read)
self._deserializer = KeyValueDeserializer(key_des, val_des)
return (tuple(keys), val)
elif type == Types.TYPE_VALUE_VALUE:
des1 = _get_deserializer(read, custom_types)
field1 = des1.deserialize(read)
des2 = _get_deserializer(read, custom_types)
field2 = des2.deserialize(read)
self._deserializer = ValueValueDeserializer(des1, des2)
return (field1, field2)
else:
raise Exception("Invalid type ID encountered: " + str(ord(type)))
return self._deserializer.deserialize(self._read)
else:
raise StopIteration
def has_next(self):
return self._connection.has_next(self._group)
def _reset(self):
self._deserializer = None
class PlanIterator(object):
def __init__(self, con, env):
self._connection = con
self._env = env
def next(self):
deserializer = _get_deserializer(self._connection.read, self._env._types)
return deserializer.deserialize(self._connection.read)
class DummyIterator(Iterator):
def __init__(self):
super(Iterator, self).__init__()
def __next__(self):
raise StopIteration
def next(self):
raise StopIteration
def has_next(self):
return False
#=====Deserializer======================================================================================================
def _get_deserializer(read, custom_types):
type = read(1)
if 0 < ord(type) < 26:
return TupleDeserializer([_get_deserializer(read, custom_types) for _ in range(ord(type))])
elif type == Types.TYPE_BYTE:
return ByteDeserializer()
elif type == Types.TYPE_BYTES:
return ByteArrayDeserializer()
elif type == Types.TYPE_BOOLEAN:
return BooleanDeserializer()
elif type == Types.TYPE_FLOAT:
return FloatDeserializer()
elif type == Types.TYPE_DOUBLE:
return DoubleDeserializer()
elif type == Types.TYPE_INTEGER:
return IntegerDeserializer()
elif type == Types.TYPE_LONG:
return LongDeserializer()
elif type == Types.TYPE_STRING:
return StringDeserializer()
elif type == Types.TYPE_NULL:
return NullDeserializer()
else:
for entry in custom_types:
if type == entry[0]:
return CustomTypeDeserializer(entry[3])
raise Exception("Unable to find deserializer for type ID " + str(ord(type)))
class Deserializer(object):
def get_type_info_size(self):
return 1
def deserialize(self, read):
pass
class ArrayDeserializer(Deserializer):
def __init__(self, deserializer):
self._deserializer = deserializer
self._d_skip = deserializer.get_type_info_size()
def deserialize(self, read):
read(1) #array type
read(self._d_skip)
return self._deserializer.deserialize(read)
class KeyValueDeserializer(Deserializer):
def __init__(self, key_deserializer, value_deserializer):
self._key_deserializer = [(k, k.get_type_info_size()) for k in key_deserializer]
self._value_deserializer = value_deserializer
self._value_deserializer_skip = value_deserializer.get_type_info_size()
def deserialize(self, read):
fields = []
read(1) #key value type
read(1) #key count
for dk in self._key_deserializer:
read(dk[1])
fields.append(dk[0].deserialize(read))
dv = self._value_deserializer
read(self._value_deserializer_skip)
return (tuple(fields), dv.deserialize(read))
class ValueValueDeserializer(Deserializer):
def __init__(self, d1, d2):
self._d1 = d1
self._d1_skip = self._d1.get_type_info_size()
self._d2 = d2
self._d2_skip = self._d2.get_type_info_size()
def deserialize(self, read):
read(1)
read(self._d1_skip)
f1 = self._d1.deserialize(read)
read(self._d2_skip)
f2 = self._d2.deserialize(read)
return (f1, f2)
class CustomTypeDeserializer(Deserializer):
def __init__(self, deserializer):
self._deserializer = deserializer
def deserialize(self, read):
read(4) #discard binary size
return self._deserializer.deserialize(read)
class TupleDeserializer(Deserializer):
def __init__(self, deserializer):
self._deserializer = deserializer
def get_type_info_size(self):
return 1 + sum([d.get_type_info_size() for d in self._deserializer])
def deserialize(self, read):
return tuple([s.deserialize(read) for s in self._deserializer])
class ByteDeserializer(Deserializer):
def deserialize(self, read):
return unpack(">c", read(1))[0]
class ByteArrayDeserializer(Deserializer):
def deserialize(self, read):
size = unpack(">i", read(4))[0]
return bytearray(read(size)) if size else bytearray(b"")
class BooleanDeserializer(Deserializer):
def deserialize(self, read):
return unpack(">?", read(1))[0]
class FloatDeserializer(Deserializer):
def deserialize(self, read):
return unpack(">f", read(4))[0]
class DoubleDeserializer(Deserializer):
def deserialize(self, read):
return unpack(">d", read(8))[0]
class IntegerDeserializer(Deserializer):
def deserialize(self, read):
return unpack(">i", read(4))[0]
class LongDeserializer(Deserializer):
def deserialize(self, read):
return unpack(">q", read(8))[0]
class StringDeserializer(Deserializer):
def deserialize(self, read):
length = unpack(">i", read(4))[0]
return read(length).decode("utf-8") if length else ""
class NullDeserializer(Deserializer):
def deserialize(self, read):
return None
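# Hedged illustration (not part of the original module): every deserializer
# pulls bytes through a read(n) callable, so an in-memory buffer is enough to
# drive one. StringDeserializer, for example, expects a big-endian 4-byte
# length followed by the UTF-8 payload:
#
#   import io
#   buf = io.BytesIO(b"\x00\x00\x00\x05hello")
#   assert StringDeserializer().deserialize(buf.read) == "hello"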
|
alheinecke/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/python/training/input_test.py
|
18
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import input as inp
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.util import compat
class MatchFilenamesOnceTest(test_lib.TestCase):
def test(self):
temp_dir = self.get_temp_dir()
filenames = [os.path.join(temp_dir, n) for n in os.listdir(temp_dir)]
additional = [
os.path.join(self.get_temp_dir(), "match_filenames.%d" % i)
for i in range(3)
]
for name in additional:
open(name, "w").write("Some contents")
filenames = list(set(filenames + additional))
with self.test_session():
star = inp.match_filenames_once(os.path.join(self.get_temp_dir(), "*"))
question = inp.match_filenames_once(
os.path.join(self.get_temp_dir(), "match_filenames.?"))
one = inp.match_filenames_once(additional[1])
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
self.assertItemsEqual(map(compat.as_bytes, filenames), star.eval())
self.assertItemsEqual(map(compat.as_bytes, additional), question.eval())
self.assertItemsEqual([compat.as_bytes(additional[1])], one.eval())
class LimitEpochsTest(test_lib.TestCase):
def testNoLimit(self):
with self.test_session():
seven = constant_op.constant(7)
seven_forever = inp.limit_epochs(seven)
variables.local_variables_initializer().run()
for _ in range(100):
self.assertEqual(7, seven_forever.eval())
def testLimit(self):
with self.test_session():
love_me = constant_op.constant("Love Me")
love_me_two_times = inp.limit_epochs(love_me, num_epochs=2)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
self.assertEqual(b"Love Me", love_me_two_times.eval())
self.assertEqual(b"Love Me", love_me_two_times.eval())
with self.assertRaises(errors_impl.OutOfRangeError):
love_me_two_times.eval()
class InputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.test_session():
input_tensor = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
num_epochs = 2
queue = inp.input_producer(
input_tensor, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_tensor) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_tensor * num_epochs, dequeue_many.eval())
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testNoShapeInference(self):
with self.test_session():
# Disable shape inference for the input.
input_value = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
input_tensor = array_ops.placeholder_with_default(input_value, shape=None)
num_epochs = 2
queue = inp.input_producer(
input_tensor, element_shape=[4], num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_value) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_value * num_epochs, dequeue_many.eval())
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShapeError(self):
input_tensor = array_ops.placeholder(dtypes.float32, None)
with self.assertRaisesRegexp(ValueError, "fully defined shape"):
_ = inp.input_producer(input_tensor)
class StringInputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.test_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
num_epochs = 3
queue = inp.string_input_producer(
strings, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(strings) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = dequeue_many.eval()
self.assertAllEqual(strings * num_epochs, output)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session():
strings = [b"a", b"b", b"c"]
num_epochs = 600
queue = inp.string_input_producer(
strings, num_epochs=num_epochs, shuffle=True, seed=271828)
dequeue_many = queue.dequeue_many(len(strings))
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the strings within an epoch and
# count how often each possible order appears.
expected = [b"abc", b"acb", b"bac", b"bca", b"cab", b"cba"]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = dequeue_many.eval()
key = b"".join(output)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testNullStringPython(self):
# Graph-construction time check for empty string list:
with self.test_session():
with self.assertRaises(ValueError):
_ = inp.string_input_producer([])
def testNullString(self):
# Runtime check for empty string list. This is slightly oblique:
# The queue runner should die with an assertion error on the null
# input tensor, causing the dequeue to fail with an OutOfRangeError.
with self.test_session():
coord = coordinator.Coordinator()
queue = inp.string_input_producer(
constant_op.constant(
[], dtype=dtypes.string))
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners(coord=coord)
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
coord.request_stop()
for thread in threads:
thread.join()
def testSharedName(self):
with self.test_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
queue = inp.string_input_producer(
strings, shared_name="SHARED_NAME_XYZ", name="Q")
self.assertProtoEquals("s: 'SHARED_NAME_XYZ'",
queue.queue_ref.op.node_def.attr["shared_name"])
def testConstructionRace(self):
with self.test_session() as sess:
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
queue = inp.string_input_producer(strings, shuffle=False)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(2):
for string in strings:
# NOTE(mrry): This is not the recommended way to write
# dequeuing code (instead you should create a single dequeue
# op before starting the queue runners, and run it
# repeatedly), because it leads to concurrent reading and
# writing of the `tf.Graph` object. However, many users
# write code this way, so we include this test to ensure
# that we can support it.
self.assertEquals(string, sess.run(queue.dequeue()))
coord.request_stop()
coord.join(threads)
class RangeInputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.test_session():
num_epochs = 3
range_size = 5
queue = inp.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(range_size * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = dequeue_many.eval()
self.assertAllEqual(list(xrange(range_size)) * num_epochs, output)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session():
num_epochs = 200
range_size = 2
queue = inp.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=True, seed=314159)
dequeue_many = queue.dequeue_many(range_size)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [12, 21]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = dequeue_many.eval()
key = 10 * (output[0] + 1) + (output[1] + 1)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testSharedName(self):
with self.test_session():
range_size = 5
queue = inp.range_input_producer(
range_size, shared_name="SHARED_NAME_XYZ", name="Q")
self.assertProtoEquals("s: 'SHARED_NAME_XYZ'",
queue.queue_ref.op.node_def.attr["shared_name"])
class SliceInputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.test_session() as sess:
num_epochs = 3
source_strings = [b"Alpha", b"Beta", b"Delta", b"Gamma"]
source_ints = [2, 3, 5, 7]
slices = inp.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=False)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
num_items = len(source_strings) * num_epochs
output = [sess.run(slices) for _ in range(num_items)]
out_strings, out_ints = zip(*output)
self.assertAllEqual(source_strings * num_epochs, out_strings)
self.assertAllEqual(source_ints * num_epochs, out_ints)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session() as sess:
num_epochs = 1200
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = inp.slice_input_producer(
[source_strings, source_ints],
num_epochs=num_epochs,
shuffle=True,
seed=161803)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [
b",".join(x)
for x in itertools.permutations([b"A7", b"B3", b"D5", b"G2"])
]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = [sess.run(slices) for _ in range(len(source_strings))]
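        # Build a key such as b"A7,B3,D5,G2" recording the order seen this epoch.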
key = b",".join([s + compat.as_bytes(str(i)) for s, i in output])
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
def testSharedName(self):
with self.test_session():
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = inp.slice_input_producer(
[source_strings, source_ints],
shared_name="SHARED_NAME_XYZ",
name="sip")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
slices[0].op.inputs[1].op.inputs[0].op.node_def.attr["shared_name"])


class DictHelperTest(test_lib.TestCase):
def testListInputs(self):
l = [1, 2, 3, 11, 22, 33]
l2 = inp._as_tensor_list(l)
    self.assertEqual(l, l2)
    l3 = inp._as_original_type(l, l2)
    self.assertEqual(l, l3)
def testDictInputs(self):
d = {"a": 1, "b": 2, "c": 3, "aa": 11, "bb": 22, "cc": 33}
l = inp._as_tensor_list(d)
    self.assertEqual([1, 11, 2, 22, 3, 33], l)
    d2 = inp._as_original_type(d, l)
    self.assertEqual(d, d2)


class BatchTest(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
if use_dict:
batched = inp.batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch(
[counter, sparse_counter, "string"], batch_size=batch_size)
batched_fetch = batched
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
def testOneThreadDynamicPad(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
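      # Each example is ["string"] tiled `counter` times; with dynamic_pad,
      # shorter rows are right-padded with b"" to the longest row in the batch.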
string = array_ops.tile(["string"],
math_ops.to_int32(array_ops.stack([counter])))
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
batched = inp.batch(
[counter, string], batch_size=batch_size, dynamic_pad=True)
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
expected_results = np.arange(i * batch_size, (i + 1) * batch_size)
max_len = expected_results[-1]
self.assertAllEqual(results[0], expected_results)
expected_strings = [[b"string"] * rep + [b""] * (max_len - rep)
for rep in expected_results]
self.assertAllEqual(results[1], expected_strings)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testOneThreadEnqueueMany(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
pre_batched = inp.batch([counter, sparse_counter, "string"], batch_size=2)
batched = inp.batch(pre_batched, enqueue_many=True, batch_size=batch_size)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].values,
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testOneThreadSmallerBatch(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = sess.run(batched)
self.assertAllEqual(results[0],
np.arange(num_batches * batch_size,
num_batches * batch_size + extra_elements))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * extra_elements) // 2, # 0, 0, 1, 1, ...
[0, 1] * extra_elements)).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 2])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreadsSmallerBatch(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = sess.run(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(extra_elements), np.zeros(extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
self.assertItemsEqual(all_counts,
range(num_batches * batch_size + extra_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with self.test_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch(
[counter, "string"],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def testCannotInferRankError(self):
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
inp.batch([x], batch_size=2)
def testBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
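    # Batching adds a leading batch dimension, so the rank grows from 1 to 2.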
batched = inp.batch([sparse], batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testSingleElementDict(self):
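    # A single dict entry with a length-2 value batches to shape (batch_size, 2).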
x = inp.batch({"c": [12, 12]}, batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.test_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
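      # Keep only inputs with an even counter (a per-example bool vector when
      # enqueue_many=True).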
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch(
to_batch,
keep_input,
batch_size,
num_threads=num_threads,
enqueue_many=enqueue_many)
      variables.global_variables_initializer().run()
      variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
  def testSingleThreadKeepInputPerExample(self):
    self._testKeepInputHelper(1, True, keep_input_vector=True)
  def testMultipleThreadKeepInputPerExample(self):
    self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch(
[sparse], keep_input=True, batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch(
[sparse], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch(
[sparse], keep_input=True, batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch(
[sparse], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())


class BatchJoinTest(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with self.test_session() as sess:
# Two threads, the first generates (0..69, "a").
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(batch_size, len(results[0]))
self.assertEqual(batch_size, len(results[2]))
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
def testMismatchedDictKeys(self):
with self.assertRaisesRegexp(ValueError, "must have the same keys"):
inp.batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8)
def testTwoThreadsDynamicPad(self):
with self.test_session() as sess:
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(["a"],
math_ops.to_int32(array_ops.stack([counter + 1])))
b = array_ops.tile(["b"],
math_ops.to_int32(array_ops.stack([ninety_nine])))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size, None), batched[1].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
self.assertEqual(2, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testTwoThreadsSmallerBatch(self):
with self.test_session() as sess:
extra_elements = 2
      # Two threads, the first generates (0..71, "a"), including the extras.
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
      # The second generates (99, "b") 92 times (including extras), then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = sess.run(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testTwoThreadsDynamicPadSmallerBatch(self):
with self.test_session() as sess:
extra_elements = 2
      # Two threads, the first generates (0..71, ["a"] * 1..72), with extras.
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(["a"],
math_ops.to_int32(array_ops.stack([counter + 1])))
b = array_ops.tile(["b"],
math_ops.to_int32(array_ops.stack([ninety_nine])))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, None), batched[1].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = sess.run(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[1]), 2 * extra_elements)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with self.test_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch_join(
[[counter, "string"]],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def testCannotInferRankError(self):
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
inp.batch_join([[x]], batch_size=2)
def testSingleElementDict(self):
x = inp.batch_join([{"c": [12, 12]}], batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.test_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
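      # Keep even counters only; the same tensor list is replicated below so
      # maybe_batch_join runs num_threads enqueue threads.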
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch_join(
[to_batch] * num_threads,
keep_input,
batch_size,
enqueue_many=enqueue_many)
      variables.global_variables_initializer().run()
      variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
        self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
        self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=True, batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=True, batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())


class ShuffleBatchTest(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
if use_dict:
batched = inp.shuffle_batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421)
batched_fetch = batched
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
def testOneThreadSmallerBatch(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
total_elements = num_batches * batch_size + extra_elements
counter = examples.count_up_to(total_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421,
allow_smaller_final_batch=True)
batched_fetch = batched
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for _ in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra elements.
results = sess.run(batched)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
all_counts.extend(results[0])
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(total_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=173205,
num_threads=4)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreadsSmallerBatch(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
total_elements = num_batches * batch_size + extra_elements
counter = examples.count_up_to(total_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=173205,
num_threads=4,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra elements.
results = sess.run(batched)
self.assertAllEqual(results[0].shape, [extra_elements])
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
all_counts.extend(results[0])
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(total_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with self.test_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.shuffle_batch(
[counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=10,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.test_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
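      # keep_input marks the examples with even counter values for enqueueing.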
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
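      # Positional args below: batch_size, capacity=10, min_after_dequeue=1,
      # then keep_input.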
batched = inp.maybe_shuffle_batch(
to_batch,
batch_size,
10,
1,
keep_input,
num_threads=num_threads,
enqueue_many=enqueue_many)
      variables.global_variables_initializer().run()
      variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=constant_op.constant([True, False]),
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=constant_op.constant([[True]]),
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=array_ops.placeholder(dtypes.bool),
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch([sparse], 2, 10, 1, True)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, True, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, [True, False], enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch([sparse], 2, 10, 1, True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, True, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, [True, False], enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())


class ShuffleBatchJoinTest(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with self.test_session() as sess:
# Two threads, the first generates (0..24, "a").
num_a = 25
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 35 times and then stops.
num_b = 35
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.shuffle_batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.shuffle_batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
def testTwoThreadsSmallerBatch(self):
with self.test_session() as sess:
# Two threads, the first generates (0..26, "a").
extra_elements = 2
num_a = 25 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 37 times and then stops.
num_b = 35 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.shuffle_batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached end with 2 * extra_elements left
results = sess.run(batched)
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled, including extras.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testMismatchedDictKeys(self):
with self.assertRaisesRegexp(ValueError, "must have the same keys"):
inp.shuffle_batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8,
capacity=32,
min_after_dequeue=16,
seed=223607)
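  # For contrast (an illustrative sketch, not executed here): a dict-based
  # call that satisfies the same-keys requirement checked above would look
  # like
  #   inp.shuffle_batch_join(
  #       [{"c": 12, "s": 123, "S": "a"}, {"c": -12, "s": 99, "S": "b"}],
  #       batch_size=8, capacity=32, min_after_dequeue=16, seed=223607)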
def testSharedName(self):
with self.test_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.shuffle_batch_join(
[[counter, "string"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=10,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.test_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_shuffle_batch_join(
[to_batch] * num_threads,
batch_size,
10,
1,
keep_input,
enqueue_many=enqueue_many)
      variables.global_variables_initializer().run()
      variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=constant_op.constant([True, False]),
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=constant_op.constant([[True]]),
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=array_ops.placeholder(dtypes.bool),
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, True, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, [True, False], enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, True, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, [True, False], enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
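# A minimal usage sketch (illustrative only, not exercised by these tests):
# joining two hypothetical input pipelines into shuffled batches. The tensor
# names below are made up; the keyword arguments mirror the calls above.
#   batched = inp.shuffle_batch_join(
#       [tensors_from_pipeline_a, tensors_from_pipeline_b],
#       batch_size=32, capacity=1000, min_after_dequeue=500)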
if __name__ == "__main__":
test_lib.main()
|
Suwmlee/XX-Net
|
refs/heads/python3
|
Python3/lib/encodings/iso8859_6.py
|
37
|
""" Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-6',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\ufffe'
'\ufffe'
'\ufffe'
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u060c' # 0xAC -> ARABIC COMMA
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u061b' # 0xBB -> ARABIC SEMICOLON
'\ufffe'
'\ufffe'
'\ufffe'
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\ufffe'
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\u0637' # 0xD7 -> ARABIC LETTER TAH
'\u0638' # 0xD8 -> ARABIC LETTER ZAH
'\u0639' # 0xD9 -> ARABIC LETTER AIN
'\u063a' # 0xDA -> ARABIC LETTER GHAIN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\u0640' # 0xE0 -> ARABIC TATWEEL
'\u0641' # 0xE1 -> ARABIC LETTER FEH
'\u0642' # 0xE2 -> ARABIC LETTER QAF
'\u0643' # 0xE3 -> ARABIC LETTER KAF
'\u0644' # 0xE4 -> ARABIC LETTER LAM
'\u0645' # 0xE5 -> ARABIC LETTER MEEM
'\u0646' # 0xE6 -> ARABIC LETTER NOON
'\u0647' # 0xE7 -> ARABIC LETTER HEH
'\u0648' # 0xE8 -> ARABIC LETTER WAW
'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEA -> ARABIC LETTER YEH
'\u064b' # 0xEB -> ARABIC FATHATAN
'\u064c' # 0xEC -> ARABIC DAMMATAN
'\u064d' # 0xED -> ARABIC KASRATAN
'\u064e' # 0xEE -> ARABIC FATHA
'\u064f' # 0xEF -> ARABIC DAMMA
'\u0650' # 0xF0 -> ARABIC KASRA
'\u0651' # 0xF1 -> ARABIC SHADDA
'\u0652' # 0xF2 -> ARABIC SUKUN
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
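### Round-trip example (illustrative)
# A minimal sketch, using the same codecs helpers the Codec class calls, of
# how the tables above map bytes to characters and back; the byte/character
# pair is read off the decoding table (0xC7 -> ARABIC LETTER ALEF):
#
#   text, _ = codecs.charmap_decode(b'\xc7', 'strict', decoding_table)
#   assert text == '\u0627'
#   raw, _ = codecs.charmap_encode(text, 'strict', encoding_table)
#   assert raw == b'\xc7'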
|
DailyActie/Surrogate-Model
|
refs/heads/master
|
01-codes/pyOpt-1.2.0/examples/rosenbrock.py
|
1
|
#!/usr/bin/env python
'''
Solves Rosenbrock's Unconstrained Problem.
min 100*(x2 - x1**2)**2 + (1 - x1)**2
s.t.: -10 <= xi <= 10, i = 1,2
f* = 0, x* = [1, 1]
'''
# =============================================================================
# Standard Python modules
# =============================================================================
# =============================================================================
# Extension modules
# =============================================================================
# from pyOpt import *
from pyOpt import COBYLA
from pyOpt import CONMIN
from pyOpt import KSOPT
from pyOpt import NSGA2
from pyOpt import Optimization
from pyOpt import PSQP
from pyOpt import SDPEN
from pyOpt import SLSQP
from pyOpt import SOLVOPT
# =============================================================================
#
# =============================================================================
def objfunc(x):
f = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
g = []
fail = 0
return f, g, fail
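# Quick sanity check (an illustrative addition, not part of the original
# example): the known optimum x* = [1, 1] gives f* = 0 for objfunc above.
assert objfunc([1.0, 1.0])[0] == 0.0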
# =============================================================================
#
# =============================================================================
opt_prob = Optimization('Rosenbrock Unconstrained Problem', objfunc)
opt_prob.addVar('x1', 'c', lower=-10.0, upper=10.0, value=-3.0)
opt_prob.addVar('x2', 'c', lower=-10.0, upper=10.0, value=-4.0)
opt_prob.addObj('f')
print(opt_prob)
# Instantiate Optimizer (PSQP) & Solve Problem
psqp = PSQP()
psqp.setOption('IPRINT', 0)
psqp(opt_prob, sens_type='FD')
print(opt_prob.solution(0))
# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
slsqp.setOption('IPRINT', -1)
slsqp(opt_prob, sens_type='FD')
print(opt_prob.solution(1))
# Instantiate Optimizer (CONMIN) & Solve Problem
conmin = CONMIN()
conmin.setOption('IPRINT', 0)
conmin(opt_prob, sens_type='CS')
print(opt_prob.solution(2))
# Instantiate Optimizer (COBYLA) & Solve Problem
cobyla = COBYLA()
cobyla.setOption('IPRINT', 0)
cobyla(opt_prob)
print(opt_prob.solution(3))
# Instantiate Optimizer (SOLVOPT) & Solve Problem
solvopt = SOLVOPT()
solvopt.setOption('iprint', -1)
solvopt(opt_prob, sens_type='FD')
print(opt_prob.solution(4))
# Instantiate Optimizer (KSOPT) & Solve Problem
ksopt = KSOPT()
ksopt.setOption('IPRINT', 0)
ksopt(opt_prob, sens_type='FD')
print(opt_prob.solution(5))
# Instantiate Optimizer (NSGA2) & Solve Problem
nsga2 = NSGA2()
nsga2.setOption('PrintOut', 0)
nsga2(opt_prob)
print(opt_prob.solution(6))
# Instantiate Optimizer (SDPEN) & Solve Problem
sdpen = SDPEN()
sdpen.setOption('iprint', -1)
sdpen(opt_prob)
print(opt_prob.solution(7))
|
MarkusH/pelican-plugins
|
refs/heads/master
|
better_tables/__init__.py
|
57
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Alex Waite
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__title__ = 'better-tables'
__version__ = '0.1.0'
__author__ = 'Alex Waite'
__credits__ = ["Alex Waite"]
__maintainer__ = "Alex Waite"
__email__ = "Alexqw85@gmail.com"
__status__ = "Stable"
__license__ = 'MIT'
__copyright__ = 'Copyright 2015'
from .better_tables import *
|
provaleks/o8
|
refs/heads/8.0
|
addons/auth_oauth/res_config.py
|
292
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv, fields
import logging
_logger = logging.getLogger(__name__)
class base_config_settings(osv.TransientModel):
_inherit = 'base.config.settings'
_columns = {
'auth_oauth_google_enabled' : fields.boolean('Allow users to sign in with Google'),
'auth_oauth_google_client_id' : fields.char('Client ID'),
'auth_oauth_facebook_enabled' : fields.boolean('Allow users to sign in with Facebook'),
'auth_oauth_facebook_client_id' : fields.char('Client ID'),
}
def default_get(self, cr, uid, fields, context=None):
res = super(base_config_settings, self).default_get(cr, uid, fields, context=context)
res.update(self.get_oauth_providers(cr, uid, fields, context=context))
return res
def get_oauth_providers(self, cr, uid, fields, context=None):
google_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_google')[1]
facebook_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_facebook')[1]
rg = self.pool.get('auth.oauth.provider').read(cr, uid, [google_id], ['enabled','client_id'], context=context)
rf = self.pool.get('auth.oauth.provider').read(cr, uid, [facebook_id], ['enabled','client_id'], context=context)
return {
'auth_oauth_google_enabled': rg[0]['enabled'],
'auth_oauth_google_client_id': rg[0]['client_id'],
'auth_oauth_facebook_enabled': rf[0]['enabled'],
'auth_oauth_facebook_client_id': rf[0]['client_id'],
}
def set_oauth_providers(self, cr, uid, ids, context=None):
google_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_google')[1]
facebook_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'auth_oauth', 'provider_facebook')[1]
config = self.browse(cr, uid, ids[0], context=context)
rg = {
'enabled':config.auth_oauth_google_enabled,
'client_id':config.auth_oauth_google_client_id,
}
rf = {
'enabled':config.auth_oauth_facebook_enabled,
'client_id':config.auth_oauth_facebook_client_id,
}
self.pool.get('auth.oauth.provider').write(cr, uid, [google_id], rg)
self.pool.get('auth.oauth.provider').write(cr, uid, [facebook_id], rf)
|
evaschalde/odoo
|
refs/heads/master
|
addons/account_anglo_saxon/product.py
|
384
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_category(osv.osv):
_inherit = "product.category"
_columns = {
'property_account_creditor_price_difference_categ': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
        # Redefine fields to change the help text for Anglo-Saxon methodology.
'property_account_income_categ': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used to value outgoing stock using sale price."),
'property_account_expense_categ': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used to value outgoing stock using cost price."),
}
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'property_account_creditor_price_difference': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
        # Redefine fields to change the help text for Anglo-Saxon methodology.
'property_account_income': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used to value outgoing stock using sale price."),
'property_account_expense': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used to value outgoing stock using cost price."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Aalto-LeTech/a-plus
|
refs/heads/master
|
external_services/api/authentication.py
|
1
|
import datetime
import logging
import oauthlib.oauth1.rfc5849
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from external_services.models import LTIService
from .oauth_nonce_cache import OAuthNonceCache
from .parsers import parse_sourced_id
from userprofile.models import LTIServiceUser
logger = logging.getLogger('aplus.external_services.api')
def verify_oauth_body_hash_and_signature(request, req_body_hash, lti_exercise=None):
'''
    Verify that the request has a valid OAuth 1.0 signature and body hash.
@param request Django HttpRequest
@param req_body_hash base64-encoded SHA-1 hash of the request body (string)
    @param lti_exercise LTIExercise instance, used to verify that the LTI
        service set for the exercise matches the oauth_consumer_key
        parameter of the request.
    @return tuple (boolean, error_message); the boolean is True if
        verification succeeded, False otherwise.
'''
headers = {
'Content-Type': request.content_type,
'Authorization': request.META.get('HTTP_AUTHORIZATION'),
'Host': request.META.get('HTTP_HOST'),
}
# all OAuth parameters must be given in the HTTP Authorization header
# (not in POST data or GET query parameters) when a body hash is used
# to secure the request body
all_req_oauth_params = oauthlib.oauth1.rfc5849.signature.collect_parameters(
headers=headers, exclude_oauth_signature=False)
# collect_parameters returns a list of key-value pairs
req_oauth_params_dict = dict(all_req_oauth_params)
# check oauth_consumer_key and find the corresponding secret
consumer_key = req_oauth_params_dict.get('oauth_consumer_key')
if not consumer_key:
return False, 'oauth_consumer_key missing'
try:
lti_service = LTIService.objects.get(consumer_key=consumer_key)
except (LTIService.DoesNotExist, LTIService.MultipleObjectsReturned):
return False, 'unknown oauth_consumer_key'
if lti_exercise and lti_exercise.lti_service.pk != lti_service.pk:
# the consumer key refers to a different LTI service than the exercise
return False, 'oauth_consumer_key mismatch'
client_secret = lti_service.consumer_secret
# check the OAuth timestamp. Do not allow old requests in order to prevent replay attacks.
try:
timestamp = datetime.datetime.utcfromtimestamp(int(req_oauth_params_dict.get('oauth_timestamp')))
# oauth_timestamp: seconds since January 1, 1970 00:00:00 GMT
except ValueError:
return False, 'oauth_timestamp is missing or has an invalid format'
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=OAuthNonceCache.CACHE_TIMEOUT_SECONDS)
if not (now - delta < timestamp and timestamp < now + delta):
return False, 'oauth_timestamp has expired'
# check OAuth nonce: The nonce value MUST be unique across all requests with
# the same timestamp, client credentials, and token combinations.
# Previously seen nonces are kept in the cache for a few minutes
# (the duration must match the accepted timestamp age).
nonce = req_oauth_params_dict.get('oauth_nonce')
if not nonce:
return False, 'oauth_nonce missing'
nonce_cache = OAuthNonceCache(nonce, req_oauth_params_dict.get('oauth_timestamp'), client_secret)
if nonce_cache.nonce_used():
return False, 'oauth_nonce has been used'
if req_body_hash != req_oauth_params_dict.get('oauth_body_hash'):
return False, 'oauth_body_hash verification failed'
# verify the signature
oauth_request = oauthlib.common.Request(request.build_absolute_uri(), http_method=request.method, headers=headers)
# unfortunately, the request class is simple and we have to set the OAuth parameters manually like this
oauth_signature = req_oauth_params_dict.pop('oauth_signature')
oauth_request.params = list(req_oauth_params_dict.items()) # list of key-value pairs; must not include oauth_signature
oauth_request.signature = oauth_signature
if not oauthlib.oauth1.rfc5849.signature.verify_hmac_sha1(oauth_request, client_secret=client_secret):
return False, 'oauth_signature verification failed'
return True, ''
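# Illustrative sketch (an assumption; not called anywhere in this module):
# how the oauth_body_hash verified above is produced on the sending side.
# Per the OAuth Request Body Hash extension, it is the base64-encoded SHA-1
# digest of the raw request body.
def compute_oauth_body_hash(body_bytes):
    import base64
    import hashlib
    return base64.b64encode(hashlib.sha1(body_bytes).digest()).decode('ascii')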
class OAuthBodyHashAuthentication(BaseAuthentication):
def authenticate(self, request):
if 'HTTP_AUTHORIZATION' not in request.META:
return None
data = request.data # activates the request body parser if the body has not been parsed yet
# assume that the request was parsed with the LTI Outcome parser, so
# the request.data contains the following keys
exercise, user_profile = parse_sourced_id(data.get('sourced_id', ''))
if exercise is None or user_profile is None:
            # cannot find the exercise or user corresponding to the sourced id
logger.warning('Invalid sourcedId in LTI Outcomes request: %s',
data.get('sourced_id', ''))
raise AuthenticationFailed('Invalid sourcedId')
data['exercise'] = exercise
data['submitter'] = user_profile
req_body_hash = data.get('body_hash')
if not req_body_hash:
            error_msg = 'Request body hash cannot be verified'
logger.error(error_msg)
raise AuthenticationFailed(error_msg)
if not exercise.lti_service:
error_msg = 'No LTI service set for the exercise'
logger.error(error_msg)
raise AuthenticationFailed(error_msg)
oauth_ok, msg = verify_oauth_body_hash_and_signature(request, req_body_hash, exercise)
if not oauth_ok:
error_msg = 'OAuth verification failed: ' + msg
logger.warning(error_msg)
raise AuthenticationFailed(error_msg)
user = LTIServiceUser(exercise=exercise, lti_service=exercise.lti_service, student_id=user_profile.user.id)
return (user, None)
|
atsidaev/gdb-z80
|
refs/heads/master
|
gdb/contrib/excheck.py
|
41
|
# Copyright 2011-2014 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# This is a GCC plugin that computes some exception-handling data for
# gdb. This data can then be summarized and checked by the
# exsummary.py script.
# To use:
# * First, install the GCC Python plugin. See
# https://fedorahosted.org/gcc-python-plugin/
# * export PYTHON_PLUGIN=/full/path/to/plugin/directory
# This should be the directory holding "python.so".
# * cd build/gdb; make mostlyclean
# * make CC=.../gcc-with-excheck
# This will write a number of .py files in the build directory.
# * python .../exsummary.py
# This will show the violations.
import gcc
import gccutils
import sys
# Where our output goes.
output_file = None
# Cleanup functions require special treatment, because they take a
# function argument, but in theory the function must be nothrow.
cleanup_functions = {
'make_cleanup': 1,
'make_cleanup_dtor': 1,
'make_final_cleanup': 1,
'make_my_cleanup2': 1,
'make_my_cleanup': 1
}
# Functions which may throw but which we want to ignore.
ignore_functions = {
# This one is super special.
'exceptions_state_mc': 1,
# gdb generally pretends that internal_error cannot throw, even
# though it can.
'internal_error': 1,
# do_cleanups and friends are supposedly nothrow but we don't want
# to run afoul of the indirect function call logic.
'do_cleanups': 1,
'do_final_cleanups': 1
}
# Functions which take a function argument, but which are not
# interesting, usually because the argument is not called in the
# current context.
non_passthrough_functions = {
'signal': 1,
'add_internal_function': 1
}
# Return True if the type is from Python.
def type_is_pythonic(t):
if isinstance(t, gcc.ArrayType):
t = t.type
if not isinstance(t, gcc.RecordType):
return False
# Hack.
return str(t).find('struct Py') == 0
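# Example (illustrative): a declaration such as
#   static struct PyMethodDef gdb_methods[] = {...};
# matches, because str() of the (array element) record type begins with
# 'struct Py'.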
# Examine all the fields of a struct. We don't currently need any
# sort of recursion, so this is simple for now.
def examine_struct_fields(initializer):
global output_file
for idx2, value2 in initializer.elements:
if isinstance(idx2, gcc.Declaration):
if isinstance(value2, gcc.AddrExpr):
value2 = value2.operand
if isinstance(value2, gcc.FunctionDecl):
output_file.write("declare_nothrow(%s)\n"
% repr(str(value2.name)))
# Examine all global variables looking for pointers to functions in
# structures whose types were defined by Python.
def examine_globals():
global output_file
vars = gcc.get_variables()
for var in vars:
if not isinstance(var.decl, gcc.VarDecl):
continue
output_file.write("################\n")
output_file.write("# Analysis for %s\n" % var.decl.name)
if not var.decl.initial:
continue
if not type_is_pythonic(var.decl.type):
continue
if isinstance(var.decl.type, gcc.ArrayType):
for idx, value in var.decl.initial.elements:
examine_struct_fields(value)
else:
gccutils.check_isinstance(var.decl.type, gcc.RecordType)
examine_struct_fields(var.decl.initial)
# Called at the end of compilation to write out some data derived from
# globals and to close the output.
def close_output(*args):
global output_file
examine_globals()
output_file.close()
# The pass which derives some exception-checking information. We take
# a two-step approach: first we get a call graph from the compiler.
# This is emitted by the plugin as Python code. Then, we run a second
# program that reads all the generated Python and uses it to get a
# global view of exception routes in gdb.
class GdbExceptionChecker(gcc.GimplePass):
def __init__(self, output_file):
gcc.GimplePass.__init__(self, 'gdb_exception_checker')
self.output_file = output_file
def log(self, obj):
self.output_file.write("# %s\n" % str(obj))
# Return true if FN is a call to a method on a Python object.
# We know these cannot throw in the gdb sense.
def fn_is_python_ignorable(self, fn):
if not isinstance(fn, gcc.SsaName):
return False
stmt = fn.def_stmt
if not isinstance(stmt, gcc.GimpleAssign):
return False
if stmt.exprcode is not gcc.ComponentRef:
return False
rhs = stmt.rhs[0]
if not isinstance(rhs, gcc.ComponentRef):
return False
if not isinstance(rhs.field, gcc.FieldDecl):
return False
return rhs.field.name == 'tp_dealloc' or rhs.field.name == 'tp_free'
# Decode a function call and write something to the output.
# THIS_FUN is the enclosing function that we are processing.
# FNDECL is the call to process; it might not actually be a DECL
# node.
# LOC is the location of the call.
def handle_one_fndecl(self, this_fun, fndecl, loc):
callee_name = ''
if isinstance(fndecl, gcc.AddrExpr):
fndecl = fndecl.operand
if isinstance(fndecl, gcc.FunctionDecl):
# Ordinary call to a named function.
callee_name = str(fndecl.name)
self.output_file.write("function_call(%s, %s, %s)\n"
% (repr(callee_name),
repr(this_fun.decl.name),
repr(str(loc))))
elif self.fn_is_python_ignorable(fndecl):
# Call to tp_dealloc.
pass
elif (isinstance(fndecl, gcc.SsaName)
and isinstance(fndecl.var, gcc.ParmDecl)):
# We can ignore an indirect call via a parameter to the
# current function, because this is handled via the rule
# for passthrough functions.
pass
else:
# Any other indirect call.
self.output_file.write("has_indirect_call(%s, %s)\n"
% (repr(this_fun.decl.name),
repr(str(loc))))
return callee_name
# This does most of the work for examine_one_bb.
# THIS_FUN is the enclosing function.
# BB is the basic block to process.
# Returns True if this block is the header of a TRY_CATCH, False
# otherwise.
def examine_one_bb_inner(self, this_fun, bb):
if not bb.gimple:
return False
try_catch = False
for stmt in bb.gimple:
loc = stmt.loc
if not loc:
loc = this_fun.decl.location
if not isinstance(stmt, gcc.GimpleCall):
continue
callee_name = self.handle_one_fndecl(this_fun, stmt.fn, loc)
if callee_name == 'exceptions_state_mc_action_iter':
try_catch = True
global non_passthrough_functions
if callee_name in non_passthrough_functions:
continue
# We have to specially handle calls where an argument to
# the call is itself a function, e.g., qsort. In general
# we model these as "passthrough" -- we assume that in
            # addition to the call to qsort there is also a call to
# the argument function.
for arg in stmt.args:
# We are only interested in arguments which are functions.
t = arg.type
if isinstance(t, gcc.PointerType):
t = t.dereference
if not isinstance(t, gcc.FunctionType):
continue
if isinstance(arg, gcc.AddrExpr):
arg = arg.operand
global cleanup_functions
if callee_name in cleanup_functions:
if not isinstance(arg, gcc.FunctionDecl):
gcc.inform(loc, 'cleanup argument not a DECL: %s' % repr(arg))
else:
# Cleanups must be nothrow.
self.output_file.write("declare_cleanup(%s)\n"
% repr(str(arg.name)))
else:
# Assume we have a passthrough function, like
# qsort or an iterator. We model this by
# pretending there is an ordinary call at this
# point.
self.handle_one_fndecl(this_fun, arg, loc)
return try_catch
# Examine all the calls in a basic block and generate output for
# them.
# THIS_FUN is the enclosing function.
# BB is the basic block to examine.
# BB_WORKLIST is a list of basic blocks to work on; we add the
# appropriate successor blocks to this.
# SEEN_BBS is a map whose keys are basic blocks we have already
# processed. We use this to ensure that we only visit a given
# block once.
def examine_one_bb(self, this_fun, bb, bb_worklist, seen_bbs):
try_catch = self.examine_one_bb_inner(this_fun, bb)
for edge in bb.succs:
if edge.dest in seen_bbs:
continue
seen_bbs[edge.dest] = 1
if try_catch:
# This is bogus, but we magically know the right
# answer.
if edge.false_value:
bb_worklist.append(edge.dest)
else:
bb_worklist.append(edge.dest)
# Iterate over all basic blocks in THIS_FUN.
def iterate_bbs(self, this_fun):
# Iteration must be in control-flow order, because if we see a
# TRY_CATCH construct we need to drop all the contained blocks.
bb_worklist = [this_fun.cfg.entry]
seen_bbs = {}
seen_bbs[this_fun.cfg.entry] = 1
for bb in bb_worklist:
self.examine_one_bb(this_fun, bb, bb_worklist, seen_bbs)
def execute(self, fun):
if fun and fun.cfg and fun.decl:
self.output_file.write("################\n")
self.output_file.write("# Analysis for %s\n" % fun.decl.name)
self.output_file.write("define_function(%s, %s)\n"
% (repr(fun.decl.name),
repr(str(fun.decl.location))))
global ignore_functions
if fun.decl.name not in ignore_functions:
self.iterate_bbs(fun)
def main(**kwargs):
global output_file
output_file = open(gcc.get_dump_base_name() + '.gdb_exc.py', 'w')
# We used to use attributes here, but there didn't seem to be a
# big benefit over hard-coding.
output_file.write('declare_throw("throw_exception")\n')
output_file.write('declare_throw("throw_verror")\n')
output_file.write('declare_throw("throw_vfatal")\n')
output_file.write('declare_throw("throw_error")\n')
gcc.register_callback(gcc.PLUGIN_FINISH_UNIT, close_output)
ps = GdbExceptionChecker(output_file)
ps.register_after('ssa')
main()
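# For reference, the generated file consists of calls like the following
# (function and location names here are illustrative; the call forms match
# the write() calls above and are later read by exsummary.py):
#   define_function('value_copy', 'valops.c:912')
#   function_call('xmalloc', 'value_copy', 'valops.c:920')
#   has_indirect_call('value_copy', 'valops.c:931')
#   declare_cleanup('null_cleanup')
#   declare_nothrow('do_my_cleanup')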
|
datacratic/StarCluster
|
refs/heads/vanilla_improvements
|
starcluster/tests/templates/sge_balancer.py
|
19
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
qhost_xml = """<?xml version='1.0'?>
<qhost xmlns:xsd="http://gridengine.sunsource.net/source/browse/*checkout*/\
gridengine/source/dist/util/resources/schemas/qhost/qhost.xsd?revision=1.2">
<host name='global'>
<hostvalue name='arch_string'>-</hostvalue>
<hostvalue name='num_proc'>-</hostvalue>
<hostvalue name='load_avg'>-</hostvalue>
<hostvalue name='mem_total'>-</hostvalue>
<hostvalue name='mem_used'>-</hostvalue>
<hostvalue name='swap_total'>-</hostvalue>
<hostvalue name='swap_used'>-</hostvalue>
</host>
<host name='ip-10-196-142-180.ec2.internal'>
<hostvalue name='arch_string'>lx24-x86</hostvalue>
<hostvalue name='num_proc'>1</hostvalue>
<hostvalue name='load_avg'>0.03</hostvalue>
<hostvalue name='mem_total'>1.7G</hostvalue>
<hostvalue name='mem_used'>75.4M</hostvalue>
<hostvalue name='swap_total'>896.0M</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='ip-10-196-214-162.ec2.internal'>
<hostvalue name='arch_string'>lx24-x86</hostvalue>
<hostvalue name='num_proc'>1</hostvalue>
<hostvalue name='load_avg'>0.21</hostvalue>
<hostvalue name='mem_total'>1.7G</hostvalue>
<hostvalue name='mem_used'>88.9M</hostvalue>
<hostvalue name='swap_total'>896.0M</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='ip-10-196-215-50.ec2.internal'>
<hostvalue name='arch_string'>lx24-x86</hostvalue>
<hostvalue name='num_proc'>1</hostvalue>
<hostvalue name='load_avg'>0.06</hostvalue>
<hostvalue name='mem_total'>1.7G</hostvalue>
<hostvalue name='mem_used'>75.9M</hostvalue>
<hostvalue name='swap_total'>896.0M</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
</qhost>"""
qstat_xml = """<?xml version='1.0'?>
<job_info xmlns:xsd="http://gridengine.sunsource.net/source/browse/*checkout*\
/gridengine/source/dist/util/resources/schemas/qstat/qstat.xsd?revision=1.11">
<queue_info>
<Queue-List>
<name>all.q@ip-10-196-142-180.ec2.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
<job_list state="running">
<JB_job_number>1</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-06-18T23:39:24</JAT_start_time>
<queue_name>all.q@ip-10-196-142-180.ec2.internal</queue_name>
<slots>1</slots>
</job_list>
</Queue-List>
<Queue-List>
<name>all.q@ip-10-196-215-50.ec2.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
<job_list state="running">
<JB_job_number>2</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-06-18T23:39:24</JAT_start_time>
<queue_name>all.q@ip-10-196-215-50.ec2.internal</queue_name>
<slots>1</slots>
</job_list>
</Queue-List>
<Queue-List>
<name>all.q@ip-10-196-214-162.ec2.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
<job_list state="running">
<JB_job_number>3</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-06-18T23:39:24</JAT_start_time>
<queue_name>all.q@ip-10-196-214-162.ec2.internal</queue_name>
<slots>1</slots>
</job_list>
</Queue-List>
</queue_info>
<job_info>
<job_list state="pending">
<JB_job_number>4</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:14</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>5</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:14</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>6</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:14</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>7</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:15</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>8</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:15</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>9</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:16</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>10</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:16</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>11</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:17</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>12</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:35</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>13</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:35</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>14</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:36</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>15</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:36</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>16</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:37</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>17</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:37</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>18</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:38</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>19</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:38</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>20</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:38</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>21</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:39</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>22</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:39</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
<job_list state="pending">
<JB_job_number>23</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sleep</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-06-18T23:39:40</JB_submission_time>
<queue_name></queue_name>
<slots>1</slots>
</job_list>
</job_info>
</job_info>"""
loaded_qhost_xml = """<?xml version='1.0'?>
<qhost xmlns:xsd="http://gridengine.sunsource.net/source/browse/*checkout*/\
gridengine/source/dist/util/resources/schemas/qhost/qhost.xsd?revision=1.2">
<host name='global'>
<hostvalue name='arch_string'>-</hostvalue>
<hostvalue name='num_proc'>-</hostvalue>
<hostvalue name='load_avg'>-</hostvalue>
<hostvalue name='mem_total'>-</hostvalue>
<hostvalue name='mem_used'>-</hostvalue>
<hostvalue name='swap_total'>-</hostvalue>
<hostvalue name='swap_used'>-</hostvalue>
</host>
<host name='domU-12-31-39-0B-C4-61.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.32</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>997.4M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0B-C4-C1.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>9.65</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>1.0G</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0B-C6-51.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.25</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>996.6M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FC-31.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.21</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>997.2M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FC-71.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.10</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>997.0M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FC-D1.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.31</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>996.7M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FD-01.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.08</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>997.3M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FD-81.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.12</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>995.7M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FE-51.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.06</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>996.8M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
<host name='domU-12-31-39-0E-FE-71.compute-1.internal'>
<hostvalue name='arch_string'>lx24-amd64</hostvalue>
<hostvalue name='num_proc'>8</hostvalue>
<hostvalue name='load_avg'>8.17</hostvalue>
<hostvalue name='mem_total'>7.0G</hostvalue>
<hostvalue name='mem_used'>996.1M</hostvalue>
<hostvalue name='swap_total'>0.0</hostvalue>
<hostvalue name='swap_used'>0.0</hostvalue>
</host>
</qhost>"""
qacct_txt = """==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 2
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:18:33 2010
start_time Thu Jul 15 18:18:41 2010
end_time Thu Jul 15 18:19:41 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 771
ru_majflt 0
ru_nswap 0
ru_inblock 16
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 4
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A5-A1.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 1
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:18:31 2010
start_time Thu Jul 15 18:18:41 2010
end_time Thu Jul 15 18:19:41 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 792
ru_majflt 0
ru_nswap 0
ru_inblock 16
ru_oublock 160
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 86
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 4
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:18:35 2010
start_time Thu Jul 15 18:19:56 2010
end_time Thu Jul 15 18:20:56 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.010
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 773
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 1
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 0.000
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A5-A1.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 3
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:18:34 2010
start_time Thu Jul 15 18:19:56 2010
end_time Thu Jul 15 18:20:56 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.010
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 790
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 160
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 84
ru_nivcsw 0
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 6
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:18:38 2010
start_time Thu Jul 15 18:21:11 2010
end_time Thu Jul 15 18:22:11 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.010
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 773
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 1
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A5-A1.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 5
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:18:36 2010
start_time Thu Jul 15 18:21:11 2010
end_time Thu Jul 15 18:22:11 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 792
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 160
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 84
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 7
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:13 2010
start_time Thu Jul 15 18:34:26 2010
end_time Thu Jul 15 18:35:26 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.010
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 773
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 1
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 8
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:14 2010
start_time Thu Jul 15 18:35:41 2010
end_time Thu Jul 15 18:36:41 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.010
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 773
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 9
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:14 2010
start_time Thu Jul 15 18:36:56 2010
end_time Thu Jul 15 18:37:56 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.010
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 775
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 10
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:15 2010
start_time Thu Jul 15 18:38:11 2010
end_time Thu Jul 15 18:39:11 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 774
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 11
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:15 2010
start_time Thu Jul 15 18:39:26 2010
end_time Thu Jul 15 18:40:26 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.010
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 775
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 12
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:16 2010
start_time Thu Jul 15 18:40:41 2010
end_time Thu Jul 15 18:41:41 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 775
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 13
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:16 2010
start_time Thu Jul 15 18:41:56 2010
end_time Thu Jul 15 18:42:56 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 774
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 14
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:17 2010
start_time Thu Jul 15 18:43:11 2010
end_time Thu Jul 15 18:44:11 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 774
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.000
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 15
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:17 2010
start_time Thu Jul 15 18:44:26 2010
end_time Thu Jul 15 18:45:26 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.010
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 773
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 1
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 16
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:18 2010
start_time Thu Jul 15 18:45:41 2010
end_time Thu Jul 15 18:46:41 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.010
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 772
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 1
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 17
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:34:20 2010
start_time Thu Jul 15 18:46:56 2010
end_time Thu Jul 15 18:47:56 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 60
ru_utime 0.000
ru_stime 0.010
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 774
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 0
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
==============================================================
qname all.q
hostname domU-12-31-38-00-A6-41.compute-1.internal
group root
owner root
project NONE
department defaultdepartment
jobname sleep
jobnumber 18
taskid undefined
account sge
priority 0
qsub_time Thu Jul 15 18:50:58 2010
start_time Thu Jul 15 18:51:11 2010
end_time Thu Jul 15 19:01:11 2010
granted_pe NONE
slots 1
failed 0
exit_status 0
ru_wallclock 600
ru_utime 0.010
ru_stime 0.000
ru_maxrss 0
ru_ixrss 0
ru_ismrss 0
ru_idrss 0
ru_isrss 0
ru_minflt 773
ru_majflt 0
ru_nswap 0
ru_inblock 0
ru_oublock 8
ru_msgsnd 0
ru_msgrcv 0
ru_nsignals 0
ru_nvcsw 2
ru_nivcsw 1
cpu 0.010
mem 0.000
io 0.000
iow 0.000
maxvmem 2.902M
arid undefined
Total System Usage
WALLCLOCK UTIME STIME CPU \
MEMORY IO IOW
====================================================================\
============================================
1620 0.060 0.050 0.110 \
0.000 0.000 0.000
"""
loaded_qstat_xml = """<?xml version='1.0'?>
<job_info xmlns:xsd="http://gridengine.sunsource.net/source/browse/*checkout\
*/gridengine/source/dist/util/resources/schemas/qstat/qstat.xsd?revision=1.11">
<queue_info>
<Queue-List>
<name>all.q@domU-12-31-39-0B-C4-C1.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
<job_list state="running">
<JB_job_number>385</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-07-08T04:40:46</JAT_start_time>
<queue_name>\
all.q@domU-12-31-39-0B-C4-C1.compute-1.internal</queue_name>
<slots>20</slots>
</job_list>
<job_list state="running">
<JB_job_number>386</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-07-08T04:40:47</JAT_start_time>
<queue_name>\
all.q@domU-12-31-39-0B-C4-C1.compute-1.internal</queue_name>
<slots>20</slots>
</job_list>
<job_list state="running">
<JB_job_number>387</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-07-08T04:40:47</JAT_start_time>
<queue_name>\
all.q@domU-12-31-39-0B-C4-C1.compute-1.internal</queue_name>
<slots>20</slots>
</job_list>
<job_list state="running">
<JB_job_number>388</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>r</state>
<JAT_start_time>2010-07-08T04:40:47</JAT_start_time>
<queue_name>\
all.q@domU-12-31-39-0B-C4-C1.compute-1.internal</queue_name>
<slots>20</slots>
</job_list>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0B-C4-61.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0B-C6-51.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FC-31.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FC-71.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FC-D1.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FD-01.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FD-81.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FE-51.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
<Queue-List>
<name>all.q@domU-12-31-39-0E-FE-71.compute-1.internal</name>
<qtype>BIP</qtype>
<slots_used>0</slots_used>
<slots_resv>0</slots_resv>
<slots_total>8</slots_total>
<load_avg>0.01000</load_avg>
<arch>linux-x64</arch>
</Queue-List>
</queue_info>
<job_info>
<job_list state="pending">
<JB_job_number>389</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>390</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>391</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>392</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>393</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>394</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>395</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>396</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>397</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>398</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>399</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>400</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconico-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>401</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>402</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>403</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>404</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>405</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>406</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>407</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>408</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>409</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>410</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>411</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>412</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>413</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>414</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>415</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>416</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kconic-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>417</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>418</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>419</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>420</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>421</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>422</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>423</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>424</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>425</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>426</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:32</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>427</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>428</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>429</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>430</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>431</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>432</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcylo-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>433</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>434</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>435</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>436</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>437</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>438</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>439</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>440</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>441</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>442</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>443</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>444</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>445</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>446</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>447</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>448</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kcyl-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>449</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>450</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>451</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>452</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>453</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>454</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>455</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>456</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>457</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>458</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>459</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>460</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>461</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>462</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>463</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>464</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquado-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>465</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>466</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>467</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>468</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>469</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>470</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>471</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>472</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>473</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>474</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>475</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>476</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>477</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>478</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>479</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>480</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-haar-str-kquad-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:33</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>481</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>482</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>483</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>484</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>485</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>486</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>487</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>488</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>489</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>490</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>491</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>492</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>493</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>494</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>495</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>496</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconico-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>497</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>498</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>499</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>500</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>501</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>502</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>503</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>504</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>505</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>506</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>507</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>508</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>509</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>510</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>511</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>512</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kconic-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>513</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>514</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>515</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>516</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>517</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>518</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>519</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>520</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>521</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>522</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>523</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>524</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>525</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>526</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>527</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>528</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcylo-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>529</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>530</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>531</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>532</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>533</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:34</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>534</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>535</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>536</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>537</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>538</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>539</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>540</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>541</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>542</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>543</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>544</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kcyl-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>545</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>546</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>547</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>548</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>549</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>550</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>551</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>552</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>553</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>554</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>555</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>556</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>557</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>558</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>559</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>560</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquado-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>561</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r4-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>562</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r4-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>563</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r4-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>564</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r4-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>565</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r5-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>566</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r5-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>567</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r5-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>568</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r5-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>569</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r6-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>570</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r6-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>571</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r6-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>572</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r6-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>573</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r7-dc10</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>574</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r7-dc7</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>575</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r7-dc8</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
<job_list state="pending">
<JB_job_number>576</JB_job_number>
<JAT_prio>0.55500</JAT_prio>
<JB_name>sm-main-kquad-r7-dc9</JB_name>
<JB_owner>root</JB_owner>
<state>qw</state>
<JB_submission_time>2010-07-08T04:40:35</JB_submission_time>
<queue_name></queue_name>
<slots>20</slots>
</job_list>
</job_info>
</job_info>"""
|
li-yuntao/PoemWorld
|
refs/heads/master
|
ReadingAssistant/search/CentralEnt.py
|
2
|
# -*- coding:utf-8 -*-
from ReadingAssistant.models import *
from .GraphMaker import *
from django.db.models import Q
class CentralEnt:
def __init__(self, content, record):
self.nodeId = -1
self.content = content
self.record = record
def getContent(self):
return self.content
def addNode2Graph(self, graphMaker, centerFlag=False):
pass
    # Resolve self.content against the database; on success, store the first
    # matching record in self.record and return True, otherwise return False.
def executeQuery(self):
pass
def extendRels(self, graphMaker):
pass
class AuthorEnt(CentralEnt):
def __init__(self, content, record=None):
CentralEnt.__init__(self, content, record)
def executeQuery(self):
qset = Author.objects.filter(author_name=self.content)
if not qset.exists():
return False
self.record = qset[0]
self.content = self.record.author_name
return True
def addNode2Graph(self, graphMaker, centerFlag=False):
priKey = self.record.author_id
thumb_temp = self.record.author_head_thumb
if len(thumb_temp) != 0:
thumbPath = thumb_temp
else:
thumbPath = None
self.nodeId = graphMaker.addNode(priKey, self.content, "author", score=8, thumbPath=thumbPath, isCenter=centerFlag)
return self.nodeId
def extendRels(self, graphMaker):
newNodes = []
relset1 = Author_Poem.objects.filter(author=self.record.author_id)
for rel in relset1:
poem = rel.poem
node = PoemEnt(poem.poem_name, poem)
dstNodeId = node.addNode2Graph(graphMaker)
newNodes.append(node)
graphMaker.addLink(self.nodeId, dstNodeId, u"撰写")
relset2 = AuthorRelation.objects.filter(Q(author1=self.record.author_id) | Q(author2=self.record.author_id))
for rel in relset2:
if rel.author1.author_id == self.record.author_id:
anotherId = rel.author2.author_id
else:
anotherId = rel.author1.author_id
desc = rel.rel_desc
            try:
                anotherRecord = Author.objects.get(author_id=anotherId)
            except Author.DoesNotExist:
                continue
else:
node = AuthorEnt(anotherRecord.author_name, anotherRecord)
dstNodeId = node.addNode2Graph(graphMaker)
newNodes.append(node)
graphMaker.addLink(self.nodeId, dstNodeId, desc)
return newNodes
class PoemEnt(CentralEnt):
def __init__(self, content, record=None):
CentralEnt.__init__(self, content, record)
def executeQuery(self):
qset = Poem.objects.filter(poem_name__contains=self.content)
if not qset.exists():
return False
self.record = qset[0]
self.content = self.record.poem_name
return True
def addNode2Graph(self, graphMaker, centerFlag=False):
priKey = self.record.poem_id
self.nodeId = graphMaker.addNode(priKey, self.content, "poem", score=self.record.poem_score, isCenter=centerFlag)
return self.nodeId
def extendRels(self, graphMaker):
newNodes = []
relset1 = Author_Poem.objects.filter(poem=self.record.poem_id)
for rel in relset1:
author = rel.author
node = AuthorEnt(author.author_name, author)
dstNodeId = node.addNode2Graph(graphMaker)
newNodes.append(node)
graphMaker.addLink(self.nodeId, dstNodeId, u"撰写")
relset2 = Poem_Image.objects.filter(poem=self.record.poem_id)
for rel in relset2:
image = rel.image
node = ImageEnt(image.image_name, image)
dstNodeId = node.addNode2Graph(graphMaker)
newNodes.append(node)
graphMaker.addLink(self.nodeId, dstNodeId, u"使用意象")
return newNodes
class ImageEnt(CentralEnt):
def __init__(self, content, record=None):
CentralEnt.__init__(self, content, record)
def executeQuery(self):
qset = Image.objects.filter(image_name__contains=self.content)
if not qset.exists():
return False
self.record = qset[0]
self.content = self.record.image_name
return True
def addNode2Graph(self, graphMaker, centerFlag=False):
priKey = self.record.image_id
self.nodeId = graphMaker.addNode(priKey, self.content, "image", score=3, isCenter=centerFlag)
return self.nodeId
def extendRels(self, graphMaker):
newNodes = []
relset1 = Poem_Image.objects.filter(image=self.record.image_id)
for rel in relset1:
poem = rel.poem
node = PoemEnt(poem.poem_name, poem)
dstNodeId = node.addNode2Graph(graphMaker)
newNodes.append(node)
graphMaker.addLink(self.nodeId, dstNodeId, u"相关诗歌")
return newNodes
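# Illustrative usage sketch (hypothetical names and data; assumes a populated
# database and a GraphMaker instance as used above):
#
#   maker = GraphMaker()
#   center = AuthorEnt(u"李白")
#   if center.executeQuery():                 # resolve the name to a DB record
#       center.addNode2Graph(maker, centerFlag=True)
#       for node in center.extendRels(maker): # poems written + related authors
#           node.extendRels(maker)            # optionally expand one more hop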
|
DBernardes/ProjetoECC
|
refs/heads/master
|
Ruido_de_Leitura/Codigo/CCDinfo.py
|
1
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Created on 18 October 2016
Description: this module takes as input the header of a FITS image and the number of images in the acquired series, and returns a string with the main CCD information.
@author: Denis Varise Bernardes & Eder Martioli
Laboratorio Nacional de Astrofisica, Brazil.
"""
__version__ = "1.0"
__copyright__ = """
Copyright (c) ... All rights reserved.
"""
import matplotlib.pyplot as plt
def CCDinfo(header, nImages):
date = header['date'].split('T')
plt.xticks(())
plt.yticks(())
    text = ('Camera: ' + header['head'] + '\n'
            + 'Data do experimento: %s %s ' % (date[0], date[1]) + '\n'
            + 'Quantidade de imagens: %i' % nImages + '\n'
            + 'Modo de Leitura: %s' % header['ACQMODE'] + '\n'
            + 'Taxa de leitura: %.2f MHz' % (1 / (header['readtime'] * 1000000)) + '\n'
            + 'Pre-amplificacao: %i' % header['preamp'] + '\n'
            + 'VShift Speed: %.3f e-6' % (header['vshift'] * 1000000))
return text
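# Minimal calling sketch (header keys mirror the ones used above; the values
# are hypothetical):
#
#   header = {'date': '2016-10-18T20:15:00', 'head': 'iXon Ultra',
#             'ACQMODE': 'Kinetic', 'readtime': 1e-6,
#             'preamp': 2, 'vshift': 4.33e-6}
#   print CCDinfo(header, 100)   # a readtime of 1e-6 s gives 1.00 MHz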
|
vishalpant/Banking-management-system
|
refs/heads/master
|
Application.py
|
1
|
import Tkinter as tk
|
chouseknecht/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/k8s/scale.py
|
22
|
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
import math
import time
from ansible.module_utils.k8s.raw import KubernetesRawModule
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
try:
from openshift import watch
from openshift.dynamic.client import ResourceInstance
from openshift.helper.exceptions import KubernetesException
except ImportError as exc:
class KubernetesException(Exception):
pass
SCALE_ARG_SPEC = {
'replicas': {'type': 'int', 'required': True},
'current_replicas': {'type': 'int'},
'resource_version': {},
'wait': {'type': 'bool', 'default': True},
'wait_timeout': {'type': 'int', 'default': 20}
}
class KubernetesAnsibleScaleModule(KubernetesRawModule):
def execute_module(self):
definition = self.resource_definitions[0]
self.client = self.get_api_client()
name = definition['metadata']['name']
namespace = definition['metadata'].get('namespace')
api_version = definition['apiVersion']
kind = definition['kind']
current_replicas = self.params.get('current_replicas')
replicas = self.params.get('replicas')
resource_version = self.params.get('resource_version')
wait = self.params.get('wait')
wait_time = self.params.get('wait_timeout')
existing = None
existing_count = None
return_attributes = dict(changed=False, result=dict())
resource = self.find_resource(kind, api_version, fail=True)
try:
existing = resource.get(name=name, namespace=namespace)
return_attributes['result'] = existing.to_dict()
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc),
error=exc.value.get('status'))
        if kind == 'job':
existing_count = existing.spec.parallelism
elif hasattr(existing.spec, 'replicas'):
existing_count = existing.spec.replicas
if existing_count is None:
self.fail_json(msg='Failed to retrieve the available count for the requested object.')
if resource_version and resource_version != existing.metadata.resourceVersion:
self.exit_json(**return_attributes)
if current_replicas is not None and existing_count != current_replicas:
self.exit_json(**return_attributes)
if existing_count != replicas:
return_attributes['changed'] = True
if not self.check_mode:
                if kind == 'job':
existing.spec.parallelism = replicas
k8s_obj = resource.patch(existing.to_dict())
else:
k8s_obj = self.scale(resource, existing, replicas, wait, wait_time)
return_attributes['result'] = k8s_obj.to_dict()
self.exit_json(**return_attributes)
@property
def argspec(self):
args = copy.deepcopy(COMMON_ARG_SPEC)
args.pop('state')
args.pop('force')
args.update(AUTH_ARG_SPEC)
args.update(SCALE_ARG_SPEC)
return args
def scale(self, resource, existing_object, replicas, wait, wait_time):
name = existing_object.metadata.name
namespace = existing_object.metadata.namespace
if not hasattr(resource, 'scale'):
self.fail_json(
msg="Cannot perform scale on resource of kind {0}".format(resource.kind)
)
scale_obj = {'metadata': {'name': name, 'namespace': namespace}, 'spec': {'replicas': replicas}}
return_obj = None
stream = None
if wait:
w, stream = self._create_stream(resource, namespace, wait_time)
try:
resource.scale.patch(body=scale_obj)
except Exception as exc:
self.fail_json(
msg="Scale request failed: {0}".format(exc)
)
if wait and stream is not None:
return_obj = self._read_stream(resource, w, stream, name, replicas)
if not return_obj:
return_obj = self._wait_for_response(resource, name, namespace)
return return_obj
def _create_stream(self, resource, namespace, wait_time):
""" Create a stream of events for the object """
w = None
stream = None
try:
w = watch.Watch()
w._api_client = self.client.client
if namespace:
stream = w.stream(resource.get, serialize=False, namespace=namespace, timeout_seconds=wait_time)
            else:
                stream = w.stream(resource.get, serialize=False, timeout_seconds=wait_time)
except KubernetesException:
pass
return w, stream
def _read_stream(self, resource, watcher, stream, name, replicas):
""" Wait for ready_replicas to equal the requested number of replicas. """
return_obj = None
try:
for event in stream:
if event.get('object'):
obj = ResourceInstance(resource, event['object'])
if obj.metadata.name == name and hasattr(obj, 'status'):
if replicas == 0:
if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
return_obj = obj
watcher.stop()
break
if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
return_obj = obj
watcher.stop()
break
except Exception as exc:
self.fail_json(msg="Exception reading event stream: {0}".format(exc))
if not return_obj:
self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas is None:
self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas != replicas:
self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
"the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
return return_obj
def _wait_for_response(self, resource, name, namespace):
""" Wait for an API response """
tries = 0
half = math.ceil(20 / 2)
obj = None
while tries <= half:
obj = resource.get(name=name, namespace=namespace)
if obj:
break
tries += 2
time.sleep(2)
return obj
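# A hypothetical playbook task exercising this module via k8s_scale
# (illustrative only; the accepted options come from SCALE_ARG_SPEC plus the
# common/auth specs merged in argspec above):
#
#   - name: Scale a deployment to 3 replicas
#     k8s_scale:
#       api_version: apps/v1
#       kind: Deployment
#       name: my-app
#       namespace: default
#       replicas: 3
#       wait_timeout: 60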
|
crmccreary/openerp_server
|
refs/heads/master
|
openerp/tools/float_utils.py
|
151
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None):
"""Return ``value`` rounded to ``precision_digits``
decimal digits, minimizing IEEE-754 floating point representation
errors, and applying HALF-UP (away from zero) tie-breaking rule.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
# Due to IEE754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
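# Worked example of the HALF-UP correction above (following from the
# 2.675 == 2.6749999999999998 representation issue described in the comments):
#   round(2.675, 2)                          -> 2.67  (1 ulp short of the tie)
#   float_round(2.675, precision_digits=2)   -> 2.68  (epsilon tips the tie up)
#   float_round(1.3, precision_rounding=.5)  -> 1.5   (rounding to 0.5 'steps')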
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
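# Concrete instance of the warning above, at 2 digits precision:
#   float_compare(0.006, 0.002, precision_digits=2)  -> 1    (0.01 vs 0.00)
#   float_is_zero(0.006 - 0.002, precision_digits=2) -> True (0.004 rounds to 0.00)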
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
the given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
# Can't use str() here because it seems to have an intrisic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
|
shenyy/lily2-gem5
|
refs/heads/master
|
src/cpu/ExeTracer.py
|
19
|
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
from m5.params import *
from InstTracer import InstTracer
class ExeTracer(InstTracer):
type = 'ExeTracer'
cxx_class = 'Trace::ExeTracer'
|
afandria/sky_engine
|
refs/heads/master
|
tools/valgrind/asan/asan_symbolize.py
|
51
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from third_party import asan_symbolize
import argparse
import base64
import json
import os
import platform
import re
import subprocess
import sys
class LineBuffered(object):
"""Disable buffering on a file object."""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
if '\n' in data:
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def disable_buffering():
"""Makes this process and child processes stdout unbuffered."""
if not os.environ.get('PYTHONUNBUFFERED'):
# Since sys.stdout is a C++ object, it's impossible to do
# sys.stdout.write = lambda...
sys.stdout = LineBuffered(sys.stdout)
os.environ['PYTHONUNBUFFERED'] = 'x'
def set_symbolizer_path():
"""Set the path to the llvm-symbolize binary in the Chromium source tree."""
if not os.environ.get('LLVM_SYMBOLIZER_PATH'):
script_dir = os.path.dirname(os.path.abspath(__file__))
# Assume this script resides three levels below src/ (i.e.
# src/tools/valgrind/asan/).
src_root = os.path.join(script_dir, "..", "..", "..")
symbolizer_path = os.path.join(src_root, 'third_party',
'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
assert(os.path.isfile(symbolizer_path))
os.environ['LLVM_SYMBOLIZER_PATH'] = os.path.abspath(symbolizer_path)
def is_hash_name(name):
match = re.match('[0-9a-f]+$', name)
return bool(match)
def split_path(path):
ret = []
while True:
head, tail = os.path.split(path)
if head == path:
return [head] + ret
ret, path = [tail] + ret, head
def chrome_product_dir_path(exe_path):
if exe_path is None:
return None
path_parts = split_path(exe_path)
# Make sure the product dir path isn't empty if |exe_path| consists of
# a single component.
if len(path_parts) == 1:
path_parts = ['.'] + path_parts
for index, part in enumerate(path_parts):
if part.endswith('.app'):
return os.path.join(*path_parts[:index])
# If the executable isn't an .app bundle, it's a commandline binary that
# resides right in the product dir.
return os.path.join(*path_parts[:-1])
inode_path_cache = {}
def find_inode_at_path(inode, path):
if inode in inode_path_cache:
return inode_path_cache[inode]
cmd = ['find', path, '-inum', str(inode)]
find_line = subprocess.check_output(cmd).rstrip()
lines = find_line.split('\n')
ret = None
if lines:
    # `find` may give us several paths (e.g. 'Chromium Framework' in the
    # product dir and 'Chromium Framework' inside 'Chromium.app');
    # chrome_dsym_hints() will produce the correct .dSYM path for any of them.
ret = lines[0]
inode_path_cache[inode] = ret
return ret
# Create a binary name filter that works around https://crbug.com/444835.
# When running tests on OSX swarming servers, ASan sometimes prints paths to
# files in cache (ending with SHA1 filenames) instead of paths to hardlinks to
# those files in the product dir.
# For a given |binary_path| chrome_osx_binary_name_filter() returns one of the
# hardlinks to the same inode in |product_dir_path|.
def make_chrome_osx_binary_name_filter(product_dir_path=''):
def chrome_osx_binary_name_filter(binary_path):
basename = os.path.basename(binary_path)
if is_hash_name(basename) and product_dir_path:
inode = os.stat(binary_path).st_ino
new_binary_path = find_inode_at_path(inode, product_dir_path)
if new_binary_path:
return new_binary_path
return binary_path
return chrome_osx_binary_name_filter
# Construct a path to the .dSYM bundle for the given binary.
# There are three possible cases for binary location in Chromium:
# 1. The binary is a standalone executable or dynamic library in the product
# dir, the debug info is in "binary.dSYM" in the product dir.
# 2. The binary is a standalone framework or .app bundle, the debug info is in
# "Framework.framework.dSYM" or "App.app.dSYM" in the product dir.
# 3. The binary is a framework or an .app bundle within another .app bundle
# (e.g. Outer.app/Contents/Versions/1.2.3.4/Inner.app), and the debug info
# is in Inner.app.dSYM in the product dir.
# The first case is handled by llvm-symbolizer, so we only need to construct
# .dSYM paths for .app bundles and frameworks.
# We assume that there are no more than two nested bundles in the binary
# path. Only one of these bundles may be a framework, and frameworks cannot
# contain other bundles.
def chrome_dsym_hints(binary):
path_parts = split_path(binary)
app_positions = []
framework_positions = []
for index, part in enumerate(path_parts):
if part.endswith('.app'):
app_positions.append(index)
elif part.endswith('.framework'):
framework_positions.append(index)
bundle_positions = app_positions + framework_positions
bundle_positions.sort()
assert len(bundle_positions) <= 2, \
"The path contains more than two nested bundles: %s" % binary
if len(bundle_positions) == 0:
# Case 1: this is a standalone executable or dylib.
return []
assert (not (len(app_positions) == 1 and
len(framework_positions) == 1 and
app_positions[0] > framework_positions[0])), \
"The path contains an app bundle inside a framework: %s" % binary
# Cases 2 and 3. The outermost bundle (which is the only bundle in the case 2)
# is located in the product dir.
outermost_bundle = bundle_positions[0]
product_dir = path_parts[:outermost_bundle]
# In case 2 this is the same as |outermost_bundle|.
innermost_bundle = bundle_positions[-1]
dsym_path = product_dir + [path_parts[innermost_bundle]]
result = '%s.dSYM' % os.path.join(*dsym_path)
return [result]
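# Example (illustrative path): for a nested bundle such as
#   out/Release/Outer.app/Contents/Versions/1.2.3.4/Inner.app/Contents/MacOS/Inner
# the outermost bundle sits in the product dir ('out/Release'), so this
# function returns ['out/Release/Inner.app.dSYM'], the innermost bundle's .dSYM.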
# We want our output to match base::EscapeJSONString(), which produces
# doubly-escaped strings. The first escaping pass is handled by this class. The
# second pass happens when JSON data is dumped to file.
class StringEncoder(json.JSONEncoder):
def __init__(self):
json.JSONEncoder.__init__(self)
def encode(self, s):
assert(isinstance(s, basestring))
encoded = json.JSONEncoder.encode(self, s)
assert(len(encoded) >= 2)
assert(encoded[0] == '"')
assert(encoded[-1] == '"')
encoded = encoded[1:-1]
# Special case from base::EscapeJSONString().
encoded = encoded.replace('<', '\u003C')
return encoded
class JSONTestRunSymbolizer(object):
def __init__(self, symbolization_loop):
self.string_encoder = StringEncoder()
self.symbolization_loop = symbolization_loop
def symbolize_snippet(self, snippet):
symbolized_lines = []
for line in snippet.split('\n'):
symbolized_lines += self.symbolization_loop.process_line(line)
return '\n'.join(symbolized_lines)
def symbolize(self, test_run):
original_snippet = base64.b64decode(test_run['output_snippet_base64'])
symbolized_snippet = self.symbolize_snippet(original_snippet)
if symbolized_snippet == original_snippet:
# No sanitizer reports in snippet.
return
test_run['original_output_snippet'] = test_run['output_snippet']
test_run['original_output_snippet_base64'] = \
test_run['output_snippet_base64']
escaped_snippet = StringEncoder().encode(symbolized_snippet)
test_run['output_snippet'] = escaped_snippet
test_run['output_snippet_base64'] = \
base64.b64encode(symbolized_snippet)
test_run['snippet_processed_by'] = 'asan_symbolize.py'
# Originally, "lossless" refers to "no Unicode data lost while encoding the
# string". However, since we're applying another kind of transformation
# (symbolization), it doesn't seem right to consider the snippet lossless.
test_run['losless_snippet'] = False
def symbolize_snippets_in_json(filename, symbolization_loop):
with open(filename, 'r') as f:
json_data = json.load(f)
test_run_symbolizer = JSONTestRunSymbolizer(symbolization_loop)
for iteration_data in json_data['per_iteration_data']:
for test_name, test_runs in iteration_data.iteritems():
for test_run in test_runs:
test_run_symbolizer.symbolize(test_run)
with open(filename, 'w') as f:
json.dump(json_data, f, indent=3, sort_keys=True)
def main():
parser = argparse.ArgumentParser(description='Symbolize sanitizer reports.')
parser.add_argument('--test-summary-json-file',
help='Path to a JSON file produced by the test launcher. The script will '
           'ignore standard input and instead symbolize the output snippets '
'inside the JSON file. The result will be written back to the JSON '
'file.')
parser.add_argument('strip_path_prefix', nargs='*',
help='When printing source file names, the longest prefix ending in one '
'of these substrings will be stripped. E.g.: "Release/../../".')
parser.add_argument('--executable-path',
help='Path to program executable. Used on OSX swarming bots to locate '
'dSYM bundles for associated frameworks and bundles.')
args = parser.parse_args()
disable_buffering()
set_symbolizer_path()
asan_symbolize.demangle = True
asan_symbolize.fix_filename_patterns = args.strip_path_prefix
# Most source paths for Chromium binaries start with
# /path/to/src/out/Release/../../
asan_symbolize.fix_filename_patterns.append('Release/../../')
binary_name_filter = None
if platform.uname()[0] == 'Darwin':
binary_name_filter = make_chrome_osx_binary_name_filter(
chrome_product_dir_path(args.executable_path))
loop = asan_symbolize.SymbolizationLoop(
binary_name_filter=binary_name_filter,
dsym_hint_producer=chrome_dsym_hints)
if args.test_summary_json_file:
symbolize_snippets_in_json(args.test_summary_json_file, loop)
else:
# Process stdin.
asan_symbolize.logfile = sys.stdin
loop.process_logfile()
if __name__ == '__main__':
main()
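# Typical invocations (illustrative):
#   tools/valgrind/asan/asan_symbolize.py < asan_report.txt
#   tools/valgrind/asan/asan_symbolize.py --test-summary-json-file=summary.json \
#       --executable-path=out/Release/Chromium.app/Contents/MacOS/Chromium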
|
yg257/Pangea
|
refs/heads/master
|
lib/boto-2.34.0/boto/mashups/server.py
|
153
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 server
"""
import boto
import boto.utils
from boto.compat import StringIO
from boto.mashups.iobject import IObject
from boto.pyami.config import Config, BotoConfigPath
from boto.mashups.interactive import interactive_shell
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty
import os
class ServerSet(list):
def __getattr__(self, name):
results = []
is_callable = False
for server in self:
try:
val = getattr(server, name)
if callable(val):
is_callable = True
results.append(val)
            except AttributeError:
                results.append(None)
if is_callable:
self.map_list = results
return self.map
return results
def map(self, *args):
results = []
for fn in self.map_list:
results.append(fn(*args))
return results
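# Illustrative fan-out semantics (assumes some servers have been registered):
#   servers = Server.Inventory()   # a ServerSet
#   servers.hostname               # -> list of each server's public DNS name
#   servers.reboot()               # collects the bound methods, then maps the call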
class Server(Model):
@property
def ec2(self):
if self._ec2 is None:
self._ec2 = boto.connect_ec2()
return self._ec2
@classmethod
def Inventory(cls):
"""
Returns a list of Server instances, one for each Server object
persisted in the db
"""
l = ServerSet()
rs = cls.find()
for server in rs:
l.append(server)
return l
@classmethod
def Register(cls, name, instance_id, description=''):
s = cls()
s.name = name
s.instance_id = instance_id
s.description = description
s.save()
return s
def __init__(self, id=None, **kw):
super(Server, self).__init__(id, **kw)
self._reservation = None
self._instance = None
self._ssh_client = None
self._pkey = None
self._config = None
self._ec2 = None
name = StringProperty(unique=True, verbose_name="Name")
instance_id = StringProperty(verbose_name="Instance ID")
config_uri = StringProperty()
ami_id = StringProperty(verbose_name="AMI ID")
zone = StringProperty(verbose_name="Availability Zone")
security_group = StringProperty(verbose_name="Security Group", default="default")
key_name = StringProperty(verbose_name="Key Name")
elastic_ip = StringProperty(verbose_name="Elastic IP")
instance_type = StringProperty(verbose_name="Instance Type")
description = StringProperty(verbose_name="Description")
log = StringProperty()
def setReadOnly(self, value):
raise AttributeError
def getInstance(self):
if not self._instance:
if self.instance_id:
try:
rs = self.ec2.get_all_reservations([self.instance_id])
except:
return None
if len(rs) > 0:
self._reservation = rs[0]
self._instance = self._reservation.instances[0]
return self._instance
instance = property(getInstance, setReadOnly, None, 'The Instance for the server')
def getAMI(self):
if self.instance:
return self.instance.image_id
ami = property(getAMI, setReadOnly, None, 'The AMI for the server')
def getStatus(self):
if self.instance:
self.instance.update()
return self.instance.state
status = property(getStatus, setReadOnly, None,
'The status of the server')
def getHostname(self):
if self.instance:
return self.instance.public_dns_name
hostname = property(getHostname, setReadOnly, None,
'The public DNS name of the server')
def getPrivateHostname(self):
if self.instance:
return self.instance.private_dns_name
private_hostname = property(getPrivateHostname, setReadOnly, None,
'The private DNS name of the server')
def getLaunchTime(self):
if self.instance:
return self.instance.launch_time
launch_time = property(getLaunchTime, setReadOnly, None,
'The time the Server was started')
def getConsoleOutput(self):
if self.instance:
return self.instance.get_console_output()
console_output = property(getConsoleOutput, setReadOnly, None,
'Retrieve the console output for server')
def getGroups(self):
if self._reservation:
return self._reservation.groups
else:
return None
groups = property(getGroups, setReadOnly, None,
'The Security Groups controlling access to this server')
def getConfig(self):
if not self._config:
remote_file = BotoConfigPath
local_file = '%s.ini' % self.instance.id
self.get_file(remote_file, local_file)
self._config = Config(local_file)
return self._config
    def setConfig(self, config):
        local_file = '%s.ini' % self.instance.id
        # serialize the config to a local file, then push it to the instance
        fp = open(local_file, 'w')
        config.write(fp)
        fp.close()
        self.put_file(local_file, BotoConfigPath)
        self._config = config
config = property(getConfig, setConfig, None,
'The instance data for this server')
def set_config(self, config):
"""
Set SDB based config
"""
self._config = config
self._config.dump_to_sdb("botoConfigs", self.id)
def load_config(self):
self._config = Config(do_load=False)
self._config.load_from_sdb("botoConfigs", self.id)
def stop(self):
if self.instance:
self.instance.stop()
def start(self):
self.stop()
ec2 = boto.connect_ec2()
ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0]
groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)])
if not self._config:
self.load_config()
if not self._config.has_section("Credentials"):
self._config.add_section("Credentials")
self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id)
self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key)
if not self._config.has_section("Pyami"):
self._config.add_section("Pyami")
if self._manager.domain:
self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name)
self._config.set("Pyami", 'server_sdb_name', self.name)
cfg = StringIO()
self._config.write(cfg)
cfg = cfg.getvalue()
r = ami.run(min_count=1,
max_count=1,
key_name=self.key_name,
security_groups = groups,
instance_type = self.instance_type,
placement = self.zone,
user_data = cfg)
i = r.instances[0]
self.instance_id = i.id
self.put()
if self.elastic_ip:
ec2.associate_address(self.instance_id, self.elastic_ip)
def reboot(self):
if self.instance:
self.instance.reboot()
def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts',
uname='root'):
import paramiko
if not self.instance:
print('No instance yet!')
return
if not self._ssh_client:
if not key_file:
iobject = IObject()
key_file = iobject.get_filename('Path to OpenSSH Key file')
self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._ssh_client.connect(self.instance.public_dns_name,
username=uname, pkey=self._pkey)
return self._ssh_client
def get_file(self, remotepath, localpath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
sftp_client.get(remotepath, localpath)
def put_file(self, localpath, remotepath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
sftp_client.put(localpath, remotepath)
def listdir(self, remotepath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
return sftp_client.listdir(remotepath)
def shell(self, key_file=None):
ssh_client = self.get_ssh_client(key_file)
channel = ssh_client.invoke_shell()
interactive_shell(channel)
def bundle_image(self, prefix, key_file, cert_file, size):
print('bundling image...')
print('\tcopying cert and pk over to /mnt directory on server')
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
path, name = os.path.split(key_file)
remote_key_file = '/mnt/%s' % name
self.put_file(key_file, remote_key_file)
path, name = os.path.split(cert_file)
remote_cert_file = '/mnt/%s' % name
self.put_file(cert_file, remote_cert_file)
print('\tdeleting %s' % BotoConfigPath)
# delete the metadata.ini file if it exists
try:
sftp_client.remove(BotoConfigPath)
        except IOError:
            pass
command = 'sudo ec2-bundle-vol '
command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
command += '-u %s ' % self._reservation.owner_id
command += '-p %s ' % prefix
command += '-s %d ' % size
command += '-d /mnt '
        if self.instance.instance_type in ('m1.small', 'c1.medium'):
command += '-r i386'
else:
command += '-r x86_64'
print('\t%s' % command)
t = ssh_client.exec_command(command)
response = t[1].read()
print('\t%s' % response)
print('\t%s' % t[2].read())
print('...complete!')
def upload_bundle(self, bucket, prefix):
print('uploading bundle...')
command = 'ec2-upload-bundle '
command += '-m /mnt/%s.manifest.xml ' % prefix
command += '-b %s ' % bucket
command += '-a %s ' % self.ec2.aws_access_key_id
command += '-s %s ' % self.ec2.aws_secret_access_key
print('\t%s' % command)
ssh_client = self.get_ssh_client()
t = ssh_client.exec_command(command)
response = t[1].read()
print('\t%s' % response)
print('\t%s' % t[2].read())
print('...complete!')
def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None):
iobject = IObject()
if not bucket:
bucket = iobject.get_string('Name of S3 bucket')
if not prefix:
prefix = iobject.get_string('Prefix for AMI file')
if not key_file:
key_file = iobject.get_filename('Path to RSA private key file')
if not cert_file:
cert_file = iobject.get_filename('Path to RSA public cert file')
if not size:
size = iobject.get_int('Size (in MB) of bundled image')
self.bundle_image(prefix, key_file, cert_file, size)
self.upload_bundle(bucket, prefix)
print('registering image...')
self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix))
return self.image_id
def attach_volume(self, volume, device="/dev/sdp"):
"""
Attach an EBS volume to this server
:param volume: EBS Volume to attach
:type volume: boto.ec2.volume.Volume
:param device: Device to attach to (default to /dev/sdp)
:type device: string
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device)
def detach_volume(self, volume):
"""
Detach an EBS volume from this server
:param volume: EBS Volume to detach
:type volume: boto.ec2.volume.Volume
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)
def install_package(self, package_name):
print('installing %s...' % package_name)
command = 'yum -y install %s' % package_name
print('\t%s' % command)
ssh_client = self.get_ssh_client()
t = ssh_client.exec_command(command)
response = t[1].read()
print('\t%s' % response)
print('\t%s' % t[2].read())
print('...complete!')
|
AloneRoad/Inforlearn
|
refs/heads/1.0-rc3
|
vendor/gdata/oauth/__init__.py
|
157
|
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
# Generic exception class
class OAuthError(RuntimeError):
    def __init__(self, message='OAuth error occurred.'):
self.message = message
# optional WWW-Authenticate header (401 error)
def build_authenticate_header(realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# url escape
def escape(s):
# escape '/' too
return urllib.quote(s, safe='~')
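# e.g. escape('a b/c~') -> 'a%20b%2Fc~' ('/' is escaped too; '~' is kept, per RFC 3986)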
# util function: current timestamp
# seconds since epoch (UTC)
def generate_timestamp():
return int(time.time())
# util function: nonce
# pseudorandom number
def generate_nonce(length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
# OAuthConsumer is a data type that represents the identity of the Consumer
# via its shared secret with the Service Provider.
class OAuthConsumer(object):
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
# OAuthToken is a data type that represents an End User via either an access
# or request token.
class OAuthToken(object):
    '''
    An access or request token.

    key = the token
    secret = the token secret
    '''
    key = None
    secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def to_string(self):
return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})
# return a token from something like:
# oauth_token_secret=digg&oauth_token=digg
def from_string(s):
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
return OAuthToken(key, secret)
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
'''
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
... any additional parameters, as defined by the Service Provider.
'''
parameters = None # oauth parameters
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')
# get any non-oauth parameters
def get_nonoauth_parameters(self):
parameters = {}
for k, v in self.parameters.iteritems():
            # ignore oauth parameters
            if not k.startswith('oauth_'):
                parameters[k] = v
return parameters
# serialize as a header for an HTTPAuth request
def to_header(self, realm=''):
auth_header = 'OAuth realm="%s"' % realm
# add the oauth parameters
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
# serialize as post data for a POST request
def to_postdata(self):
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()])
# serialize as a url for a GET request
def to_url(self):
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
# return a string that consists of all the parameters that need to be signed
def get_normalized_parameters(self):
        # work on a copy so the signature isn't deleted from the request itself
        params = self.parameters.copy()
        try:
            # exclude the signature if it exists
            del params['oauth_signature']
        except KeyError:
            pass
key_values = params.items()
        # sort lexicographically, first by key, then by value
key_values.sort()
# combine key value pairs in string and escape
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])
# just uppercases the http method
def get_normalized_http_method(self):
return self.http_method.upper()
# parses the url and rebuilds it to be scheme://host/path
def get_normalized_http_url(self):
parts = urlparse.urlparse(self.http_url)
url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
return url_string
# set the signature parameter to the result of build_signature
def sign_request(self, signature_method, consumer, token):
# set the signature method
self.set_parameter('oauth_signature_method', signature_method.get_name())
# set the signature
self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
# call the build signature method within the signature method
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
# combine multiple parameter sources
if parameters is None:
parameters = {}
# headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# check that the authorization header is OAuth
            if auth_header.find('OAuth') > -1:
try:
# get the parameters from the header
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from Authorization header.')
# GET or POST query string
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
# util function: turn Authorization: header into parameters, has to do some unescaping
def _split_header(header):
params = {}
parts = header.split(',')
for param in parts:
# ignore realm parameter
if param.find('OAuth realm') > -1:
continue
# remove whitespace
param = param.strip()
# split key-value
param_parts = param.split('=', 1)
# remove quotes and unescape the value
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
# util function: turn url string into parameters, has to do some unescaping
def _split_url_string(param_str):
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
timestamp_threshold = 300 # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, oauth_data_store):
        self.data_store = oauth_data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
# process a request_token request
# returns the request token on success
def fetch_request_token(self, oauth_request):
try:
# get the request token for authorization
token = self._get_token(oauth_request, 'request')
except OAuthError:
# no token required for the initial token request
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
self._check_signature(oauth_request, consumer, None)
# fetch a new token
token = self.data_store.fetch_request_token(consumer)
return token
# process an access_token request
# returns the access token on success
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the request token
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token)
return new_token
# verify an api call, checks all the parameters
def verify_request(self, oauth_request):
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# get the access token
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
# authorize a request token
def authorize_token(self, token, user):
return self.data_store.authorize_request_token(token, user)
# get the callback url
def get_callback(self, oauth_request):
return oauth_request.get_parameter('oauth_callback')
# optional support for the authenticate header
def build_authenticate_header(self, realm=''):
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
# verify the correct version request for this server
def _get_version(self, oauth_request):
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
# figure out the signature with some defaults
def _get_signature_method(self, oauth_request):
try:
signature_method = oauth_request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# get the signature method object
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
if not consumer_key:
raise OAuthError('Invalid consumer key.')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
# try to find the token for the provided request token key
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# validate the signature
valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
def _check_timestamp(self, timestamp):
# verify that timestamp is recentish
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %d differ by more than the threshold of %d seconds' % (timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
# verify that the nonce is uniqueish
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_request):
# -> OAuthToken
raise NotImplementedError
def access_resource(self, oauth_request):
# -> some protected resource
raise NotImplementedError
# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):
def lookup_consumer(self, key):
# -> OAuthConsumer
raise NotImplementedError
    def lookup_token(self, token_type, token_field):
        # -> OAuthToken
        raise NotImplementedError
    def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp=None):
        # -> the stored nonce string if it has been seen before, else None
        raise NotImplementedError
def fetch_request_token(self, oauth_consumer):
# -> OAuthToken
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token):
# -> OAuthToken
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
# -> OAuthToken
raise NotImplementedError
# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
def get_name(self):
# -> str
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
# -> str key, str raw
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
# -> str
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
# build the base signature string
key, raw = self.build_signature_base_string(oauth_request, consumer, token)
# hmac object
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # deprecated
hashed = hmac.new(key, raw, sha)
# calculate the digest base 64
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
# concatenate the consumer key and secret
sig = escape(consumer.secret) + '&'
if token:
sig = sig + escape(token.secret)
return sig
def build_signature(self, oauth_request, consumer, token):
return self.build_signature_base_string(oauth_request, consumer, token)
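# A minimal, self-contained consumer-side signing sketch using the classes
# above; the key/secret values and the URL are placeholders, not credentials
# for any real Service Provider.
if __name__ == '__main__':
    consumer = OAuthConsumer('my-consumer-key', 'my-consumer-secret')
    token = OAuthToken('request-token', 'request-token-secret')
    request = OAuthRequest.from_consumer_and_token(
        consumer, token=token, http_method='GET',
        http_url='http://example.com/photos', parameters={'size': 'original'})
    request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    print request.to_header(realm='http://example.com/')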
|
Bleno/teste_api
|
refs/heads/master
|
ambiente/lib/python2.7/posixpath.py
|
4
|
/usr/lib/python2.7/posixpath.py
|
jiaaro/pydub
|
refs/heads/master
|
pydub/playback.py
|
1
|
"""
Support for playing AudioSegments. Pyaudio will be used if it's installed,
otherwise will fallback to ffplay. Pyaudio is a *much* nicer solution, but
is tricky to install. See my notes on installing pyaudio in a virtualenv (on
OSX 10.10): https://gist.github.com/jiaaro/9767512210a1d80a8a0d
"""
import subprocess
from tempfile import NamedTemporaryFile
from .utils import get_player_name, make_chunks
def _play_with_ffplay(seg):
PLAYER = get_player_name()
with NamedTemporaryFile("w+b", suffix=".wav") as f:
seg.export(f.name, "wav")
subprocess.call([PLAYER, "-nodisp", "-autoexit", "-hide_banner", f.name])
def _play_with_pyaudio(seg):
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(seg.sample_width),
channels=seg.channels,
rate=seg.frame_rate,
output=True)
    # Release the audio resources even after exceptions/interrupts, so that a
    # later call to play() does not raise OSError: Device Unavailable
    try:
        # break audio into half-second chunks (to allow keyboard interrupts)
for chunk in make_chunks(seg, 500):
stream.write(chunk._data)
finally:
stream.stop_stream()
stream.close()
p.terminate()
def _play_with_simpleaudio(seg):
import simpleaudio
return simpleaudio.play_buffer(
seg.raw_data,
num_channels=seg.channels,
bytes_per_sample=seg.sample_width,
sample_rate=seg.frame_rate
)
def play(audio_segment):
try:
playback = _play_with_simpleaudio(audio_segment)
try:
playback.wait_done()
except KeyboardInterrupt:
playback.stop()
except ImportError:
pass
else:
return
    try:
        _play_with_pyaudio(audio_segment)
        return
    except ImportError:
        pass
_play_with_ffplay(audio_segment)
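# A minimal usage sketch of the public entry point above; "example.mp3" is a
# placeholder path, and AudioSegment is pydub's own class.
if __name__ == '__main__':
    from pydub import AudioSegment
    song = AudioSegment.from_file("example.mp3")
    play(song)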
|
JTCunning/sentry
|
refs/heads/master
|
tests/sentry/auth/test_access.py
|
13
|
from __future__ import absolute_import
from mock import Mock
from sentry.auth import access
from sentry.models import AuthProvider
from sentry.testutils import TestCase
class FromUserTest(TestCase):
def test_no_access(self):
organization = self.create_organization()
team = self.create_team(organization=organization)
user = self.create_user()
result = access.from_user(user, organization)
assert not result.is_active
assert result.sso_is_valid
assert not result.scopes
assert not result.has_team(team)
def test_global_org_member_access(self):
user = self.create_user()
organization = self.create_organization(owner=user)
member = organization.member_set.get(user=user)
team = self.create_team(organization=organization)
result = access.from_user(user, organization)
assert result.is_active
assert result.sso_is_valid
assert result.scopes == member.get_scopes()
assert result.has_team(team)
def test_team_restricted_org_member_access(self):
user = self.create_user()
organization = self.create_organization()
team = self.create_team(organization=organization)
member = self.create_member(
organization=organization,
user=user,
has_global_access=False,
teams=[team],
)
result = access.from_user(user, organization)
assert result.is_active
assert result.sso_is_valid
assert result.scopes == member.get_scopes()
assert result.has_team(team)
def test_unlinked_sso(self):
user = self.create_user()
organization = self.create_organization(owner=user)
member = organization.member_set.get(user=user)
team = self.create_team(organization=organization)
AuthProvider.objects.create(
organization=organization,
provider='dummy',
)
result = access.from_user(user, organization)
assert not result.sso_is_valid
def test_sso_without_link_requirement(self):
user = self.create_user()
organization = self.create_organization(owner=user)
member = organization.member_set.get(user=user)
team = self.create_team(organization=organization)
AuthProvider.objects.create(
organization=organization,
provider='dummy',
flags=AuthProvider.flags.allow_unlinked,
)
result = access.from_user(user, organization)
assert result.sso_is_valid
def test_anonymous_user(self):
from django.contrib.auth.models import AnonymousUser
user = self.create_user()
anon_user = AnonymousUser()
organization = self.create_organization(owner=user)
result = access.from_user(anon_user, organization)
assert not result.is_active
class DefaultAccessTest(TestCase):
def test_no_access(self):
result = access.DEFAULT
assert not result.is_active
assert result.sso_is_valid
assert not result.scopes
assert not result.has_team(Mock())
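# A sketch of the access API these tests exercise, outside the test harness;
# `user`, `organization`, and `team` are placeholders for real model instances.
#
#     result = access.from_user(user, organization)
#     if result.is_active and result.has_team(team):
#         pass  # user may act within this team's scopes (result.scopes)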
|
unomena/pyxero
|
refs/heads/master
|
xero/manager.py
|
1
|
from __future__ import unicode_literals
import requests
import six
import json
from xml.dom.minidom import parseString
from xml.etree.ElementTree import tostring, SubElement, Element
from datetime import datetime
from dateutil.parser import parse
from decimal import Decimal
from six.moves.urllib.parse import parse_qs
from .constants import XERO_API_URL
from .exceptions import *
from .utils import singular, isplural, parse_date, json_load_object_hook
class Manager(object):
DECORATED_METHODS = (
'get',
'save',
'filter',
'all',
'allocate',
'put',
'get_attachments',
'get_attachment_data',
'put_attachment_data',
)
DATETIME_FIELDS = (
'UpdatedDateUTC',
'Updated',
'FullyPaidOnDate',
'DateTimeUTC',
'CreatedDateUTC'
)
DATE_FIELDS = (
'DueDate',
'Date',
'PaymentDate',
'StartDate',
'EndDate',
'PeriodLockDate',
'DateOfBirth',
'OpeningBalanceDate',
'PaymentDueDate',
'ReportingDate',
)
BOOLEAN_FIELDS = (
'IsSupplier',
'IsCustomer',
'IsDemoCompany',
'PaysTax',
'IsAuthorisedToApproveTimesheets',
'IsAuthorisedToApproveLeave',
'HasHELPDebt',
'AustralianResidentForTaxPurposes',
'TaxFreeThresholdClaimed',
'HasSFSSDebt',
'EligibleToReceiveLeaveLoading',
'IsExemptFromTax',
'IsExemptFromSuper',
'SentToContact',
'IsSubscriber',
'HasAttachments',
)
DECIMAL_FIELDS = (
'Hours',
'NumberOfUnit',
)
INTEGER_FIELDS = (
'FinancialYearEndDay',
'FinancialYearEndMonth',
)
NO_SEND_FIELDS = (
'UpdatedDateUTC',
)
OPERATOR_MAPPINGS = {
'gt': '>',
'lt': '<',
'lte': '<=',
'gte': '>=',
'ne': '!='
}
def __init__(self, name, credentials, unit_price_4dps=False):
self.credentials = credentials
self.name = name
self.base_url = credentials.base_url + XERO_API_URL
self.extra_params = {"unitdp": 4} if unit_price_4dps else {}
self.singular = singular(name)
for method_name in self.DECORATED_METHODS:
method = getattr(self, '_%s' % method_name)
setattr(self, method_name, self._get_data(method))
def dict_to_xml(self, root_elm, data):
for key in data.keys():
# Xero will complain if we send back these fields.
if key in self.NO_SEND_FIELDS:
continue
sub_data = data[key]
elm = SubElement(root_elm, key)
            # Key references a dict. Unroll the dict
            # as its own XML node with subnodes
if isinstance(sub_data, dict):
self.dict_to_xml(elm, sub_data)
# Key references a list/tuple
            elif isinstance(sub_data, (list, tuple)):
# key name is a plural. This means each item
# in the list needs to be wrapped in an XML
# node that is a singular version of the list name.
if isplural(key):
for d in sub_data:
self.dict_to_xml(SubElement(elm, singular(key)), d)
# key name isn't a plural. Just insert the content
# as an XML node with subnodes
else:
for d in sub_data:
self.dict_to_xml(elm, d)
# Normal element - just insert the data.
else:
if key in self.BOOLEAN_FIELDS:
val = 'true' if sub_data else 'false'
else:
val = six.text_type(sub_data)
elm.text = val
return root_elm
def _prepare_data_for_save(self, data, use_plural=False):
        if isinstance(data, (list, tuple)):
root_elm = Element(self.name)
for d in data:
sub_elm = SubElement(root_elm, self.singular)
self.dict_to_xml(sub_elm, d)
        else:
            # build the element name locally; mutating self.singular here
            # would leave a doubled suffix on subsequent calls
            name = self.singular + 's' if use_plural else self.singular
            root_elm = self.dict_to_xml(Element(name), data)
return tostring(root_elm)
def _parse_api_response(self, response, resource_name):
data = json.loads(response.text, object_hook=json_load_object_hook)
assert data['Status'] == 'OK', "Expected the API to say OK but received %s" % data['Status']
return data[resource_name]
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
from xero import __version__ as VERSION
timeout = kwargs.pop('timeout', None)
uri, params, method, body, headers, singleobject = func(*args, **kwargs)
cert = getattr(self.credentials, 'client_cert', None)
if headers is None:
headers = {}
# Use the JSON API by default, but remember we might request a PDF (application/pdf)
# so don't force the Accept header.
if 'Accept' not in headers:
headers['Accept'] = 'application/json'
# Set a user-agent so Xero knows the traffic is coming from pyxero
headers['User-Agent'] = 'pyxero/%s ' % VERSION + requests.utils.default_user_agent()
response = getattr(requests, method)(
uri, data=body, headers=headers, auth=self.credentials.oauth,
params=params, cert=cert, timeout=timeout)
if response.status_code == 200:
# If we haven't got XML or JSON, assume we're being returned a binary file
if not response.headers['content-type'].startswith('application/json'):
return response.content
return self._parse_api_response(response, self.name)
elif response.status_code == 400:
if 'ValidationErrors' in response.content:
data = json.loads(response.text)
errors = [
{
'ValidationErrors': element['ValidationErrors']
} for element in data['Elements']
]
return errors
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper
def _get(self, id, headers=None, params=None):
uri = '/'.join([self.base_url, self.name, id])
uri_params = self.extra_params.copy()
uri_params.update(params if params else {})
return uri, uri_params, 'get', None, headers, True
def _allocate(self, id, data, headers=None, section='creditnotes'):
uri = '/'.join([self.base_url, section, id, 'allocations']) + '/'
body = {'xml': self._prepare_data_for_save(data, use_plural=True)}
return uri, {}, 'put', body, headers, True
def _get_attachments(self, id):
"""Retrieve a list of attachments associated with this Xero object."""
uri = '/'.join([self.base_url, self.name, id, 'Attachments']) + '/'
return uri, {}, 'get', None, None, False
def _get_attachment_data(self, id, filename):
"""
Retrieve the contents of a specific attachment (identified by filename).
"""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
return uri, {}, 'get', None, None, False
def get_attachment(self, id, filename, file):
"""
Retrieve the contents of a specific attachment (identified by filename).
Writes data to file object, returns length of data written.
"""
data = self.get_attachment_data(id, filename)
file.write(data)
return len(data)
def save_or_put(self, data, method='post', headers=None, summarize_errors=True):
uri = '/'.join([self.base_url, self.name])
body = {'xml': self._prepare_data_for_save(data)}
params = self.extra_params.copy()
if not summarize_errors:
params['summarizeErrors'] = 'false'
return uri, params, method, body, headers, False
def _save(self, data):
return self.save_or_put(data, method='post')
def _put(self, data, summarize_errors=True):
return self.save_or_put(data, method='put', summarize_errors=summarize_errors)
def _put_attachment_data(self, id, filename, data, content_type, include_online=False):
"""Upload an attachment to the Xero object."""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
params = {'IncludeOnline': 'true'} if include_online else {}
        headers = {'Content-Type': content_type, 'Content-Length': str(len(data))}
return uri, params, 'put', data, headers, False
def put_attachment(self, id, filename, file, content_type, include_online=False):
"""Upload an attachment to the Xero object (from file object)."""
self.put_attachment_data(id, filename, file.read(), content_type,
include_online=include_online)
def prepare_filtering_date(self, val):
if isinstance(val, datetime):
val = val.strftime('%a, %d %b %Y %H:%M:%S GMT')
else:
val = '"%s"' % val
return {'If-Modified-Since': val}
def _filter(self, **kwargs):
params = self.extra_params.copy()
headers = None
uri = '/'.join([self.base_url, self.name])
if kwargs:
if 'since' in kwargs:
val = kwargs['since']
headers = self.prepare_filtering_date(val)
del kwargs['since']
def get_filter_params(key, value):
last_key = key.split('_')[-1]
if last_key.upper().endswith('ID'):
return 'Guid("%s")' % six.text_type(value)
if key in self.BOOLEAN_FIELDS:
return 'true' if value else 'false'
elif key in self.DATE_FIELDS:
return 'DateTime(%s,%s,%s)' % (value.year, value.month, value.day)
elif key in self.DATETIME_FIELDS:
return value.isoformat()
else:
return '"%s"' % six.text_type(value)
def generate_param(key, value):
parts = key.split("__")
field = key.replace('_', '.')
fmt = '%s==%s'
if len(parts) == 2:
# support filters:
                    # Name__contains=John becomes Name.contains("John")
if parts[1] in ["contains", "startswith", "endswith"]:
field = parts[0]
fmt = ''.join(['%s.', parts[1], '(%s)'])
elif parts[1] in self.OPERATOR_MAPPINGS:
field = parts[0]
key = field
fmt = '%s' + self.OPERATOR_MAPPINGS[parts[1]] + '%s'
elif parts[1] in ["isnull"]:
sign = '=' if value else '!'
return '%s%s=null' % (parts[0], sign)
return fmt % (
field,
get_filter_params(key, value)
)
# Move any known parameter names to the query string
KNOWN_PARAMETERS = ['order', 'offset', 'page']
for param in KNOWN_PARAMETERS:
if param in kwargs:
params[param] = kwargs.pop(param)
filter_params = []
if 'raw' in kwargs:
raw = kwargs.pop('raw')
filter_params.append(raw)
# Treat any remaining arguments as filter predicates
# Xero will break if you search without a check for null in the first position:
# http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3
sortedkwargs = sorted(six.iteritems(kwargs),
key=lambda item: -1 if 'isnull' in item[0] else 0)
for key, value in sortedkwargs:
filter_params.append(generate_param(key, value))
if filter_params:
params['where'] = '&&'.join(filter_params)
return uri, params, 'get', None, headers, False
def _all(self, additional_path=None):
uri_list = [self.base_url, self.name]
if additional_path is not None:
uri_list.append(additional_path)
uri = '/'.join(uri_list)
return uri, {}, 'get', None, None, False
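# A minimal sketch of how the decorated methods above are usually reached
# through a pyxero client object; the `xero` instance and its credentials are
# assumed from the wider pyxero API, and the GUID below is a placeholder.
#
#     contacts = xero.contacts.filter(Name__contains='John', order='Name')
#     invoice = xero.invoices.get('297c2dc5-cc47-4afd-8ec8-74990b8761e9')
#     xero.invoices.save(invoice)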
|
Sing-Li/go-buildpack
|
refs/heads/master
|
builds/runtimes/python-2.7.6/lib/python2.7/test/test_future4.py
|
137
|
from __future__ import unicode_literals
import unittest
from test import test_support
class TestFuture(unittest.TestCase):
def assertType(self, obj, typ):
self.assertTrue(type(obj) is typ,
"type(%r) is %r, not %r" % (obj, type(obj), typ))
def test_unicode_strings(self):
self.assertType("", unicode)
self.assertType('', unicode)
self.assertType(r"", unicode)
self.assertType(r'', unicode)
self.assertType(""" """, unicode)
self.assertType(''' ''', unicode)
self.assertType(r""" """, unicode)
self.assertType(r''' ''', unicode)
self.assertType(u"", unicode)
self.assertType(u'', unicode)
self.assertType(ur"", unicode)
self.assertType(ur'', unicode)
self.assertType(u""" """, unicode)
self.assertType(u''' ''', unicode)
self.assertType(ur""" """, unicode)
self.assertType(ur''' ''', unicode)
self.assertType(b"", str)
self.assertType(b'', str)
self.assertType(br"", str)
self.assertType(br'', str)
self.assertType(b""" """, str)
self.assertType(b''' ''', str)
self.assertType(br""" """, str)
self.assertType(br''' ''', str)
self.assertType('' '', unicode)
self.assertType('' u'', unicode)
self.assertType(u'' '', unicode)
self.assertType(u'' u'', unicode)
def test_main():
test_support.run_unittest(TestFuture)
if __name__ == "__main__":
test_main()
|