| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null ⌀) |
|---|---|---|---|---|
jessstrap/servotk
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/py/_path/common.py
|
171
|
"""
"""
import os, sys, posixpath
import py
# Moved from local.py.
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
class Checkers:
_depend_on_existence = 'exists', 'link', 'dir', 'file'
def __init__(self, path):
self.path = path
def dir(self):
raise NotImplementedError
def file(self):
raise NotImplementedError
def dotfile(self):
return self.path.basename.startswith('.')
def ext(self, arg):
if not arg.startswith('.'):
arg = '.' + arg
return self.path.ext == arg
def exists(self):
raise NotImplementedError
def basename(self, arg):
return self.path.basename == arg
def basestarts(self, arg):
return self.path.basename.startswith(arg)
def relto(self, arg):
return self.path.relto(arg)
def fnmatch(self, arg):
return self.path.fnmatch(arg)
def endswith(self, arg):
return str(self.path).endswith(arg)
def _evaluate(self, kw):
for name, value in kw.items():
invert = False
meth = None
try:
meth = getattr(self, name)
except AttributeError:
if name[:3] == 'not':
invert = True
try:
meth = getattr(self, name[3:])
except AttributeError:
pass
if meth is None:
raise TypeError(
"no %r checker available for %r" % (name, self.path))
try:
if py.code.getrawcode(meth).co_argcount > 1:
if (not meth(value)) ^ invert:
return False
else:
if bool(value) ^ bool(meth()) ^ invert:
return False
except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
# EBUSY feels not entirely correct,
# but it's kind of necessary since ENOMEDIUM
# is not accessible in Python
for name in self._depend_on_existence:
if name in kw:
if kw.get(name):
return False
name = 'not' + name
if name in kw:
if not kw.get(name):
return False
return True
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(str(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
return self.new(basename='').join(*args, **kwargs)
def read_binary(self):
""" read and return a bytestring from reading the path. """
with self.open('rb') as f:
return f.read()
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
with self.open("r", encoding=encoding) as f:
return f.read()
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
with self.open(mode) as f:
return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if not cr:
content = self.read('rU')
return content.split('\n')
else:
f = self.open('rU')
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
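# A hedged usage sketch of the checkers above (assuming a concrete path
# object `p`, e.g. a py.path.local instance):
#   p.check()              # True iff the path exists
#   p.check(file=1)        # True only for existing regular files
#   p.check(dir=1, link=0) # an existing directory that is not a symlink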
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
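# Hedged example of the rule above, assuming p = py.path.local("/tmp/foo/bar.py"):
#   p.fnmatch("*.py")      # matched against the basename only -> True
#   p.fnmatch("foo/*.py")  # contains a separator, so "*" + sep is prepended
#                          # and matched against the full path -> True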
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = self.strpath
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
self.join(bestrelpath) == dest; if no such
path can be determined, return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
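# Hedged example: py.path.local("/a/b").bestrelpath(py.path.local("/a/c"))
# returns "../c" (common base "/a", one ".." up from "b", then down to "c").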
def exists(self):
return self.check()
def isdir(self):
return self.check(dir=1)
def isfile(self):
return self.check(file=1)
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
l = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
l.append(current)
if not reverse:
l.reverse()
return l
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
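# Hedged example: py.path.local("/a/b/c").common(py.path.local("/a/b/d"))
# returns py.path.local("/a/b"); on Windows, paths on different drives
# share no parts, so None is returned.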
def __add__(self, other):
""" return new path object with 'other' added to the basename"""
return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
""" return sort value (-1, 0, +1). """
try:
return cmp(self.strpath, other.strpath)
except AttributeError:
return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
try:
return self.strpath < other.strpath
except AttributeError:
return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable); paths not matching
it are not yielded. Defaults to None (everything is
returned).
rec is a filter (glob pattern or callable) that controls whether
a node is descended into, defaulting to None
ignore is an Exception class that is ignored when calling listdir()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadth-first search instead of the
default depth-first. Default: False
sort if True will sort entries within each directory level.
"""
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
yield x
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, '__call__'):
res.sort(sort)
else:
res.sort()
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
return self.strpath == str(other)
class Visitor:
def __init__(self, fil, rec, ignore, bf, sort):
if isinstance(fil, str):
fil = FNMatcher(fil)
if isinstance(rec, str):
self.rec = FNMatcher(rec)
elif not hasattr(rec, '__call__') and rec:
self.rec = lambda path: True
else:
self.rec = rec
self.fil = fil
self.ignore = ignore
self.breadthfirst = bf
self.optsort = sort and sorted or (lambda x: x)
def gen(self, path):
try:
entries = path.listdir()
except self.ignore:
return
rec = self.rec
dirs = self.optsort([p for p in entries
if p.check(dir=1) and (rec is None or rec(p))])
if not self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
for p in self.optsort(entries):
if self.fil is None or self.fil(p):
yield p
if self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
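# Hedged usage sketch of the visitor above:
#   somepath.visit(fil="*.py", rec=lambda p: p.basename != ".git")
# yields every *.py path below somepath while never descending into .git dirs.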
class FNMatcher:
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, path):
pattern = self.pattern
if (pattern.find(path.sep) == -1 and
iswin32 and
pattern.find(posixpath.sep) != -1):
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posixpath.sep, path.sep)
if pattern.find(path.sep) == -1:
name = path.basename
else:
name = str(path) # path.strpath # XXX svn?
if not os.path.isabs(pattern):
pattern = '*' + path.sep + pattern
return py.std.fnmatch.fnmatch(name, pattern)
|
Dandandan/wikiprogramming
|
refs/heads/master
|
jsrepl/build/extern/python/reloop-closured/lib/python2.7/new.py
|
233
|
"""Create new objects of various types. Deprecated.
This module is no longer required except for backward compatibility.
Objects of most types can now be created by calling the type object.
"""
from warnings import warnpy3k
warnpy3k("The 'new' module has been removed in Python 3.0; use the 'types' "
"module instead.", stacklevel=2)
del warnpy3k
from types import ClassType as classobj
from types import FunctionType as function
from types import InstanceType as instance
from types import MethodType as instancemethod
from types import ModuleType as module
from types import CodeType as code
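# A minimal sketch of what this shim provides (Python 2 only; the names are
# plain aliases into the types module):
#   import new, types
#   assert new.module is types.ModuleType
#   mod = new.module("demo")        # identical to types.ModuleType("demo")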
|
rallylee/gem5
|
refs/heads/master
|
src/arch/sparc/SparcNativeTrace.py
|
42
|
# Copyright (c) 2009 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
from m5.params import *
from CPUTracers import NativeTrace
class SparcNativeTrace(NativeTrace):
type = 'SparcNativeTrace'
cxx_class = 'Trace::SparcNativeTrace'
cxx_header = 'arch/sparc/nativetrace.hh'
|
CMLL/taiga-back
|
refs/heads/master
|
taiga/projects/mixins/__init__.py
|
12133432
| |
ACJTeam/enigma2
|
refs/heads/master
|
lib/python/Plugins/Extensions/CutListEditor/__init__.py
|
12133432
| |
rohitwaghchaure/frappe
|
refs/heads/develop
|
frappe/website/doctype/web_form_field/__init__.py
|
12133432
| |
dangra/scrapy
|
refs/heads/master
|
scrapy/templates/project/module/__init__.py
|
12133432
| |
andrewschaaf/pyxc-pj
|
refs/heads/master
|
pyxc/__init__.py
|
12133432
| |
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.4/Lib/idlelib/Debugger.py
|
9
|
import os
import bdb
import types
from Tkinter import *
from WindowList import ListedToplevel
from ScrolledList import ScrolledList
class Idb(bdb.Bdb):
def __init__(self, gui):
self.gui = gui
bdb.Bdb.__init__(self)
def user_line(self, frame):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame)
def user_exception(self, frame, info):
if self.in_rpc_code(frame):
self.set_step()
return
message = self.__frame2message(frame)
self.gui.interaction(message, frame, info)
def in_rpc_code(self, frame):
if frame.f_code.co_filename.count('rpc.py'):
return True
else:
prev_frame = frame.f_back
if prev_frame.f_code.co_filename.count('Debugger.py'):
# (that test will catch both Debugger.py and RemoteDebugger.py)
return False
return self.in_rpc_code(prev_frame)
def __frame2message(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
basename = os.path.basename(filename)
message = "%s:%s" % (basename, lineno)
if code.co_name != "?":
message = "%s: %s()" % (message, code.co_name)
return message
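# Hedged example of the message format built above: a frame at line 42 of
# /path/to/Debugger.py inside run() becomes "Debugger.py:42: run()".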
class Debugger:
vstack = vsource = vlocals = vglobals = None
def __init__(self, pyshell, idb=None):
if idb is None:
idb = Idb(self)
self.pyshell = pyshell
self.idb = idb
self.frame = None
self.make_gui()
self.interacting = 0
def run(self, *args):
try:
self.interacting = 1
return self.idb.run(*args)
finally:
self.interacting = 0
def close(self, event=None):
if self.interacting:
self.top.bell()
return
if self.stackviewer:
self.stackviewer.close(); self.stackviewer = None
# Clean up pyshell if user clicked debugger control close widget.
# (Causes a harmless extra cycle through close_debugger() if user
# toggled debugger from pyshell Debug menu)
self.pyshell.close_debugger()
# Now close the debugger control window....
self.top.destroy()
def make_gui(self):
pyshell = self.pyshell
self.flist = pyshell.flist
self.root = root = pyshell.root
self.top = top = ListedToplevel(root)
self.top.wm_title("Debug Control")
self.top.wm_iconname("Debug")
top.wm_protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<Escape>", self.close)
#
self.bframe = bframe = Frame(top)
self.bframe.pack(anchor="w")
self.buttons = bl = []
#
self.bcont = b = Button(bframe, text="Go", command=self.cont)
bl.append(b)
self.bstep = b = Button(bframe, text="Step", command=self.step)
bl.append(b)
self.bnext = b = Button(bframe, text="Over", command=self.next)
bl.append(b)
self.bret = b = Button(bframe, text="Out", command=self.ret)
bl.append(b)
self.bret = b = Button(bframe, text="Quit", command=self.quit)
bl.append(b)
#
for b in bl:
b.configure(state="disabled")
b.pack(side="left")
#
self.cframe = cframe = Frame(bframe)
self.cframe.pack(side="left")
#
if not self.vstack:
self.__class__.vstack = BooleanVar(top)
self.vstack.set(1)
self.bstack = Checkbutton(cframe,
text="Stack", command=self.show_stack, variable=self.vstack)
self.bstack.grid(row=0, column=0)
if not self.vsource:
self.__class__.vsource = BooleanVar(top)
self.bsource = Checkbutton(cframe,
text="Source", command=self.show_source, variable=self.vsource)
self.bsource.grid(row=0, column=1)
if not self.vlocals:
self.__class__.vlocals = BooleanVar(top)
self.vlocals.set(1)
self.blocals = Checkbutton(cframe,
text="Locals", command=self.show_locals, variable=self.vlocals)
self.blocals.grid(row=1, column=0)
if not self.vglobals:
self.__class__.vglobals = BooleanVar(top)
self.bglobals = Checkbutton(cframe,
text="Globals", command=self.show_globals, variable=self.vglobals)
self.bglobals.grid(row=1, column=1)
#
self.status = Label(top, anchor="w")
self.status.pack(anchor="w")
self.error = Label(top, anchor="w")
self.error.pack(anchor="w", fill="x")
self.errorbg = self.error.cget("background")
#
self.fstack = Frame(top, height=1)
self.fstack.pack(expand=1, fill="both")
self.flocals = Frame(top)
self.flocals.pack(expand=1, fill="both")
self.fglobals = Frame(top, height=1)
self.fglobals.pack(expand=1, fill="both")
#
if self.vstack.get():
self.show_stack()
if self.vlocals.get():
self.show_locals()
if self.vglobals.get():
self.show_globals()
def interaction(self, message, frame, info=None):
self.frame = frame
self.status.configure(text=message)
#
if info:
type, value, tb = info
try:
m1 = type.__name__
except AttributeError:
m1 = "%s" % str(type)
if value is not None:
try:
m1 = "%s: %s" % (m1, str(value))
except:
pass
bg = "yellow"
else:
m1 = ""
tb = None
bg = self.errorbg
self.error.configure(text=m1, background=bg)
#
sv = self.stackviewer
if sv:
stack, i = self.idb.get_stack(self.frame, tb)
sv.load_stack(stack, i)
#
self.show_variables(1)
#
if self.vsource.get():
self.sync_source_line()
#
for b in self.buttons:
b.configure(state="normal")
#
self.top.wakeup()
self.root.mainloop()
#
for b in self.buttons:
b.configure(state="disabled")
self.status.configure(text="")
self.error.configure(text="", background=self.errorbg)
self.frame = None
def sync_source_line(self):
frame = self.frame
if not frame:
return
filename, lineno = self.__frame2fileline(frame)
if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
self.flist.gotofileline(filename, lineno)
def __frame2fileline(self, frame):
code = frame.f_code
filename = code.co_filename
lineno = frame.f_lineno
return filename, lineno
def cont(self):
self.idb.set_continue()
self.root.quit()
def step(self):
self.idb.set_step()
self.root.quit()
def next(self):
self.idb.set_next(self.frame)
self.root.quit()
def ret(self):
self.idb.set_return(self.frame)
self.root.quit()
def quit(self):
self.idb.set_quit()
self.root.quit()
stackviewer = None
def show_stack(self):
if not self.stackviewer and self.vstack.get():
self.stackviewer = sv = StackViewer(self.fstack, self.flist, self)
if self.frame:
stack, i = self.idb.get_stack(self.frame, None)
sv.load_stack(stack, i)
else:
sv = self.stackviewer
if sv and not self.vstack.get():
self.stackviewer = None
sv.close()
self.fstack['height'] = 1
def show_source(self):
if self.vsource.get():
self.sync_source_line()
def show_frame(self, (frame, lineno)):
self.frame = frame
self.show_variables()
localsviewer = None
globalsviewer = None
def show_locals(self):
lv = self.localsviewer
if self.vlocals.get():
if not lv:
self.localsviewer = NamespaceViewer(self.flocals, "Locals")
else:
if lv:
self.localsviewer = None
lv.close()
self.flocals['height'] = 1
self.show_variables()
def show_globals(self):
gv = self.globalsviewer
if self.vglobals.get():
if not gv:
self.globalsviewer = NamespaceViewer(self.fglobals, "Globals")
else:
if gv:
self.globalsviewer = None
gv.close()
self.fglobals['height'] = 1
self.show_variables()
def show_variables(self, force=0):
lv = self.localsviewer
gv = self.globalsviewer
frame = self.frame
if not frame:
ldict = gdict = None
else:
ldict = frame.f_locals
gdict = frame.f_globals
if lv and gv and ldict is gdict:
ldict = None
if lv:
lv.load_dict(ldict, force, self.pyshell.interp.rpcclt)
if gv:
gv.load_dict(gdict, force, self.pyshell.interp.rpcclt)
def set_breakpoint_here(self, filename, lineno):
self.idb.set_break(filename, lineno)
def clear_breakpoint_here(self, filename, lineno):
self.idb.clear_break(filename, lineno)
def clear_file_breaks(self, filename):
self.idb.clear_all_file_breaks(filename)
def load_breakpoints(self):
"Load PyShellEditorWindow breakpoints into subprocess debugger"
pyshell_edit_windows = self.pyshell.flist.inversedict.keys()
for editwin in pyshell_edit_windows:
filename = editwin.io.filename
try:
for lineno in editwin.breakpoints:
self.set_breakpoint_here(filename, lineno)
except AttributeError:
continue
class StackViewer(ScrolledList):
def __init__(self, master, flist, gui):
ScrolledList.__init__(self, master, width=80)
self.flist = flist
self.gui = gui
self.stack = []
def load_stack(self, stack, index=None):
self.stack = stack
self.clear()
for i in range(len(stack)):
frame, lineno = stack[i]
try:
modname = frame.f_globals["__name__"]
except:
modname = "?"
code = frame.f_code
filename = code.co_filename
funcname = code.co_name
import linecache
sourceline = linecache.getline(filename, lineno)
import string
sourceline = string.strip(sourceline)
if funcname in ("?", "", None):
item = "%s, line %d: %s" % (modname, lineno, sourceline)
else:
item = "%s.%s(), line %d: %s" % (modname, funcname,
lineno, sourceline)
if i == index:
item = "> " + item
self.append(item)
if index is not None:
self.select(index)
def popup_event(self, event):
"override base method"
if self.stack:
return ScrolledList.popup_event(self, event)
def fill_menu(self):
"override base method"
menu = self.menu
menu.add_command(label="Go to source line",
command=self.goto_source_line)
menu.add_command(label="Show stack frame",
command=self.show_stack_frame)
def on_select(self, index):
"override base method"
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def on_double(self, index):
"override base method"
self.show_source(index)
def goto_source_line(self):
index = self.listbox.index("active")
self.show_source(index)
def show_stack_frame(self):
index = self.listbox.index("active")
if 0 <= index < len(self.stack):
self.gui.show_frame(self.stack[index])
def show_source(self, index):
if not (0 <= index < len(self.stack)):
return
frame, lineno = self.stack[index]
code = frame.f_code
filename = code.co_filename
if os.path.isfile(filename):
edit = self.flist.open(filename)
if edit:
edit.gotoline(lineno)
class NamespaceViewer:
def __init__(self, master, title, dict=None):
width = 0
height = 40
if dict:
height = 20*len(dict) # XXX 20 == observed height of Entry widget
self.master = master
self.title = title
import repr
self.repr = repr.Repr()
self.repr.maxstring = 60
self.repr.maxother = 60
self.frame = frame = Frame(master)
self.frame.pack(expand=1, fill="both")
self.label = Label(frame, text=title, borderwidth=2, relief="groove")
self.label.pack(fill="x")
self.vbar = vbar = Scrollbar(frame, name="vbar")
vbar.pack(side="right", fill="y")
self.canvas = canvas = Canvas(frame,
height=min(300, max(40, height)),
scrollregion=(0, 0, width, height))
canvas.pack(side="left", fill="both", expand=1)
vbar["command"] = canvas.yview
canvas["yscrollcommand"] = vbar.set
self.subframe = subframe = Frame(canvas)
self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
self.load_dict(dict)
dict = -1
def load_dict(self, dict, force=0, rpc_client=None):
if dict is self.dict and not force:
return
subframe = self.subframe
frame = self.frame
for c in subframe.children.values():
c.destroy()
self.dict = None
if not dict:
l = Label(subframe, text="None")
l.grid(row=0, column=0)
else:
names = dict.keys()
names.sort()
row = 0
for name in names:
value = dict[name]
svalue = self.repr.repr(value) # repr(value)
# Strip extra quotes caused by calling repr on the (already)
# repr'd value sent across the RPC interface:
if rpc_client:
svalue = svalue[1:-1]
l = Label(subframe, text=name)
l.grid(row=row, column=0, sticky="nw")
l = Entry(subframe, width=0, borderwidth=0)
l.insert(0, svalue)
l.grid(row=row, column=1, sticky="nw")
row = row+1
self.dict = dict
# XXX Could we use a <Configure> callback for the following?
subframe.update_idletasks() # Alas!
width = subframe.winfo_reqwidth()
height = subframe.winfo_reqheight()
canvas = self.canvas
self.canvas["scrollregion"] = (0, 0, width, height)
if height > 300:
canvas["height"] = 300
frame.pack(expand=1)
else:
canvas["height"] = height
frame.pack(expand=0)
def close(self):
self.frame.destroy()
|
Pablo126/SSBW
|
refs/heads/master
|
Entrega1/lib/python3.5/site-packages/django/template/loader_tags.py
|
44
|
import logging
import posixpath
import warnings
from collections import defaultdict
from django.utils import six
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.safestring import mark_safe
from .base import (
Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs,
)
from .library import Library
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
logger = logging.getLogger('django.template')
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = defaultdict(list)
def add_blocks(self, blocks):
for name, block in six.iteritems(blocks):
self.blocks[name].insert(0, block)
def pop(self, name):
try:
return self.blocks[name].pop()
except IndexError:
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except IndexError:
return None
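# Sketch of the inheritance mechanics: add_blocks() inserts at index 0, so
# blocks added later (from parent templates) sit at the front of each list,
# while the most-derived child override stays at the end, where get_block()
# and pop() pick it up first.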
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
with context.push():
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = type(self)(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
return result
def super(self):
if not hasattr(self, 'context'):
raise TemplateSyntaxError(
"'%s' object has no attribute 'context'. Did you use "
"{{ block.super }} in a base template?" % self.__class__.__name__
)
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
context_key = 'extends_context'
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
def __repr__(self):
return '<ExtendsNode: extends %s>' % self.parent_name.token
def find_template(self, template_name, context):
"""
This is a wrapper around engine.find_template(). A history is kept in
the render_context attribute between successive extends calls and
passed as the skip argument. This enables extends to work recursively
without extending the same template twice.
"""
# RemovedInDjango20Warning: If any non-recursive loaders are installed
# do a direct template lookup. If the same template name appears twice,
# raise an exception to avoid system recursion.
for loader in context.template.engine.template_loaders:
if not loader.supports_recursion:
history = context.render_context.setdefault(
self.context_key, [context.template.origin.template_name],
)
if template_name in history:
raise ExtendsError(
"Cannot extend templates recursively when using "
"non-recursive template loaders",
)
template = context.template.engine.get_template(template_name)
history.append(template_name)
return template
history = context.render_context.setdefault(
self.context_key, [context.template.origin],
)
template, origin = context.template.engine.find_template(
template_name, skip=history,
)
history.append(origin)
return template
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or\
isinstance(self.parent_name.var, Variable):
error_msg += " Got this from the '%s' variable." %\
self.parent_name.token
raise TemplateSyntaxError(error_msg)
if isinstance(parent, Template):
# parent is a django.template.Template
return parent
if isinstance(getattr(parent, 'template', None), Template):
# parent is a django.template.backends.django.Template
return parent.template
return self.find_template(parent, context)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = {n.name: n for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
with context.render_context.push_state(compiled_parent, isolated_context=False):
return compiled_parent._render(context)
class IncludeNode(Node):
context_key = '__include_context'
def __init__(self, template, *args, **kwargs):
self.template = template
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(IncludeNode, self).__init__(*args, **kwargs)
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
try:
template = self.template.resolve(context)
# Does this quack like a Template?
if not callable(getattr(template, 'render', None)):
# If not, we'll try our cache, and get_template()
template_name = template
cache = context.render_context.dicts[0].setdefault(self, {})
template = cache.get(template_name)
if template is None:
template = context.template.engine.get_template(template_name)
cache[template_name] = template
# Use the base.Template of a backends.django.Template.
elif hasattr(template, 'template'):
template = template.template
values = {
name: var.resolve(context)
for name, var in six.iteritems(self.extra_context)
}
if self.isolated_context:
return template.render(context.new(values))
with context.push(**values):
return template.render(context)
except Exception as e:
if context.template.engine.debug:
raise
template_name = getattr(context, 'template_name', None) or 'unknown'
warnings.warn(
"Rendering {%% include '%s' %%} raised %s. In Django 2.1, "
"this exception will be raised rather than silenced and "
"rendered as an empty string." %
(template_name, e.__class__.__name__),
RemovedInDjango21Warning,
)
logger.warning(
"Exception raised while rendering {%% include %%} for "
"template '%s'. Empty string rendered instead.",
template_name,
exc_info=True,
)
return ''
@register.tag('block')
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
# This check is kept for backwards-compatibility. See #3100.
endblock = parser.next_token()
acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
if endblock.contents not in acceptable_endblocks:
parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
return BlockNode(block_name, nodelist)
def construct_relative_path(current_template_name, relative_name):
"""
Convert a relative path (starting with './' or '../') to the full template
name based on the current_template_name.
"""
if not any(relative_name.startswith(x) for x in ["'./", "'../", '"./', '"../']):
# relative_name is a variable or a literal that doesn't contain a
# relative path.
return relative_name
new_name = posixpath.normpath(
posixpath.join(
posixpath.dirname(current_template_name.lstrip('/')),
relative_name.strip('\'"')
)
)
if new_name.startswith('../'):
raise TemplateSyntaxError(
"The relative path '%s' points outside the file hierarchy that "
"template '%s' is in." % (relative_name, current_template_name)
)
if current_template_name.lstrip('/') == new_name:
raise TemplateSyntaxError(
"The relative path '%s' was translated to template name '%s', the "
"same template in which the tag appears."
% (relative_name, current_template_name)
)
return '"%s"' % new_name
@register.tag('extends')
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
"""
Loads a template and renders it with the current context. You can pass
additional context using keyword arguments.
Example::
{% include "foo/some_include" %}
{% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
Use the ``only`` argument to exclude the current context when rendering
the included template::
{% include "foo/some_include" only %}
{% include "foo/some_include" with bar="1" only %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to "
"be included." % bits[0]
)
options = {}
remaining_bits = bits[2:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
isolated_context=isolated_context)
|
mgunyho/pyspread
|
refs/heads/master
|
pyspread/src/lib/parsers.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
parsers
=======
Provides
--------
* get_font_from_data
* get_pen_from_data
* color2code
* code2color
* parse_dict_strings
* is_svg
"""
try:
import rsvg
import glib
except ImportError:
rsvg = None
import ast
import wx
from src.sysvars import get_default_font
def get_font_from_data(fontdata):
"""Returns wx.Font from fontdata string"""
textfont = get_default_font()
if fontdata != "":
nativefontinfo = wx.NativeFontInfo()
nativefontinfo.FromString(fontdata)
# OS X does not like a PointSize of 0
# Therefore, it is explicitly set to the system default font point size
if not nativefontinfo.GetPointSize():
nativefontinfo.SetPointSize(get_default_font().GetPointSize())
textfont.SetNativeFontInfo(nativefontinfo)
return textfont
def get_pen_from_data(pendata):
"""Returns wx.Pen from pendata attribute list"""
pen_color = wx.Colour()
pen_color.SetRGB(pendata[0])
pen = wx.Pen(pen_color, *pendata[1:])
pen.SetJoin(wx.JOIN_MITER)
return pen
def code2color(color_string):
"""Returns wx.Colour from a string of a 3-tuple of floats in [0.0, 1.0]"""
color_tuple = ast.literal_eval(color_string)
color_tuple_int = map(lambda x: int(x * 255.0), color_tuple)
return wx.Colour(*color_tuple_int)
def color2code(color):
"""Returns repr of 3-tuple of floats in [0.0, 1.0] from wx.Colour"""
return unicode(tuple(i / 255.0 for i in color.Get()))
def color_pack2rgb(packed):
"""Returns r, g, b tuple from packed wx.ColourGetRGB value"""
r = packed & 255
g = (packed & (255 << 8)) >> 8
b = (packed & (255 << 16)) >> 16
return r, g, b
def color_rgb2pack(r, g, b):
"""Returns packed wx.ColourGetRGB value from r, g, b tuple"""
return r + (g << 8) + (b << 16)
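# Hedged round-trip example of the packing above:
#   color_rgb2pack(0x11, 0x22, 0x33) == 0x332211
#   color_pack2rgb(0x332211) == (0x11, 0x22, 0x33)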
def unquote_string(code):
"""Returns a string from code that contains a repr of the string"""
scode = code.strip()
assert scode[-1] in ["'", '"']
assert scode[0] in ["'", '"'] or scode[1] in ["'", '"']
return ast.literal_eval(scode)
def parse_dict_strings(code):
"""Generator of elements of a dict that is given in the code string
Parsing is shallow, i.e. all content is yielded as strings
Parameters
----------
code: String
\tString that contains a dict
"""
i = 0
level = 0
chunk_start = 0
curr_paren = None
for i, char in enumerate(code):
if char in ["(", "[", "{"] and curr_paren is None:
level += 1
elif char in [")", "]", "}"] and curr_paren is None:
level -= 1
elif char in ['"', "'"]:
if curr_paren == char:
curr_paren = None
elif curr_paren is None:
curr_paren = char
if level == 0 and char in [':', ','] and curr_paren is None:
yield code[chunk_start: i].strip()
chunk_start = i + 1
yield code[chunk_start:i + 1].strip()
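# Hedged example of the shallow split above:
#   list(parse_dict_strings('"a": 1, "b": [2, 3]'))
# yields ['"a"', '1', '"b"', '[2, 3]'] -- nested brackets and quoted
# separators never split a chunk.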
def common_start(strings):
"""Returns start sub-string that is common for all given strings
Parameters
----------
strings: List of strings
\tThese strings are evaluated for their largest common start string
"""
def gen_start_strings(string):
"""Generator that yield start sub-strings of length 1, 2, ..."""
for i in xrange(1, len(string) + 1):
yield string[:i]
# Empty strings list
if not strings:
return ""
start_string = ""
# Successively test start strings of the (lexicographically) largest string
for start_string in gen_start_strings(max(strings)):
if not all(string.startswith(start_string) for string in strings):
return start_string[:-1]
return start_string
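# Hedged example: common_start(["spam", "spameggs", "spamalot"]) returns
# "spam"; candidates come from max(strings) and are tested against every string.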
def is_svg(code):
"""Checks if code is an svg image
Parameters
----------
code: String
\tCode to be parsed in order to check SVG compliance
"""
if rsvg is None:
return
try:
rsvg.Handle(data=code)
except glib.GError:
return False
# The SVG file has to refer to its xmlns
# Hopefully, it does so within the first 1000 characters
if "http://www.w3.org/2000/svg" in code[:1000]:
return True
return False
|
jhancock93/autorest
|
refs/heads/master
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Lro/autorestlongrunningoperationtestservice/models/product.py
|
14
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Product(Resource):
"""Product.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar type: Resource Type
:vartype type: str
:param tags:
:type tags: dict
:param location: Resource Location
:type location: str
:ivar name: Resource Name
:vartype name: str
:param provisioning_state:
:type provisioning_state: str
:ivar provisioning_state_values: Possible values include: 'Succeeded',
'Failed', 'canceled', 'Accepted', 'Creating', 'Created', 'Updating',
'Updated', 'Deleting', 'Deleted', 'OK'
:vartype provisioning_state_values: str or :class:`enum
<fixtures.acceptancetestslro.models.enum>`
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
'provisioning_state_values': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'provisioning_state_values': {'key': 'properties.provisioningStateValues', 'type': 'str'},
}
def __init__(self, tags=None, location=None, provisioning_state=None):
super(Product, self).__init__(tags=tags, location=location)
self.provisioning_state = provisioning_state
self.provisioning_state_values = None
|
cyberworm/ircbot
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='ircbot',
version='0.1',
description='A small IRC bot framework for Python',
author='Cyberworm',
author_email='cybaworm@gmail.com',
url='https://github.com/cyberworm/ircbot',
packages=['ircbot'],
)
|
andela-ifageyinbo/django
|
refs/heads/master
|
tests/check_framework/test_urls.py
|
84
|
from django.core.checks.urls import check_url_config
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckUrlsTest(SimpleTestCase):
@override_settings(ROOT_URLCONF='check_framework.urls.no_warnings')
def test_no_warnings(self):
result = check_url_config(None)
self.assertEqual(result, [])
@override_settings(ROOT_URLCONF='check_framework.urls.include_with_dollar')
def test_include_with_dollar(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W001')
expected_msg = "Your URL pattern '^include-with-dollar$' uses include with a regex ending with a '$'."
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls.beginning_with_slash')
def test_beginning_with_slash(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W002')
expected_msg = "Your URL pattern '/starting-with-slash/$' has a regex beginning with a '/'"
self.assertIn(expected_msg, warning.msg)
@override_settings(ROOT_URLCONF='check_framework.urls.name_with_colon')
def test_name_with_colon(self):
result = check_url_config(None)
self.assertEqual(len(result), 1)
warning = result[0]
self.assertEqual(warning.id, 'urls.W003')
expected_msg = "Your URL pattern '^$' [name='name_with:colon'] has a name including a ':'."
self.assertIn(expected_msg, warning.msg)
|
Iotic-Labs/py-ubjson
|
refs/heads/dev-contrib
|
test/__init__.py
|
12133432
| |
Adamwinwho/ttbug
|
refs/heads/master
|
ttbug/blocks/__init__.py
|
12133432
| |
1modm/mesc
|
refs/heads/master
|
thirdparty/__init__.py
|
12133432
| |
v-legoff/pa-poc2
|
refs/heads/master
|
bundles/chat/websockets/__init__.py
|
12133432
| |
CingHu/neutron-ustack
|
refs/heads/master
|
neutron/tests/unit/vmware/extensions/__init__.py
|
12133432
| |
amisrs/one-eighty
|
refs/heads/master
|
angular_flask/lib/python2.7/site-packages/websocket/_http.py
|
15
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import errno
import os
import socket
import sys
import six
from ._exceptions import *
from ._logging import *
from ._socket import *
from ._ssl_compat import *
from ._url import *
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
__all__ = ["proxy_info", "connect", "read_headers"]
class proxy_info(object):
def __init__(self, **options):
self.host = options.get("http_proxy_host", None)
if self.host:
self.port = options.get("http_proxy_port", 0)
self.auth = options.get("http_proxy_auth", None)
self.no_proxy = options.get("http_no_proxy", None)
else:
self.port = 0
self.auth = None
self.no_proxy = None
def connect(url, options, proxy, socket):
hostname, port, resource, is_secure = parse_url(url)
if socket:
return socket, (hostname, port, resource)
addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
hostname, port, is_secure, proxy)
if not addrinfo_list:
raise WebSocketException(
"Host not found.: " + hostname + ":" + str(port))
sock = None
try:
sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
if need_tunnel:
sock = _tunnel(sock, hostname, port, auth)
if is_secure:
if HAVE_SSL:
sock = _ssl_socket(sock, options.sslopt, hostname)
else:
raise WebSocketException("SSL not available.")
return sock, (hostname, port, resource)
except:
if sock:
sock.close()
raise
def _get_addrinfo_list(hostname, port, is_secure, proxy):
phost, pport, pauth = get_proxy_info(
hostname, is_secure, proxy.host, proxy.port, proxy.auth, proxy.no_proxy)
if not phost:
addrinfo_list = socket.getaddrinfo(
hostname, port, 0, 0, socket.SOL_TCP)
return addrinfo_list, False, None
else:
pport = pport and pport or 80
addrinfo_list = socket.getaddrinfo(phost, pport, 0, 0, socket.SOL_TCP)
return addrinfo_list, True, pauth
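# Note on the py2-era idiom above: "pport and pport or 80" simply defaults
# the proxy port to 80 whenever no explicit port was given.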
def _open_socket(addrinfo_list, sockopt, timeout):
err = None
for addrinfo in addrinfo_list:
family = addrinfo[0]
sock = socket.socket(family)
sock.settimeout(timeout)
for opts in DEFAULT_SOCKET_OPTION:
sock.setsockopt(*opts)
for opts in sockopt:
sock.setsockopt(*opts)
address = addrinfo[4]
try:
sock.connect(address)
except socket.error as error:
error.remote_ip = str(address[0])
if error.errno in (errno.ECONNREFUSED, ):
err = error
continue
else:
raise
else:
break
else:
raise err
return sock
def _can_use_sni():
return six.PY2 and sys.version_info >= (2, 7, 9) or sys.version_info >= (3, 2)
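# i.e. SNI is assumed usable on CPython 2.7.9+ or 3.2+, the versions where
# the ssl module gained SSLContext and server_hostname support.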
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_SSLv23))
if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
context.load_verify_locations(cafile=sslopt.get('ca_certs', None))
if sslopt.get('certfile', None):
context.load_cert_chain(
sslopt['certfile'],
sslopt.get('keyfile', None),
sslopt.get('password', None),
)
# see
# https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
context.verify_mode = sslopt['cert_reqs']
if HAVE_CONTEXT_CHECK_HOSTNAME:
context.check_hostname = check_hostname
if 'ciphers' in sslopt:
context.set_ciphers(sslopt['ciphers'])
if 'cert_chain' in sslopt:
certfile, keyfile, password = sslopt['cert_chain']
context.load_cert_chain(certfile, keyfile, password)
return context.wrap_socket(
sock,
do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
server_hostname=hostname,
)
def _ssl_socket(sock, user_sslopt, hostname):
sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
sslopt.update(user_sslopt)
if os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE'):
certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
else:
certPath = os.path.join(
os.path.dirname(__file__), "cacert.pem")
if os.path.isfile(certPath) and user_sslopt.get('ca_certs', None) is None:
sslopt['ca_certs'] = certPath
check_hostname = sslopt["cert_reqs"] != ssl.CERT_NONE and sslopt.pop(
'check_hostname', True)
if _can_use_sni():
sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
else:
sslopt.pop('check_hostname', True)
sock = ssl.wrap_socket(sock, **sslopt)
if not HAVE_CONTEXT_CHECK_HOSTNAME and check_hostname:
match_hostname(sock.getpeercert(), hostname)
return sock
def _tunnel(sock, host, port, auth):
debug("Connecting proxy...")
connect_header = "CONNECT %s:%d HTTP/1.0\r\n" % (host, port)
# TODO: support digest auth.
if auth and auth[0]:
auth_str = auth[0]
if auth[1]:
auth_str += ":" + auth[1]
encoded_str = base64encode(auth_str.encode()).strip().decode()
connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
connect_header += "\r\n"
dump("request header", connect_header)
send(sock, connect_header)
try:
status, resp_headers = read_headers(sock)
except Exception as e:
raise WebSocketProxyException(str(e))
if status != 200:
raise WebSocketProxyException(
"failed CONNECT via proxy status: %r" % status)
return sock
def read_headers(sock):
status = None
headers = {}
trace("--- response header ---")
while True:
line = recv_line(sock)
line = line.decode('utf-8').strip()
if not line:
break
trace(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
headers[key.lower()] = value.strip()
else:
raise WebSocketException("Invalid header")
trace("-----------------------")
return status, headers
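# Hedged example of the return shape: a response of
#   "HTTP/1.1 200 Connection established\r\nServer: proxy\r\n\r\n"
# comes back as (200, {"server": "proxy"}) -- header keys are lower-cased.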
|
pklimai/py-junos-eznc
|
refs/heads/master
|
lib/jnpr/junos/resources/autosys.py
|
3
|
"""
Pythonifier for AutoSys Table/View
"""
from jnpr.junos.factory import loadyaml
from os.path import splitext
_YAML_ = splitext(__file__)[0] + '.yml'
globals().update(loadyaml(_YAML_))
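# Sketch of the factory pattern used above: loadyaml() reads the sibling
# autosys.yml and returns a dict of generated Table/View classes, which
# globals().update() injects into this module's namespace for import.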
|
atalax/libsigrokdecode
|
refs/heads/qi
|
decoders/i2cdemux/pd.py
|
13
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
class Decoder(srd.Decoder):
api_version = 2
id = 'i2cdemux'
name = 'I²C demux'
longname = 'I²C demultiplexer'
desc = 'Demux I²C packets into per-slave-address streams.'
license = 'gplv2+'
inputs = ['i2c']
outputs = [] # TODO: Only known at run-time.
def __init__(self, **kwargs):
self.packets = [] # Local cache of I²C packets
self.slaves = [] # List of known slave addresses
self.stream = -1 # Current output stream
self.streamcount = 0 # Number of created output streams
def start(self):
self.out_python = []
# Grab I²C packets into a local cache, until an I²C STOP condition
# packet comes along. At some point before that STOP condition, there
# will have been an ADDRESS READ or ADDRESS WRITE which contains the
# I²C address of the slave that the master wants to talk to.
# We use this slave address to figure out which output stream should
# get the whole chunk of packets (from START to STOP).
def decode(self, ss, es, data):
cmd, databyte = data
# Add the I²C packet to our local cache.
self.packets.append([ss, es, data])
if cmd in ('ADDRESS READ', 'ADDRESS WRITE'):
if databyte in self.slaves:
self.stream = self.slaves.index(databyte)
return
# We've never seen this slave; add a new stream.
self.slaves.append(databyte)
self.out_python.append(self.register(srd.OUTPUT_PYTHON,
proto_id='i2c-%s' % hex(databyte)))
self.stream = self.streamcount
self.streamcount += 1
elif cmd == 'STOP':
if self.stream == -1:
raise Exception('Invalid stream!') # FIXME?
# Send the whole chunk of I²C packets to the correct stream.
for p in self.packets:
self.put(p[0], p[1], self.out_python[self.stream], p[2])
self.packets = []
self.stream = -1
else:
pass # Do nothing, only add the I²C packet to our cache.
|
Eveler/libs
|
refs/heads/splited_document_engine
|
__Python__/ufms_blanks/test_PythonReports.py
|
1
|
#!/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Savenko'
def run():
# make some dummy data
_data = [{
"item": _ii,
"sub": [{"item": _jj} for _jj in xrange(_ii * 10, _ii * 10 + 10)]
} for _ii in xrange(10)]
# create report builder
from PythonReports.builder import Builder
_builder = Builder("submain.prt")
# build printout
_printout = _builder.run(_data)
# write printout file
_out = file("submain.prp", "w")
_printout.write(_out)
_printout.validate()
from PythonReports.pdf import write
write(_printout, "result.pdf")
_out.close()
run()
from PythonReports.editor import editor
editor.main()
|
aliceriot/zulip
|
refs/heads/master
|
tools/show-profile-results.py
|
115
|
#!/usr/bin/env python
import sys
import pstats
'''
This is a helper script to make it easy to show profile
results after using a Python decorator. It's meant to be
a simple example that you can hack on, or better yet, you
can find more advanced tools for showing profiler results.
'''
try:
fn = sys.argv[1]
except:
print '''
Please supply a filename. (If you use the profiled decorator,
the file will have a suffix of ".profile".)
'''
sys.exit(1)
p = pstats.Stats(fn)
p.strip_dirs().sort_stats('cumulative').print_stats(25)
p.strip_dirs().sort_stats('time').print_stats(25)
|
mahim97/zulip
|
refs/heads/master
|
zerver/management/commands/logout_all_users.py
|
15
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.sessions import delete_all_deactivated_user_sessions, \
delete_all_user_sessions, delete_realm_user_sessions
class Command(ZulipBaseCommand):
help = "Log out all users."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('--deactivated-only',
action='store_true',
default=False,
help="Only logout all users who are deactivated")
self.add_realm_args(parser, help="Only logout all users in a particular realm")
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
if realm:
delete_realm_user_sessions(realm)
elif options["deactivated_only"]:
delete_all_deactivated_user_sessions()
else:
delete_all_user_sessions()
|
mgireesh05/dev-util
|
refs/heads/master
|
autoupdate_lib.py
|
2
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing common autoupdate utilities and protocol dictionaries."""
import datetime
import os
import time
from xml.dom import minidom
APP_ID = 'e96281a6-d1af-4bde-9a0a-97b76e56dc57'
# Responses for the various Omaha protocols indexed by the protocol version.
UPDATE_RESPONSE = {}
UPDATE_RESPONSE['2.0'] = """<?xml version="1.0" encoding="UTF-8"?>
<gupdate xmlns="http://www.google.com/update2/response" protocol="2.0">
<daystart elapsed_seconds="%(time_elapsed)s"/>
<app appid="{%(appid)s}" status="ok">
<ping status="ok"/>
<updatecheck
ChromeOSVersion="9999.0.0"
codebase="%(url)s"
hash="%(sha1)s"
sha256="%(sha256)s"
needsadmin="false"
size="%(size)s"
IsDelta="%(is_delta_format)s"
status="ok"
%(extra_attr)s/>
</app>
</gupdate>
"""
UPDATE_RESPONSE['3.0'] = """<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0">
<daystart elapsed_seconds="%(time_elapsed)s"/>
<app appid="{%(appid)s}" status="ok">
<ping status="ok"/>
<updatecheck status="ok">
<urls>
<url codebase="%(codebase)s/"/>
</urls>
<manifest version="9999.0.0">
<packages>
<package hash="%(sha1)s" name="%(filename)s" size="%(size)s"
required="true"/>
</packages>
<actions>
<action event="postinstall"
ChromeOSVersion="9999.0.0"
sha256="%(sha256)s"
needsadmin="false"
IsDelta="%(is_delta_format)s"
%(extra_attr)s />
</actions>
</manifest>
</updatecheck>
</app>
</response>
"""
# Responses for the various Omaha protocols indexed by the protocol version
# when there's no update to be served.
NO_UPDATE_RESPONSE = {}
NO_UPDATE_RESPONSE['2.0'] = """<?xml version="1.0" encoding="UTF-8"?>
<gupdate xmlns="http://www.google.com/update2/response" protocol="2.0">
<daystart elapsed_seconds="%(time_elapsed)s"/>
<app appid="{%(appid)s}" status="ok">
<ping status="ok"/>
<updatecheck status="noupdate"/>
</app>
</gupdate>
"""
NO_UPDATE_RESPONSE['3.0'] = """<?xml version="1.0" encoding="UTF-8"?>
<response protocol="3.0">
<daystart elapsed_seconds="%(time_elapsed)s"/>
<app appid="{%(appid)s}" status="ok">
<ping status="ok"/>
<updatecheck status="noupdate"/>
</app>
</response>
"""
class UnknownProtocolRequestedException(Exception):
"""Raised when an supported protocol is specified."""
def GetSecondsSinceMidnight():
"""Returns the seconds since midnight as a decimal value."""
now = time.localtime()
return now[3] * 3600 + now[4] * 60 + now[5]
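# For example, at 01:02:03 local time this returns 1*3600 + 2*60 + 3 = 3723.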
def GetCommonResponseValues():
"""Returns a dictionary of default values for the response."""
response_values = {}
response_values['appid'] = APP_ID
response_values['time_elapsed'] = GetSecondsSinceMidnight()
return response_values
def GetSubstitutedResponse(response_dict, protocol, response_values):
"""Substitutes the protocol-specific response with response_values.
Args:
response_dict: Canned response messages indexed by protocol.
    protocol: client's protocol version from the request XML.
response_values: Values to be substituted in the canned response.
Returns:
Xml string to be passed back to client.
"""
response_xml = response_dict[protocol] % response_values
return response_xml
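# Illustrative call (values hypothetical):
#   GetSubstitutedResponse(NO_UPDATE_RESPONSE, '2.0',
#                          {'appid': APP_ID, 'time_elapsed': 42})
# renders the 2.0 "no update" template with those values filled in.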
def GetUpdateResponse(sha1, sha256, size, url, is_delta_format, protocol,
critical_update=False):
"""Returns a protocol-specific response to the client for a new update.
Args:
sha1: SHA1 hash of update blob
sha256: SHA256 hash of update blob
size: size of update blob
url: where to find update blob
is_delta_format: true if url refers to a delta payload
    protocol: client's protocol version from the request XML.
critical_update: whether this is a critical update.
Returns:
Xml string to be passed back to client.
"""
response_values = GetCommonResponseValues()
response_values['sha1'] = sha1
response_values['sha256'] = sha256
response_values['size'] = size
response_values['url'] = url
(codebase, filename) = os.path.split(url)
response_values['codebase'] = codebase
response_values['filename'] = filename
response_values['is_delta_format'] = is_delta_format
extra_attributes = []
if critical_update:
# The date string looks like '20111115' (2011-11-15). As of writing,
# there's no particular format for the deadline value that the
# client expects -- it's just empty vs. non-empty.
date_str = datetime.date.today().strftime('%Y%m%d')
extra_attributes.append('deadline="%s"' % date_str)
response_values['extra_attr'] = ' '.join(extra_attributes)
return GetSubstitutedResponse(UPDATE_RESPONSE, protocol, response_values)
def GetNoUpdateResponse(protocol):
"""Returns a protocol-specific response to the client for no update.
Args:
    protocol: client's protocol version from the request XML.
Returns:
Xml string to be passed back to client.
"""
response_values = GetCommonResponseValues()
return GetSubstitutedResponse(NO_UPDATE_RESPONSE, protocol, response_values)
def ParseUpdateRequest(request_string):
"""Returns a tuple containing information parsed from an update request.
Args:
    request_string: an XML string containing the update request.
  Returns a tuple consisting of the protocol string, the app element, and the
  event and updatecheck element lists.
Raises UnknownProtocolRequestedException if we do not understand the
protocol.
"""
request_dom = minidom.parseString(request_string)
protocol = request_dom.firstChild.getAttribute('protocol')
supported_protocols = '2.0', '3.0'
if protocol not in supported_protocols:
raise UnknownProtocolRequestedException('Supported protocols are %s' %
supported_protocols)
element_dict = {}
for name in ['event', 'app', 'updatecheck']:
element_dict[name] = 'o:' + name if protocol == '2.0' else name
app = request_dom.firstChild.getElementsByTagName(element_dict['app'])[0]
event = request_dom.getElementsByTagName(element_dict['event'])
update_check = request_dom.getElementsByTagName(element_dict['updatecheck'])
return protocol, app, event, update_check
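if __name__ == '__main__':
  # Minimal round-trip sketch, not part of the original module; the request
  # XML below is a hypothetical client update check.
  _request = ('<?xml version="1.0" encoding="UTF-8"?>'
              '<request protocol="3.0">'
              '<app appid="{%s}"><updatecheck/></app>'
              '</request>' % APP_ID)
  _protocol, _app, _event, _update_check = ParseUpdateRequest(_request)
  print GetNoUpdateResponse(_protocol)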
|
tlangerak/Multi-Agent-Systems
|
refs/heads/master
|
ag/googlesearch/googlesearch.py
|
3
|
'''
Created on May 5, 2017
@author: anthony
'''
import urllib2
import math
import re
from bs4 import BeautifulSoup
from pprint import pprint
from threading import Thread
from collections import deque
from time import sleep
class GoogleSearch:
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/ 58.0.3029.81 Safari/537.36"
SEARCH_URL = "https://google.com/search"
RESULT_SELECTOR = ".srg h3.r a"
TOTAL_SELECTOR = "#resultStats"
RESULTS_PER_PAGE = 10
DEFAULT_HEADERS = [
('User-Agent', USER_AGENT),
("Accept-Language", "en-US,en;q=0.5"),
]
def search(self, query, num_results = 10, prefetch_pages = True, prefetch_threads = 10, language = "en"):
        searchResults = []
        pages = int(math.ceil(num_results / float(GoogleSearch.RESULTS_PER_PAGE)))
        fetcher_threads = deque([])
        total = None
for i in range(pages) :
start = i * GoogleSearch.RESULTS_PER_PAGE
opener = urllib2.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
response = opener.open(GoogleSearch.SEARCH_URL + "?q="+ urllib2.quote(query) + "&hl=" + language + ("" if start == 0 else ("&start=" + str(start))))
soup = BeautifulSoup(response.read(), "lxml")
response.close()
if total is None:
totalText = soup.select(GoogleSearch.TOTAL_SELECTOR)[0].children.next().encode('utf-8')
total = long(re.sub("[',\. ]", "", re.search("(([0-9]+[',\. ])*[0-9]+)", totalText).group(1)))
results = self.parseResults(soup.select(GoogleSearch.RESULT_SELECTOR))
if len(searchResults) + len(results) > num_results:
del results[num_results - len(searchResults):]
searchResults += results
if prefetch_pages:
for result in results:
while True:
running = 0
for thread in fetcher_threads:
if thread.is_alive():
running += 1
if running < prefetch_threads:
break
sleep(1)
fetcher_thread = Thread(target=result.getText)
fetcher_thread.start()
fetcher_threads.append(fetcher_thread)
for thread in fetcher_threads:
thread.join()
        return SearchResponse(searchResults, total)
    def parseResults(self, results):
        searchResults = []
        for result in results:
            url = result["href"]
title = result.text
searchResults.append(SearchResult(title, url))
return searchResults
class SearchResponse:
    def __init__(self, results, total):
        self.results = results
        self.total = total
class SearchResult:
def __init__(self, title, url):
self.title = title
self.url = url
self.__text = None
self.__markup = None
def getText(self):
if self.__text is None:
soup = BeautifulSoup(self.getMarkup(), "lxml")
for junk in soup(["script", "style"]):
junk.extract()
self.__text = soup.get_text()
return self.__text
def getMarkup(self):
if self.__markup is None:
opener = urllib2.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
            response = opener.open(self.url)
self.__markup = response.read()
return self.__markup
def __str__(self):
return str(self.__dict__)
def __unicode__(self):
return unicode(self.__str__())
def __repr__(self):
return self.__str__()
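# Programmatic usage sketch (query and result count are arbitrary examples):
#
#     response = GoogleSearch().search("python web scraping", num_results=5)
#     for result in response.results:
#         print result.title, result.url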
if __name__ == "__main__":
import sys
search = GoogleSearch()
i=1
query = " ".join(sys.argv[1:])
if len(query) == 0:
query = "python"
count = 10
print ("Fetching first " + str(count) + " results for \"" + query + "\"...")
response = search.search(query, count)
print ("TOTAL: " + str(response.total) + " RESULTS")
for result in response.results:
print("RESULT #" +str (i) + ": "+ (result._SearchResult__text if result._SearchResult__text is not None else "[None]") + "\n\n")
i+=1
|
BellScurry/gem5-fault-injection
|
refs/heads/master
|
tests/configs/pc-simple-timing-ruby.py
|
11
|
# Copyright (c) 2012 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nilay Vaish
import m5, os, optparse, sys
from m5.objects import *
m5.util.addToPath('../configs/common')
from Benchmarks import SysConfig
import FSConfig
m5.util.addToPath('../configs/ruby')
m5.util.addToPath('../configs/topologies')
import Ruby
import Options
# Add the ruby specific and protocol specific options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Ruby.define_options(parser)
(options, args) = parser.parse_args()
# Set the default cache size and associativity to be very small to encourage
# races between requests and writebacks.
options.l1d_size="32kB"
options.l1i_size="32kB"
options.l2_size="4MB"
options.l1d_assoc=2
options.l1i_assoc=2
options.l2_assoc=2
options.num_cpus = 2
# The system.
mdesc = SysConfig(disk = 'linux-x86.img')
system = FSConfig.makeLinuxX86System('timing', options.num_cpus,
mdesc=mdesc, Ruby=True)
# Dummy voltage domain for all our clock domains
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.kernel = FSConfig.binary('x86_64-vmlinux-2.6.22.9.smp')
system.clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain = system.voltage_domain)
system.cpu_clk_domain = SrcClockDomain(clock = '2GHz',
voltage_domain = system.voltage_domain)
system.cpu = [TimingSimpleCPU(cpu_id=i, clk_domain = system.cpu_clk_domain)
for i in xrange(options.num_cpus)]
Ruby.create_system(options, True, system, system.iobus, system._dma_ports)
# Create a separate clock domain for Ruby
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
# Connect the ruby io port to the PIO bus,
# assuming that there is just one such port.
system.iobus.master = system.ruby._io_port.slave
for (i, cpu) in enumerate(system.cpu):
# create the interrupt controller
cpu.createInterruptController()
# Tie the cpu ports to the correct ruby system ports
cpu.icache_port = system.ruby._cpu_ports[i].slave
cpu.dcache_port = system.ruby._cpu_ports[i].slave
cpu.itb.walker.port = system.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = system.ruby._cpu_ports[i].slave
cpu.interrupts[0].pio = system.ruby._cpu_ports[i].master
cpu.interrupts[0].int_master = system.ruby._cpu_ports[i].slave
cpu.interrupts[0].int_slave = system.ruby._cpu_ports[i].master
root = Root(full_system = True, system = system)
m5.ticks.setGlobalFrequency('1THz')
|
jessstrap/servotk
|
refs/heads/master
|
tests/wpt/css-tests/tools/pytest/doc/en/example/assertion/failure_demo.py
|
179
|
from pytest import raises
import _pytest._code
import py
def otherfunc(a,b):
assert a==b
def somefunc(x,y):
otherfunc(x,y)
def otherfunc_multi(a,b):
assert (a ==
b)
def test_generative(param1, param2):
assert param1 * 2 < param2
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
metafunc.addcall(funcargs=dict(param1=3, param2=6))
class TestFailing(object):
def test_simple(self):
def f():
return 42
def g():
return 43
assert f() == g()
def test_simple_multiline(self):
otherfunc_multi(
42,
6*9)
def test_not(self):
def f():
return 42
assert not f()
class TestSpecialisedExplanations(object):
def test_eq_text(self):
assert 'spam' == 'eggs'
def test_eq_similar_text(self):
assert 'foo 1 bar' == 'foo 2 bar'
def test_eq_multiline_text(self):
assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100
assert a == b
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100
assert a == b
def test_eq_list(self):
assert [0, 1, 2] == [0, 1, 3]
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
b = [0]*100 + [2] + [3]*100
assert a == b
def test_eq_dict(self):
assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
def test_eq_set(self):
assert set([0, 10, 11, 12]) == set([0, 20, 21])
def test_eq_longer_list(self):
assert [1,2] == [1,2,3]
def test_in_list(self):
assert 1 in [0, 2, 3, 4, 5]
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
assert 'foo' not in text
def test_not_in_text_single(self):
text = 'single foo line'
assert 'foo' not in text
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
assert 'foo' not in text
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
assert 'f'*70 not in text
def test_attribute():
class Foo(object):
b = 1
i = Foo()
assert i.b == 2
def test_attribute_instance():
class Foo(object):
b = 1
assert Foo().b == 2
def test_attribute_failure():
class Foo(object):
def _get_b(self):
raise Exception('Failed to get attrib')
b = property(_get_b)
i = Foo()
assert i.b == 2
def test_attribute_multiple():
class Foo(object):
b = 1
class Bar(object):
b = 2
assert Foo().b == Bar().b
def globf(x):
return x+1
class TestRaises:
def test_raises(self):
s = 'qwe'
raises(TypeError, "int(s)")
def test_raises_doesnt(self):
raises(IOError, "int('3')")
def test_raise(self):
raise ValueError("demo error")
def test_tupleerror(self):
a,b = [1]
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
print ("l is %r" % l)
a,b = l.pop()
def test_some_error(self):
if namenotexi:
pass
def func1(self):
assert 41 == 42
# thanks to Matthew Scott for this test
def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
module = py.std.imp.new_module(name)
code = _pytest._code.compile(src, name, 'exec')
py.builtin.exec_(code, module.__dict__)
py.std.sys.modules[name] = module
module.foo()
class TestMoreErrors:
def test_complex_error(self):
def f():
return 44
def g():
return 43
somefunc(f(), g())
def test_z1_unpack_error(self):
l = []
a,b = l
def test_z2_type_error(self):
l = 3
a,b = l
def test_startswith(self):
s = "123"
g = "456"
assert s.startswith(g)
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
assert f().startswith(g())
def test_global_func(self):
assert isinstance(globf(42), float)
def test_instance(self):
self.x = 6*7
assert self.x != 42
def test_compare(self):
assert globf(10) < 5
def test_try_finally(self):
x = 1
try:
assert x == 0
finally:
x = 0
class TestCustomAssertMsg:
def test_single_line(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b"
def test_multiline(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b\n" \
"or does not appear to be b\none of those"
def test_custom_repr(self):
class JSON:
a = 1
def __repr__(self):
return "This is JSON\n{\n 'foo': 'bar'\n}"
a = JSON()
b = 2
assert a.a == b, a
|
juicer/juicer
|
refs/heads/master
|
juicer/common/RPM.py
|
2
|
# -*- coding: utf-8 -*-
# Juicer - Administer Pulp and Release Carts
# Copyright © 2012,2013, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import juicer.utils
import juicer.utils.Log
import re
class RPM(object):
def __init__(self, source):
self.pgk_name = os.path.basename(source)
# Source is the original location of this RPM. That includes
# both http://.... RPMs and local /home/bro/... ones.
self.source = source
# If this rpm has to be synced later we'll use this to filter
# out just those RPMs.
self.modified = False
url_regex = re.compile(r'^(http)s?://')
if url_regex.match(self.source):
self.synced = False
self.path = None
else:
self.synced = True
self.path = source
def sync(self, destination):
dest_file = os.path.join(destination, self.pgk_name)
# This is the case with stuff that already exists locally
if self.synced and self.source:
return True
if not os.path.exists(destination):
os.mkdir(destination)
self.path = dest_file
juicer.utils.Log.log_debug("Beginning remote->local sync: %s->%s" % (self.source, self.path))
juicer.utils.save_url_as(self.source, dest_file)
self.modified = True
self.synced = True
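# Usage sketch (URL and destination are hypothetical):
#
#     rpm = RPM('http://example.com/repo/foo-1.0-1.noarch.rpm')
#     rpm.sync('/tmp/cart')  # fetches to /tmp/cart/foo-1.0-1.noarch.rpm
#     assert rpm.synced and rpm.path == '/tmp/cart/foo-1.0-1.noarch.rpm'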
|
hugobowne/scikit-learn
|
refs/heads/master
|
examples/feature_selection/plot_permutation_test_for_classification.py
|
94
|
"""
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a common
technique consists in repeating the classification procedure after
randomizing (permuting) the labels. The p-value is then given by the
percentage of runs for which the score obtained is greater than the
classification score obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated with the target
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
|
Crystalnix/house-of-life-chromium
|
refs/heads/master
|
chrome/test/functional/codesign.py
|
3
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import commands
import glob
import logging
import os
import sys
import unittest
import pyauto_functional # Must import before pyauto
import pyauto
class CodesignTest(pyauto.PyUITest):
"""Test if the build is code signed"""
def testCodeSign(self):
"""Check the app for codesign and bail out if it's non-branded."""
browser_info = self.GetBrowserInfo()
# bail out if not a branded build
if browser_info['properties']['branding'] != 'Google Chrome':
return
# TODO: Add functionality for other operating systems (see crbug.com/47902)
if self.IsMac():
self._MacCodeSign(browser_info)
def _MacCodeSign(self, browser_info):
valid_text = 'valid on disk'
app_name = 'Google Chrome.app'
# Codesign of the app @ xcodebuild/Release/Google Chrome.app/
app_path = browser_info['child_process_path']
app_path = app_path[:app_path.find(app_name)]
app_path = app_path + app_name
self.assertTrue(valid_text in self._checkCodeSign(app_path))
# Codesign of the frameWork
framework_path = glob.glob(os.path.join(app_path, 'Contents', 'Versions',
'*.*.*.*'))[0]
framework_path = os.path.join(framework_path,
'Google Chrome Framework.framework')
self.assertTrue(valid_text in self._checkCodeSign(framework_path))
def _checkCodeSign(self, file_path):
"""Return the output of the codesign"""
return commands.getoutput('codesign -vvv "%s"' % file_path)
if __name__ == '__main__':
pyauto_functional.Main()
|
pcaro/jurko-suds
|
refs/heads/master
|
tests/test_cache.py
|
7
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( jurko.gospodnetic@pke.hr )
"""
Suds Python library document caching unit tests.
Implemented using the 'pytest' testing framework.
"""
import testutils
if __name__ == "__main__":
testutils.run_using_pytest(globals())
import suds
import suds.cache
import suds.sax.parser
import pytest
from six import b, next, u
import datetime
import os
import os.path
import sys
class MyException(Exception):
"""Local exception class used in the tests in this module."""
pass
class InvisibleMan:
"""Dummy class used for pickling related tests."""
def __init__(self, x):
self.x = x
class MockDateTime(datetime.datetime):
"""
MockDateTime class monkeypatched to replace datetime.datetime.
Allows us to control the exact built-in datetime.datetime.now() return
value. Note that Python does not allow us to monkeypatch
datetime.datetime.now() directly as it is a built-in function.
"""
mock_counter = 0
@staticmethod
def now(*args, **kwargs):
MockDateTime.mock_counter += 1
return MockDateTime.mock_value
class MockFile:
"""
Wrapper around a regular file object allowing controlled file operation
failures.
"""
def __init__(self, opener, file, fail_read):
self.__opener = opener
self.__file = file
self.__fail_read = fail_read
def __getattr__(self, *args, **kwargs):
return getattr(self.__file, *args, **kwargs)
def read(self, *args, **kwargs):
self.__opener.read_counter += 1
if self.__fail_read:
raise MyException
return self.__file.read(*args, **kwargs)
class MockFileOpener:
"""
Mock open() function for the suds.cache module.
May cause such calls to fail or to return our MockFile objects prepared so
some of their functions fail in a controlled manner.
"""
def __init__(self, fail_open=False, fail_read=False):
self.__previous = None
self.__fail_open = fail_open
self.__fail_read = fail_read
self.counter = 0
self.read_counter = 0
def __call__(self, *args, **kwargs):
self.counter += 1
if self.__fail_open:
raise MyException
file = self.__previous(*args, **kwargs)
return MockFile(self, file, fail_read=self.__fail_read)
def apply(self, monkeypatch):
"""Monkeypatch suds.cache module's open() global."""
try:
self.__previous = suds.cache.open
except AttributeError:
self.__previous = open
monkeypatch.setitem(suds.cache.__dict__, "open", self)
def reset(self):
self.counter = 0
self.read_counter = 0
class MockParse:
"""Mock object causing suds.sax.parser.Parser.parse() failures."""
def __init__(self):
self.counter = 0
def __call__(self, *args, **kwargs):
self.counter += 1
raise MyException
def apply(self, monkeypatch):
"""Monkeypatch suds SAX Parser's parse() method."""
monkeypatch.setattr(suds.sax.parser.Parser, "parse", self)
def reset(self):
self.counter = 0
class MockPickleLoad:
"""Mock object causing suds.cache module's pickle load failures."""
def __init__(self):
self.counter = 0
def __call__(self, *args, **kwargs):
self.counter += 1
raise MyException
def apply(self, monkeypatch):
"""Monkeypatch suds.cache module's pickle.load()."""
monkeypatch.setattr(suds.cache.pickle, "load", self)
def reset(self):
self.counter = 0
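# Note: the mocks above share one pattern - apply() monkeypatches the target
# callable with the mock instance and every call increments a counter, so
# tests can assert exactly how often the patched operation ran. MockParse and
# MockPickleLoad always raise MyException; MockFileOpener fails on open
# and/or read only when configured to.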
# Hardcoded values used in different caching test cases.
value_empty = b("")
value_f2 = b("fifi2")
value_f22 = b("fifi22")
value_f3 = b("fifi3")
value_p1 = b("pero1")
value_p11 = b("pero11")
value_p111 = b("pero111")
value_p2 = b("pero2")
value_p22 = b("pero22")
value_unicode = u("\u20AC \u7684 "
"\u010D\u0107\u017E\u0161\u0111"
"\u010C\u0106\u017D\u0160\u0110").encode("utf-8")
# FileCache item expiration test data - duration, current_time, expect_remove.
# Reused for different testing different FileCache derived classes.
file_cache_item_expiration_test_data = ([
# Infinite cache entry durations.
({}, datetime.datetime.min, False),
({}, datetime.timedelta(days=-21), False),
({}, -datetime.datetime.resolution, False),
({}, datetime.timedelta(), False),
({}, datetime.datetime.resolution, False),
({}, datetime.timedelta(days=7), False),
({}, datetime.datetime.max, False)] +
# Finite cache entry durations.
[(duration, current_time, expect_remove)
for duration in (
{"minutes": 7},
{"microseconds": 1},
{"microseconds": -1},
{"hours": -7})
for current_time, expect_remove in (
(datetime.datetime.min, False),
(datetime.timedelta(days=-21), False),
(-datetime.datetime.resolution, False),
(datetime.timedelta(), False),
(datetime.datetime.resolution, True),
(datetime.timedelta(days=7), True),
(datetime.datetime.max, True))])
@pytest.mark.parametrize(("method_name", "params"), (
("clear", []),
("get", ["id"]),
("purge", ["id"]),
("put", ["id", "object"])))
def test_Cache_methods_abstract(monkeypatch, method_name, params):
monkeypatch.delitem(locals(), "e", False)
cache = suds.cache.Cache()
f = getattr(cache, method_name)
e = pytest.raises(Exception, f, *params).value
try:
assert e.__class__ is Exception
assert str(e) == "not-implemented"
finally:
del e # explicitly break circular reference chain in Python 3
class TestDefaultFileCacheLocation:
"""Default FileCache cache location handling tests."""
@pytest.mark.parametrize("cache_class", (
suds.cache.DocumentCache,
suds.cache.FileCache,
suds.cache.ObjectCache))
def test_basic(self, tmpdir, cache_class):
"""
Test default FileCache folder usage.
Initial DocumentCache/FileCache/ObjectCache instantiation with no
explicitly specified location in a process should use
tempfile.mkdtemp() and that folder should be used as its location.
After a single DocumentCache/FileCache/ObjectCache instantiation with
no explicitly specified location, all later DocumentCache/FileCache/
ObjectCache instantiations with no explicitly specified location in the
same process should use that same location folder without additional
tempfile.mkdtemp() calls.
Both initial & non-initial DocumentCache/FileCache/ObjectCache
instantiation with an explicitly specified location should use that
folder as its default location and not make any tempfile.mkdtemp()
calls.
"""
cache_folder_name = "my test cache-%s" % (cache_class.__name__,)
cache_folder = tmpdir.join(cache_folder_name).strpath
fake_cache_folder_name = "my fake cache-%s" % (cache_class.__name__,)
fake_cache_folder = tmpdir.join(fake_cache_folder_name).strpath
test_file = tmpdir.join("test_file.py")
test_file.write("""\
import os.path
import sys
import tempfile
original_mkdtemp = tempfile.mkdtemp
mock_mkdtemp_counter = 0
def mock_mkdtemp(*args, **kwargs):
global mock_mkdtemp_counter
mock_mkdtemp_counter += 1
return cache_folder
tempfile.mkdtemp = mock_mkdtemp
def check_cache_folder(expected_exists, expected_mkdtemp_counter, comment):
if os.path.exists(cache_folder) != expected_exists:
if expected_exists:
message = "does not exist when expected"
else:
message = "exists when not expected"
print("Cache folder %%s (%%s)." %% (message, comment))
sys.exit(-2)
if mock_mkdtemp_counter != expected_mkdtemp_counter:
if mock_mkdtemp_counter < expected_mkdtemp_counter:
message = "less"
else:
message = "more"
print("tempfile.mkdtemp() called %%s times then expected (%%s)" %%
(message, comment,))
cache_folder = %(cache_folder)r
fake_cache_folder = %(fake_cache_folder)r
def fake_cache(n):
return fake_cache_folder + str(n)
from suds.cache import DocumentCache, FileCache, ObjectCache
check_cache_folder(False, 0, "import")
assert DocumentCache(fake_cache(1)).location == fake_cache(1)
assert FileCache(fake_cache(2)).location == fake_cache(2)
assert ObjectCache(fake_cache(3)).location == fake_cache(3)
check_cache_folder(False, 0, "initial caches with non-default location")
assert %(cache_class_name)s().location == cache_folder
check_cache_folder(True, 1, "initial cache with default location")
assert DocumentCache().location == cache_folder
assert FileCache().location == cache_folder
assert ObjectCache().location == cache_folder
check_cache_folder(True, 1, "non-initial caches with default location")
assert DocumentCache(fake_cache(4)).location == fake_cache(4)
assert FileCache(fake_cache(5)).location == fake_cache(5)
assert ObjectCache(fake_cache(6)).location == fake_cache(6)
check_cache_folder(True, 1, "non-initial caches with non-default location")
assert DocumentCache().location == cache_folder
assert FileCache().location == cache_folder
assert ObjectCache().location == cache_folder
check_cache_folder(True, 1, "final caches with default location")
""" % {"cache_class_name": cache_class.__name__,
"cache_folder": cache_folder,
"fake_cache_folder": fake_cache_folder})
assert not os.path.exists(cache_folder)
testutils.run_test_process(test_file)
@pytest.mark.parametrize("removal_enabled", (True, False))
def test_remove_on_exit(self, tmpdir, removal_enabled):
"""
Test removing the default cache folder on process exit.
The folder should be removed by default on process exit, but this
behaviour may be disabled by the user.
"""
cache_folder_name = "my test cache-%s" % (removal_enabled,)
cache_folder = tmpdir.join(cache_folder_name).strpath
test_file = tmpdir.join("test_file.py")
test_file.write("""\
import os.path
import sys
import tempfile
original_mkdtemp = tempfile.mkdtemp
mock_mkdtemp_counter = 0
def mock_mkdtemp(*args, **kwargs):
global mock_mkdtemp_counter
mock_mkdtemp_counter += 1
return cache_folder
tempfile.mkdtemp = mock_mkdtemp
import suds.cache
if not suds.cache.FileCache.remove_default_location_on_exit:
print("Default FileCache folder removal not enabled by default.")
sys.exit(-2)
suds.cache.FileCache.remove_default_location_on_exit = %(removal_enabled)s
cache_folder = %(cache_folder)r
if os.path.exists(cache_folder):
print("Cache folder exists too early.")
sys.exit(-2)
suds.cache.FileCache()
if not mock_mkdtemp_counter == 1:
print("tempfile.mkdtemp() not called as expected (%%d)." %%
(mock_mkdtemp_counter,))
sys.exit(-2)
if not os.path.isdir(cache_folder):
print("Cache folder not created when expected.")
sys.exit(-2)
""" % {"cache_folder": cache_folder, "removal_enabled": removal_enabled})
assert not os.path.exists(cache_folder)
testutils.run_test_process(test_file)
if removal_enabled:
assert not os.path.exists(cache_folder)
else:
assert os.path.isdir(cache_folder)
class TestDocumentCache:
def compare_document_to_content(self, document, content):
"""Assert that the given XML document and content match."""
assert document.__class__ is suds.sax.document.Document
elements = document.getChildren()
assert len(elements) == 1
element = elements[0]
assert element.__class__ is suds.sax.element.Element
assert suds.byte_str(str(element)) == content
@staticmethod
def construct_XML(element_name="Elemento"):
"""
Construct XML content and a Document wrapping it.
The XML contains a single Element (may be parametrized with the given
element name) and possibly additional sub-elements under it.
"""
#TODO: Update the tests in this group to no longer depend on the exact
# input XML data formatting. They currently expect it to be formatted
# exactly as what gets read back from their DocumentCache.
content = suds.byte_str("""\
<xsd:element name="%s">
<xsd:simpleType>
<xsd:restriction base="xsd:string">
<xsd:enumeration value="alfa"/>
<xsd:enumeration value="beta"/>
<xsd:enumeration value="gamma"/>
</xsd:restriction>
</xsd:simpleType>
</xsd:element>""" % (element_name,))
xml = suds.sax.parser.Parser().parse(suds.BytesIO(content))
assert xml.__class__ is suds.sax.document.Document
return content, xml
def test_cache_document(self, tmpdir):
cache_item_id = "unga1"
cache = suds.cache.DocumentCache(tmpdir.strpath)
assert isinstance(cache, suds.cache.FileCache)
assert cache.get(cache_item_id) is None
content, document = self.construct_XML()
cache.put(cache_item_id, document)
self.compare_document_to_content(cache.get(cache_item_id), content)
def test_cache_element(self, tmpdir):
cache_item_id = "unga1"
cache = suds.cache.DocumentCache(tmpdir.strpath)
assert isinstance(cache, suds.cache.FileCache)
assert cache.get(cache_item_id) is None
content, document = self.construct_XML()
cache.put(cache_item_id, document.root())
self.compare_document_to_content(cache.get(cache_item_id), content)
def test_file_open_failure(self, tmpdir, monkeypatch):
"""
File open failure should cause no cached object to be found, but any
existing underlying cache file should be kept around.
"""
mock_open = MockFileOpener(fail_open=True)
cache_folder = tmpdir.strpath
cache = suds.cache.DocumentCache(cache_folder)
content1, document1 = self.construct_XML("One")
content2, document2 = self.construct_XML("Two")
assert content1 != content2
cache.put("unga1", document1)
mock_open.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock_open.counter == 1
_assert_empty_cache_folder(cache_folder, expected=False)
self.compare_document_to_content(cache.get("unga1"), content1)
mock_open.apply(monkeypatch)
assert cache.get("unga2") is None
monkeypatch.undo()
assert mock_open.counter == 2
_assert_empty_cache_folder(cache_folder, expected=False)
self.compare_document_to_content(cache.get("unga1"), content1)
assert cache.get("unga2") is None
cache.put("unga2", document2)
assert mock_open.counter == 2
mock_open.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock_open.counter == 3
_assert_empty_cache_folder(cache_folder, expected=False)
self.compare_document_to_content(cache.get("unga1"), content1)
self.compare_document_to_content(cache.get("unga2"), content2)
assert mock_open.counter == 3
@pytest.mark.parametrize(("mock", "extra_checks"), (
(MockParse(), [lambda x: True] * 4),
(MockFileOpener(fail_read=True), [
lambda x: x.read_counter != 0,
lambda x: x.read_counter == 0,
lambda x: x.read_counter != 0,
lambda x: x.read_counter == 0])))
def test_file_operation_failure(self, tmpdir, monkeypatch, mock,
extra_checks):
"""
File operation failures such as reading failures or failing to parse
data read from such a file should cause no cached object to be found
and the related cache file to be removed.
"""
cache_folder = tmpdir.strpath
cache = suds.cache.DocumentCache(cache_folder)
content1, document1 = self.construct_XML("Eins")
content2, document2 = self.construct_XML("Zwei")
cache.put("unga1", document1)
mock.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock.counter == 1
assert extra_checks[0](mock)
_assert_empty_cache_folder(cache_folder)
mock.reset()
assert cache.get("unga1") is None
cache.put("unga1", document1)
cache.put("unga2", document2)
assert mock.counter == 0
assert extra_checks[1](mock)
mock.reset()
mock.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock.counter == 1
assert extra_checks[2](mock)
_assert_empty_cache_folder(cache_folder, expected=False)
mock.reset()
assert cache.get("unga1") is None
self.compare_document_to_content(cache.get("unga2"), content2)
assert mock.counter == 0
assert extra_checks[3](mock)
@pytest.mark.parametrize(("duration", "current_time", "expect_remove"),
file_cache_item_expiration_test_data)
def test_item_expiration(self, tmpdir, monkeypatch, duration, current_time,
expect_remove):
"""See TestFileCache.item_expiration_test_worker() for more info."""
cache = suds.cache.DocumentCache(tmpdir.strpath, **duration)
content, document = self.construct_XML()
cache.put("willy", document)
TestFileCache.item_expiration_test_worker(cache, "willy", monkeypatch,
current_time, expect_remove)
def test_repeated_reads(self, tmpdir):
cache = suds.cache.DocumentCache(tmpdir.strpath)
content, document = self.construct_XML()
cache.put("unga1", document)
read_XML = cache.get("unga1").str()
assert read_XML == cache.get("unga1").str()
assert cache.get(None) is None
assert cache.get("") is None
assert cache.get("unga2") is None
assert read_XML == cache.get("unga1").str()
class TestFileCache:
@staticmethod
def item_expiration_test_worker(cache, id, monkeypatch, current_time,
expect_remove):
"""
Test how a FileCache & its derived classes expire their item entries.
Facts tested:
* 0 duration should cause cache items never to expire.
* Expired item files should be automatically removed from the cache
folder.
* Negative durations should be treated the same as positive ones.
Requirements on the passed cache object:
* Configures with the correct duration for this test.
* Contains a valid cached item with the given id and its ctime
timestamp + cache.duration must fall into the valid datetime.datetime
value range.
* Must use only public & protected FileCache interfaces to access its
cache item data files.
'current_time' values are expected to be either datetime.datetime or
datetime.timedelta instances with the latter interpreted relative to
the test file's expected expiration time.
"""
assert isinstance(cache, suds.cache.FileCache)
filepath = cache._FileCache__filename(id)
assert os.path.isfile(filepath)
file_timestamp = os.path.getctime(filepath)
file_time = datetime.datetime.fromtimestamp(file_timestamp)
MockDateTime.mock_counter = 0
if isinstance(current_time, datetime.timedelta):
expire_time = file_time + cache.duration
MockDateTime.mock_value = expire_time + current_time
else:
MockDateTime.mock_value = current_time
monkeypatch.setattr(datetime, "datetime", MockDateTime)
assert (cache._getf(id) is None) == expect_remove
monkeypatch.undo()
if cache.duration:
assert MockDateTime.mock_counter == 1
else:
assert MockDateTime.mock_counter == 0
assert os.path.isfile(filepath) == (not expect_remove)
def test_basic_construction(self):
cache = suds.cache.FileCache()
assert isinstance(cache, suds.cache.Cache)
assert cache.duration.__class__ is datetime.timedelta
def test_cached_content_empty(self, tmpdir):
cache_folder = tmpdir.strpath
cache = suds.cache.FileCache(cache_folder)
cache.put("unga1", value_empty)
assert cache.get("unga1") == value_empty
_assert_empty_cache_folder(cache_folder, expected=False)
def test_cached_content_unicode(self, tmpdir):
cache_folder = tmpdir.strpath
cache = suds.cache.FileCache(cache_folder)
cache.put("unga1", value_unicode)
assert cache.get("unga1") == value_unicode
_assert_empty_cache_folder(cache_folder, expected=False)
def test_clear(self, tmpdir):
cache_folder1 = tmpdir.join("fungus").strpath
cache1 = suds.cache.FileCache(cache_folder1)
cache1.put("unga1", value_p1)
_assert_empty_cache_folder(cache_folder1, expected=False)
cache1.put("unga2", value_p2)
_assert_empty_cache_folder(cache_folder1, expected=False)
assert cache1.get("unga1") == value_p1
assert cache1.get("unga2") == value_p2
_assert_empty_cache_folder(cache_folder1, expected=False)
cache1.clear()
_assert_empty_cache_folder(cache_folder1)
assert cache1.get("unga1") is None
assert cache1.get("unga2") is None
_assert_empty_cache_folder(cache_folder1)
cache1.put("unga1", value_p11)
cache1.put("unga2", value_p2)
_assert_empty_cache_folder(cache_folder1, expected=False)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
_assert_empty_cache_folder(cache_folder1, expected=False)
cache_folder2 = tmpdir.join("broccoli").strpath
cache2 = suds.cache.FileCache(cache_folder2)
cache2.put("unga2", value_f2)
assert cache2.get("unga2") == value_f2
assert cache1.get("unga2") == value_p2
cache2.clear()
_assert_empty_cache_folder(cache_folder1, expected=False)
_assert_empty_cache_folder(cache_folder2)
assert cache2.get("unga2") is None
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache2.put("unga2", value_p22)
assert cache2.get("unga2") == value_p22
def test_close_leaves_cached_files_behind(self, tmpdir):
cache_folder1 = tmpdir.join("ana").strpath
cache1 = suds.cache.FileCache(cache_folder1)
cache1.put("unga1", value_p1)
cache1.put("unga2", value_p2)
cache_folder2 = tmpdir.join("nan").strpath
cache2 = suds.cache.FileCache(cache_folder2)
cache2.put("unga2", value_f2)
cache2.put("unga3", value_f3)
del cache1
cache11 = suds.cache.FileCache(cache_folder1)
assert cache11.get("unga1") == value_p1
assert cache11.get("unga2") == value_p2
assert cache2.get("unga2") == value_f2
assert cache2.get("unga3") == value_f3
def test_get_put(self, tmpdir):
cache_folder1 = tmpdir.join("firefly").strpath
cache1 = suds.cache.FileCache(cache_folder1)
_assert_empty_cache_folder(cache_folder1)
assert cache1.get("unga1") is None
cache1.put("unga1", value_p1)
_assert_empty_cache_folder(cache_folder1, expected=False)
assert cache1.get("unga1") == value_p1
assert cache1.get("unga2") is None
cache1.put("unga1", value_p11)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") is None
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache_folder2 = tmpdir.join("semper fi").strpath
cache2 = suds.cache.FileCache(cache_folder2)
_assert_empty_cache_folder(cache_folder2)
assert cache2.get("unga2") is None
cache2.put("unga2", value_f2)
_assert_empty_cache_folder(cache_folder2, expected=False)
assert cache2.get("unga2") == value_f2
assert cache2.get("unga3") is None
cache2.put("unga2", value_f22)
assert cache2.get("unga2") == value_f22
assert cache2.get("unga3") is None
cache2.put("unga3", value_f3)
assert cache2.get("unga2") == value_f22
assert cache2.get("unga3") == value_f3
_assert_empty_cache_folder(cache_folder1, expected=False)
_assert_empty_cache_folder(cache_folder2, expected=False)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
assert cache1.get("unga3") is None
assert cache2.get("unga1") is None
assert cache2.get("unga2") == value_f22
assert cache2.get("unga3") == value_f3
def test_independent_item_expirations(self, tmpdir, monkeypatch):
cache = suds.cache.FileCache(tmpdir.strpath, days=1)
cache.put("unga1", value_p1)
cache.put("unga2", value_p2)
cache.put("unga3", value_f2)
filepath1 = cache._FileCache__filename("unga1")
filepath2 = cache._FileCache__filename("unga2")
filepath3 = cache._FileCache__filename("unga3")
file_timestamp1 = os.path.getctime(filepath1)
file_timestamp2 = file_timestamp1 + 10 * 60 # in seconds
file_timestamp3 = file_timestamp1 + 20 * 60 # in seconds
file_time1 = datetime.datetime.fromtimestamp(file_timestamp1)
file_time1_expiration = file_time1 + cache.duration
original_getctime = os.path.getctime
def mock_getctime(path):
if path == filepath2:
return file_timestamp2
if path == filepath3:
return file_timestamp3
return original_getctime(path)
timedelta = datetime.timedelta
monkeypatch.setattr(os.path, "getctime", mock_getctime)
monkeypatch.setattr(datetime, "datetime", MockDateTime)
MockDateTime.mock_value = file_time1_expiration + timedelta(minutes=15)
assert cache._getf("unga2") is None
assert os.path.isfile(filepath1)
assert not os.path.isfile(filepath2)
assert os.path.isfile(filepath3)
cache._getf("unga3").close()
assert os.path.isfile(filepath1)
assert not os.path.isfile(filepath2)
assert os.path.isfile(filepath3)
MockDateTime.mock_value = file_time1_expiration + timedelta(minutes=25)
assert cache._getf("unga1") is None
assert not os.path.isfile(filepath1)
assert not os.path.isfile(filepath2)
assert os.path.isfile(filepath3)
assert cache._getf("unga3") is None
assert not os.path.isfile(filepath1)
assert not os.path.isfile(filepath2)
assert not os.path.isfile(filepath3)
@pytest.mark.parametrize(("duration", "current_time", "expect_remove"),
file_cache_item_expiration_test_data)
def test_item_expiration(self, tmpdir, monkeypatch, duration, current_time,
expect_remove):
"""See TestFileCache.item_expiration_test_worker() for more info."""
cache = suds.cache.FileCache(tmpdir.strpath, **duration)
cache.put("unga1", value_p1)
TestFileCache.item_expiration_test_worker(cache, "unga1", monkeypatch,
current_time, expect_remove)
def test_non_default_location(self, tmpdir):
FileCache = suds.cache.FileCache
cache_folder1 = tmpdir.join("flip-flop1").strpath
assert not os.path.isdir(cache_folder1)
assert FileCache(location=cache_folder1).location == cache_folder1
_assert_empty_cache_folder(cache_folder1)
cache_folder2 = tmpdir.join("flip-flop2").strpath
assert not os.path.isdir(cache_folder2)
assert FileCache(cache_folder2).location == cache_folder2
_assert_empty_cache_folder(cache_folder2)
def test_purge(self, tmpdir):
cache_folder1 = tmpdir.join("flamenco").strpath
cache1 = suds.cache.FileCache(cache_folder1)
cache1.put("unga1", value_p1)
assert cache1.get("unga1") == value_p1
cache1.purge("unga1")
_assert_empty_cache_folder(cache_folder1)
assert cache1.get("unga1") is None
cache1.put("unga1", value_p11)
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache1.purge("unga1")
assert cache1.get("unga1") is None
assert cache1.get("unga2") == value_p2
cache1.put("unga1", value_p111)
cache_folder2 = tmpdir.join("shadow").strpath
cache2 = suds.cache.FileCache(cache_folder2)
cache2.put("unga2", value_f2)
cache2.purge("unga2")
_assert_empty_cache_folder(cache_folder2)
assert cache1.get("unga1") == value_p111
assert cache1.get("unga2") == value_p2
assert cache2.get("unga2") is None
def test_reused_cache_folder(self, tmpdir):
cache_folder = tmpdir.strpath
cache1 = suds.cache.FileCache(cache_folder)
_assert_empty_cache_folder(cache_folder)
assert cache1.get("unga1") is None
cache1.put("unga1", value_p1)
assert cache1.get("unga1") == value_p1
assert cache1.get("unga2") is None
cache1.put("unga1", value_p11)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") is None
cache1.put("unga2", value_p2)
assert cache1.get("unga1") == value_p11
assert cache1.get("unga2") == value_p2
cache2 = suds.cache.FileCache(cache_folder)
assert cache2.get("unga1") == value_p11
assert cache2.get("unga2") == value_p2
cache2.put("unga2", value_f2)
cache2.put("unga3", value_f3)
assert cache1.get("unga2") == value_f2
assert cache1.get("unga3") == value_f3
cache1.purge("unga2")
assert cache2.get("unga2") is None
cache1.clear()
assert cache2.get("unga1") is None
assert cache2.get("unga3") is None
@pytest.mark.parametrize("params", (
{},
{"microseconds": 1},
{"milliseconds": 1},
{"seconds": 1},
{"minutes": 1},
{"hours": 1},
{"days": 1},
{"weeks": 1},
{"microseconds": -1},
{"milliseconds": -1},
{"seconds": -1},
{"minutes": -1},
{"hours": -1},
{"days": -1},
{"weeks": -1},
{"weeks": 1, "days": 2, "hours": 7, "minutes": 0, "seconds": -712}))
def test_set_durations(self, tmpdir, params):
cache = suds.cache.FileCache(tmpdir.strpath, **params)
assert cache.duration == datetime.timedelta(**params)
def test_version(self, tmpdir):
fake_version_info = "--- fake version info ---"
assert suds.__version__ != fake_version_info
version_file = tmpdir.join("version")
cache_folder = tmpdir.strpath
cache = suds.cache.FileCache(cache_folder)
assert version_file.read() == suds.__version__
cache.put("unga1", value_p1)
version_file.write(fake_version_info)
assert cache.get("unga1") == value_p1
cache2 = suds.cache.FileCache(cache_folder)
_assert_empty_cache_folder(cache_folder)
assert cache.get("unga1") is None
assert cache2.get("unga1") is None
assert version_file.read() == suds.__version__
cache.put("unga1", value_p11)
cache.put("unga2", value_p22)
version_file.remove()
assert cache.get("unga1") == value_p11
assert cache.get("unga2") == value_p22
cache3 = suds.cache.FileCache(cache_folder)
_assert_empty_cache_folder(cache_folder)
assert cache.get("unga1") is None
assert cache.get("unga2") is None
assert cache2.get("unga1") is None
assert cache3.get("unga1") is None
assert version_file.read() == suds.__version__
def test_NoCache(monkeypatch):
cache = suds.cache.NoCache()
assert isinstance(cache, suds.cache.Cache)
assert cache.get("id") == None
cache.put("id", "something")
assert cache.get("id") == None
#TODO: It should not be an error to call clear() or purge() on a NoCache
# instance.
monkeypatch.delitem(locals(), "e", False)
e = pytest.raises(Exception, cache.purge, "id").value
try:
assert str(e) == "not-implemented"
finally:
del e # explicitly break circular reference chain in Python 3
e = pytest.raises(Exception, cache.clear).value
try:
assert str(e) == "not-implemented"
finally:
del e # explicitly break circular reference chain in Python 3
class TestObjectCache:
def test_basic(self, tmpdir):
cache = suds.cache.ObjectCache(tmpdir.strpath)
assert isinstance(cache, suds.cache.FileCache)
assert cache.get("unga1") is None
assert cache.get("unga2") is None
cache.put("unga1", InvisibleMan(1))
cache.put("unga2", InvisibleMan(2))
read1 = cache.get("unga1")
read2 = cache.get("unga2")
assert read1.__class__ is InvisibleMan
assert read2.__class__ is InvisibleMan
assert read1.x == 1
assert read2.x == 2
def test_file_open_failure(self, tmpdir, monkeypatch):
"""
File open failure should cause no cached object to be found, but any
existing underlying cache file should be kept around.
"""
mock_open = MockFileOpener(fail_open=True)
cache_folder = tmpdir.strpath
cache = suds.cache.ObjectCache(cache_folder)
cache.put("unga1", InvisibleMan(1))
mock_open.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock_open.counter == 1
_assert_empty_cache_folder(cache_folder, expected=False)
assert cache.get("unga1").x == 1
mock_open.apply(monkeypatch)
assert cache.get("unga2") is None
monkeypatch.undo()
assert mock_open.counter == 2
_assert_empty_cache_folder(cache_folder, expected=False)
assert cache.get("unga1").x == 1
assert cache.get("unga2") is None
cache.put("unga2", InvisibleMan(2))
assert mock_open.counter == 2
mock_open.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock_open.counter == 3
_assert_empty_cache_folder(cache_folder, expected=False)
assert cache.get("unga1").x == 1
assert cache.get("unga2").x == 2
assert mock_open.counter == 3
@pytest.mark.parametrize(("mock", "extra_checks"), (
(MockPickleLoad(), [lambda x: True] * 4),
(MockFileOpener(fail_read=True), [
lambda x: x.read_counter != 0,
lambda x: x.read_counter == 0,
lambda x: x.read_counter != 0,
lambda x: x.read_counter == 0])))
def test_file_operation_failure(self, tmpdir, monkeypatch, mock,
extra_checks):
"""
Open file operation failures such as reading failures or failing to
unpickle the data read from such a file should cause no cached object
to be found and the related cache file to be removed.
"""
cache_folder = tmpdir.strpath
cache = suds.cache.ObjectCache(cache_folder)
cache.put("unga1", InvisibleMan(1))
mock.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock.counter == 1
assert extra_checks[0](mock)
_assert_empty_cache_folder(cache_folder)
mock.reset()
assert cache.get("unga1") is None
cache.put("unga1", InvisibleMan(1))
cache.put("unga2", InvisibleMan(2))
assert mock.counter == 0
assert extra_checks[1](mock)
mock.reset()
mock.apply(monkeypatch)
assert cache.get("unga1") is None
monkeypatch.undo()
assert mock.counter == 1
assert extra_checks[2](mock)
_assert_empty_cache_folder(cache_folder, expected=False)
mock.reset()
assert cache.get("unga1") is None
assert cache.get("unga2").x == 2
assert mock.counter == 0
assert extra_checks[3](mock)
@pytest.mark.parametrize(("duration", "current_time", "expect_remove"),
file_cache_item_expiration_test_data)
def test_item_expiration(self, tmpdir, monkeypatch, duration, current_time,
expect_remove):
"""See TestFileCache.item_expiration_test_worker() for more info."""
cache = suds.cache.ObjectCache(tmpdir.strpath, **duration)
cache.put("silly", InvisibleMan(666))
TestFileCache.item_expiration_test_worker(cache, "silly", monkeypatch,
current_time, expect_remove)
def _assert_empty_cache_folder(folder, expected=True):
"""Test utility asserting that a cache folder is or is not empty."""
if not _is_assert_enabled():
return
assert os.path.isdir(folder)
def walk_error(error):
pytest.fail("Error walking through cache folder content.")
root, folders, files = next(os.walk(folder, onerror=walk_error))
assert root == folder
empty = len(folders) == 0 and len(files) == 1 and files[0] == 'version'
if expected:
assert len(folders) == 0
assert len(files) == 1
assert files[0] == 'version'
assert empty, "bad test code"
else:
assert not empty, "unexpected empty cache folder"
def _is_assert_enabled():
"""Return whether Python assertions have been enabled in this module."""
try:
assert False
except AssertionError:
return True
return False
|
pchauncey/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/lvol.py
|
23
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jeroen Hoekx (@jhoekx)"
- "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
choices: [ "present", "absent" ]
default: present
description:
      - Control whether the logical volume exists. If C(present) and the
        volume does not already exist, the C(size) option is required.
required: false
active:
version_added: "2.2"
choices: [ "yes", "no" ]
default: "yes"
description:
      - Whether the volume is active and visible to the host.
required: false
force:
version_added: "1.5"
choices: [ "yes", "no" ]
default: "no"
description:
      - Shrink or remove operations on volumes require this switch. Ensures
        that filesystems never get corrupted or destroyed by mistake.
required: false
opts:
version_added: "2.0"
description:
- Free-form options to be passed to the lvcreate command
snapshot:
version_added: "2.1"
description:
- The name of the snapshot volume
required: false
pvs:
version_added: "2.2"
description:
      - Comma-separated list of physical volumes, e.g. /dev/sda,/dev/sdb
required: false
shrink:
version_added: "2.2"
description:
      - Shrink if the current size is higher than the requested size.
required: false
default: yes
notes:
- Filesystems on top of the volume are not resized.
'''
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol:
vg: firefly
lv: test
size: 512
# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
- lvol:
vg: firefly
lv: test
size: 512
pvs: /dev/sda,/dev/sdb
# Create cache pool logical volume
- lvol:
vg: firefly
lv: lvcache
size: 512m
opts: --type cache-pool
# Create a logical volume of 512g.
- lvol:
vg: firefly
lv: test
size: 512g
# Create a logical volume the size of all remaining space in the volume group
- lvol:
vg: firefly
lv: test
size: 100%FREE
# Create a logical volume with special options
- lvol:
vg: firefly
lv: test
size: 512g
opts: -r 16
# Extend the logical volume to 1024m.
- lvol:
vg: firefly
lv: test
size: 1024
# Extend the logical volume to consume all remaining space in the volume group
- lvol:
vg: firefly
lv: test
size: +100%FREE
# Extend the logical volume to take all remaining space of the PVs
- lvol:
vg: firefly
lv: test
size: 100%PVS
# Resize the logical volume to % of VG
- lvol:
vg: firefly
lv: test
size: 80%VG
force: yes
# Reduce the logical volume to 512m
- lvol:
vg: firefly
lv: test
size: 512
force: yes
# Set the logical volume to 512m and do not try to shrink if size is lower than current one
- lvol:
vg: firefly
lv: test
size: 512
shrink: no
# Remove the logical volume.
- lvol:
vg: firefly
lv: test
state: absent
force: yes
# Create a snapshot volume of the test logical volume.
- lvol:
vg: firefly
lv: test
snapshot: snap1
size: 100m
# Deactivate a logical volume
- lvol:
vg: firefly
lv: test
active: false
# Create a deactivated logical volume
- lvol:
vg: firefly
lv: test
size: 512g
active: false
'''
import re
from ansible.module_utils.basic import AnsibleModule
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
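# For example, mkversion(2, 2, 99) == 2002099, so versions can be compared
# as plain integers: 2.2.99 < 2.3.0 becomes 2002099 < 2003000.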
def parse_lvs(data):
lvs = []
for line in data.splitlines():
parts = line.strip().split(';')
lvs.append({
'name': parts[0].replace('[','').replace(']',''),
'size': int(decimal_point.match(parts[1]).group(1)),
'active': (parts[2][4] == 'a')
})
return lvs
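# A sketch of what parse_lvs() consumes, matching the "lvs -a --noheadings
# --nosuffix -o lv_name,size,lv_attr --separator ';'" call in main()
# (sample lines for illustration only):
#
#   "  root;8192.00;-wi-ao----" -> {'name': 'root', 'size': 8192, 'active': True}
#   "  swap;512.00;-wi-------"  -> {'name': 'swap', 'size': 512, 'active': False}
#
# The fifth lv_attr character is the activation flag, and brackets around
# hidden/internal volume names are stripped.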
def parse_vgs(data):
vgs = []
for line in data.splitlines():
parts = line.strip().split(';')
vgs.append({
'name': parts[0],
'size': int(decimal_point.match(parts[1]).group(1)),
'free': int(decimal_point.match(parts[2]).group(1)),
'ext_size': int(decimal_point.match(parts[3]).group(1))
})
return vgs
def get_lvm_version(module):
ver_cmd = module.get_bin_path("lvm", required=True)
rc, out, err = module.run_command("%s version" % (ver_cmd))
if rc != 0:
return None
m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
if not m:
return None
return mkversion(m.group(1), m.group(2), m.group(3))
def main():
module = AnsibleModule(
argument_spec=dict(
vg=dict(required=True),
lv=dict(required=True),
size=dict(type='str'),
opts=dict(type='str'),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
shrink=dict(type='bool', default='yes'),
active=dict(type='bool', default='yes'),
snapshot=dict(type='str', default=None),
pvs=dict(type='str')
),
supports_check_mode=True,
)
# Determine if the "--yes" option should be used
version_found = get_lvm_version(module)
if version_found is None:
module.fail_json(msg="Failed to get LVM version number")
version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
if version_found >= version_yesopt:
yesopt = "--yes"
else:
yesopt = ""
vg = module.params['vg']
lv = module.params['lv']
size = module.params['size']
opts = module.params['opts']
state = module.params['state']
force = module.boolean(module.params['force'])
shrink = module.boolean(module.params['shrink'])
active = module.boolean(module.params['active'])
size_opt = 'L'
size_unit = 'm'
snapshot = module.params['snapshot']
pvs = module.params['pvs']
if pvs is None:
pvs = ""
else:
pvs = pvs.replace(",", " ")
if opts is None:
opts = ""
# Add --test option when running in check-mode
if module.check_mode:
test_opt = ' --test'
else:
test_opt = ''
if size:
# LVCREATE(8) -l --extents option with percentage
if '%' in size:
size_parts = size.split('%', 1)
size_percent = int(size_parts[0])
if size_percent > 100:
module.fail_json(msg="Size percentage cannot be larger than 100%")
size_whole = size_parts[1]
if size_whole == 'ORIGIN':
module.fail_json(msg="Snapshot Volumes are not supported")
elif size_whole not in ['VG', 'PVS', 'FREE']:
module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
size_opt = 'l'
size_unit = ''
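            # Illustration (assumed value): size == "80%VG" gives
            # size_percent == 80 and size_whole == "VG", and the later
            # lvcreate/lvextend/lvreduce call receives "-l 80%VG".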
        if '%' not in size:
# LVCREATE(8) -L --size option unit
if size[-1].lower() in 'bskmgtpe':
size_unit = size[-1].lower()
size = size[0:-1]
try:
float(size)
if not size[0].isdigit():
raise ValueError()
except ValueError:
module.fail_json(msg="Bad size specification of '%s'" % size)
# when no unit, megabytes by default
if size_opt == 'l':
unit = 'm'
else:
unit = size_unit
# Get information on volume group requested
vgs_cmd = module.get_bin_path("vgs", required=True)
rc, current_vgs, err = module.run_command(
"%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
vgs = parse_vgs(current_vgs)
this_vg = vgs[0]
# Get information on logical volume requested
lvs_cmd = module.get_bin_path("lvs", required=True)
rc, current_lvs, err = module.run_command(
"%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
changed = False
lvs = parse_lvs(current_lvs)
if snapshot is None:
check_lv = lv
else:
check_lv = snapshot
for test_lv in lvs:
if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
this_lv = test_lv
break
else:
this_lv = None
if state == 'present' and not size:
if this_lv is None:
module.fail_json(msg="No size given.")
msg = ''
if this_lv is None:
if state == 'present':
### create LV
lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
if snapshot is not None:
cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
else:
cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
rc, _, err = module.run_command(cmd)
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
else:
if state == 'absent':
### remove LV
if not force:
module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
lvremove_cmd = module.get_bin_path("lvremove", required=True)
rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=True)
else:
module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
elif not size:
pass
elif size_opt == 'l':
### Resize LV based on % value
tool = None
size_free = this_vg['free']
if size_whole == 'VG' or size_whole == 'PVS':
size_requested = size_percent * this_vg['size'] / 100
else: # size_whole == 'FREE':
size_requested = size_percent * this_vg['free'] / 100
if '+' in size:
size_requested += this_lv['size']
if this_lv['size'] < size_requested:
if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
tool = module.get_bin_path("lvextend", required=True)
else:
module.fail_json(
msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
(this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
)
elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
if size_requested == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
elif not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
else:
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
elif "not larger than existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
else:
### resize LV based on absolute values
tool = None
if int(size) > this_lv['size']:
tool = module.get_bin_path("lvextend", required=True)
elif shrink and int(size) < this_lv['size']:
if int(size) == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
if not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
else:
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
elif "not larger than existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
if this_lv is not None:
if active:
lvchange_cmd = module.get_bin_path("lvchange", required=True)
rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
else:
lvchange_cmd = module.get_bin_path("lvchange", required=True)
rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
main()
|
fenner/mibbinator
|
refs/heads/master
|
mibbinator/mib/models.py
|
1
|
from django.db import models
class Restriction(models.Model):
name = models.CharField(maxlength=30)
description = models.CharField(maxlength=255, blank=True)
public = models.BooleanField()
notes = models.TextField(blank=True)
def __str__(self):
return self.name
class Admin:
pass
class Module(models.Model):
module = models.CharField(maxlength=100, unique=True)
source = models.CharField(maxlength=100, blank=True)
srcstat = models.CharField(maxlength=20, blank=True) # this was an enum in the original
xdate = models.DateTimeField(null=True, blank=True)
contact = models.TextField(blank=True)
lastrevised = models.DateTimeField(null=True, blank=True)
smidump = models.TextField()
org = models.CharField(maxlength=255, blank=True)
copyright = models.TextField(blank=True)
restriction = models.ForeignKey(Restriction, null=True, blank=True)
def __str__(self):
return self.module
class Admin:
pass
class Object(models.Model):
object = models.CharField(maxlength=100)
module = models.ForeignKey(Module)
type = models.CharField(maxlength=20)
syntax = models.TextField(blank=True)
access = models.CharField(maxlength=50, blank=True)
units = models.CharField(maxlength=50, blank=True)
displayhint = models.CharField(maxlength=50, blank=True)
status = models.CharField(maxlength=50, blank=True)
oid = models.CharField(maxlength=255, blank=True)
description = models.TextField(blank=True)
reference = models.TextField(blank=True)
defval = models.TextField(blank=True) # seems odd but can have long enumerations/bits
def save(self):
super(Object, self).save()
parent = ".".join(self.oid.split(".")[:-1])
OID.objects.get_or_create(oid=self.oid, parent=parent)
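        # e.g. saving an object with oid "1.3.6.1.2.1.1" ensures an OID row
        # exists with oid "1.3.6.1.2.1.1" and parent "1.3.6.1.2.1".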
class Meta:
unique_together=(('module', 'object'),)
class Import(models.Model):
'''Forward references are represented by `Module::obJecTnaMe`
in `imp`.'''
module = models.ForeignKey(Module)
imp = models.CharField(maxlength=255, blank=True)
srcmod = models.ForeignKey(Module, related_name='imported_by', null=True)
object = models.ForeignKey(Object, related_name='imported_by', null=True)
class Meta:
unique_together=(('module','imp'), )
class OID(models.Model):
oid = models.CharField(maxlength=255, unique=True)
# object's null=True is just for development, until I figure out
# the algorithm for "which of several oids is the real one"
object = models.ForeignKey(Object, related_name='primary_oid', null=True)
parent = models.CharField(maxlength=255)
|
OCA/carrier-delivery
|
refs/heads/10.0
|
delivery_optional_invoice_line/delivery.py
|
5
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class delivery_carrier(orm.Model):
_inherit = 'delivery.carrier'
_columns = {
'do_not_create_invoice_line': fields.boolean(
'Do not create line on invoice'),
}
class stock_picking(orm.Model):
_inherit = "stock.picking"
def _prepare_shipping_invoice_line(
self, cr, uid, picking, invoice, context=None
):
res = super(stock_picking, self)._prepare_shipping_invoice_line(
cr, uid, picking, invoice, context=context)
if (
picking.carrier_id
and picking.carrier_id.do_not_create_invoice_line
):
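            # Returning None instead of the prepared values presumably tells
            # the caller to skip creating the delivery line on the invoice.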
res = None
return res
|
Split-Screen/android_kernel_motorola_titan
|
refs/heads/pac-5.0
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
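# Worked example (assumed values): a "blocked" test maps to ["M", "eq", 2];
# with arg == "1" and status value "320", intval becomes
# 320 / 10**1 % 10 == 2, which matches top[2], so analyse() returns 1.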
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
john-parton/django-oscar
|
refs/heads/master
|
src/oscar/apps/catalogue/admin.py
|
14
|
from django.contrib import admin
from treebeard.admin import TreeAdmin
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_model
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
Category = get_model('catalogue', 'Category')
Option = get_model('catalogue', 'Option')
Product = get_model('catalogue', 'Product')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
class AttributeInline(admin.TabularInline):
model = ProductAttributeValue
class ProductRecommendationInline(admin.TabularInline):
model = ProductRecommendation
fk_name = 'primary'
raw_id_fields = ['primary', 'recommendation']
class CategoryInline(admin.TabularInline):
model = ProductCategory
extra = 1
class ProductAttributeInline(admin.TabularInline):
model = ProductAttribute
extra = 2
class ProductClassAdmin(admin.ModelAdmin):
list_display = ('name', 'requires_shipping', 'track_stock')
inlines = [ProductAttributeInline]
class ProductAdmin(admin.ModelAdmin):
date_hierarchy = 'date_created'
list_display = ('get_title', 'upc', 'get_product_class', 'structure',
'attribute_summary', 'date_created')
list_filter = ['structure', 'is_discountable']
raw_id_fields = ['parent']
inlines = [AttributeInline, CategoryInline, ProductRecommendationInline]
prepopulated_fields = {"slug": ("title",)}
search_fields = ['upc', 'title']
def get_queryset(self, request):
qs = super(ProductAdmin, self).get_queryset(request)
return (
qs
.select_related('product_class', 'parent')
.prefetch_related(
'attribute_values',
'attribute_values__attribute'))
class ProductAttributeAdmin(admin.ModelAdmin):
list_display = ('name', 'code', 'product_class', 'type')
prepopulated_fields = {"code": ("name", )}
class OptionAdmin(admin.ModelAdmin):
pass
class ProductAttributeValueAdmin(admin.ModelAdmin):
list_display = ('product', 'attribute', 'value')
class AttributeOptionInline(admin.TabularInline):
model = AttributeOption
class AttributeOptionGroupAdmin(admin.ModelAdmin):
list_display = ('name', 'option_summary')
inlines = [AttributeOptionInline, ]
class CategoryAdmin(TreeAdmin):
form = movenodeform_factory(Category)
list_display = ('name', 'slug')
admin.site.register(ProductClass, ProductClassAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductAttribute, ProductAttributeAdmin)
admin.site.register(ProductAttributeValue, ProductAttributeValueAdmin)
admin.site.register(AttributeOptionGroup, AttributeOptionGroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(ProductImage)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductCategory)
|
samuelshaner/openmc
|
refs/heads/develop
|
openmc/data/correlated.py
|
2
|
from collections import Iterable
from numbers import Real, Integral
from warnings import warn
import numpy as np
import openmc.checkvalue as cv
from openmc.stats import Tabular, Univariate, Discrete, Mixture, \
Uniform, Legendre
from .function import INTERPOLATION_SCHEME
from .angle_energy import AngleEnergy
from .data import EV_PER_MEV
from .endf import get_list_record, get_tab2_record
class CorrelatedAngleEnergy(AngleEnergy):
"""Correlated angle-energy distribution
Parameters
----------
breakpoints : Iterable of int
Breakpoints defining interpolation regions
interpolation : Iterable of int
Interpolation codes
energy : Iterable of float
Incoming energies at which distributions exist
energy_out : Iterable of openmc.stats.Univariate
Distribution of outgoing energies corresponding to each incoming energy
mu : Iterable of Iterable of openmc.stats.Univariate
Distribution of scattering cosine for each incoming/outgoing energy
Attributes
----------
breakpoints : Iterable of int
Breakpoints defining interpolation regions
interpolation : Iterable of int
Interpolation codes
energy : Iterable of float
Incoming energies at which distributions exist
energy_out : Iterable of openmc.stats.Univariate
Distribution of outgoing energies corresponding to each incoming energy
mu : Iterable of Iterable of openmc.stats.Univariate
Distribution of scattering cosine for each incoming/outgoing energy
"""
def __init__(self, breakpoints, interpolation, energy, energy_out, mu):
super(CorrelatedAngleEnergy, self).__init__()
self.breakpoints = breakpoints
self.interpolation = interpolation
self.energy = energy
self.energy_out = energy_out
self.mu = mu
@property
def breakpoints(self):
return self._breakpoints
@property
def interpolation(self):
return self._interpolation
@property
def energy(self):
return self._energy
@property
def energy_out(self):
return self._energy_out
@property
def mu(self):
return self._mu
@breakpoints.setter
def breakpoints(self, breakpoints):
cv.check_type('correlated angle-energy breakpoints', breakpoints,
Iterable, Integral)
self._breakpoints = breakpoints
@interpolation.setter
def interpolation(self, interpolation):
cv.check_type('correlated angle-energy interpolation', interpolation,
Iterable, Integral)
self._interpolation = interpolation
@energy.setter
def energy(self, energy):
cv.check_type('correlated angle-energy incoming energy', energy,
Iterable, Real)
self._energy = energy
@energy_out.setter
def energy_out(self, energy_out):
cv.check_type('correlated angle-energy outgoing energy', energy_out,
Iterable, Univariate)
self._energy_out = energy_out
@mu.setter
def mu(self, mu):
cv.check_iterable_type('correlated angle-energy outgoing cosine',
mu, Univariate, 2, 2)
self._mu = mu
def to_hdf5(self, group):
"""Write distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
group.attrs['type'] = np.string_('correlated')
dset = group.create_dataset('energy', data=self.energy)
dset.attrs['interpolation'] = np.vstack((self.breakpoints,
self.interpolation))
# Determine total number of (E,p) pairs and create array
n_tuple = sum(len(d.x) for d in self.energy_out)
eout = np.empty((5, n_tuple))
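        # Row layout of eout, filled in below: row 0 = outgoing energy,
        # row 1 = probability, row 2 = cumulative probability, row 3 =
        # angular interpolation flag, row 4 = offset into the mu array.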
# Make sure all mu data is tabular
mu_tabular = []
for i, mu_i in enumerate(self.mu):
mu_tabular.append([mu_ij if isinstance(mu_ij, (Tabular, Discrete)) else
mu_ij.to_tabular() for mu_ij in mu_i])
# Determine total number of (mu,p) points and create array
n_tuple = sum(sum(len(mu_ij.x) for mu_ij in mu_i)
for mu_i in mu_tabular)
mu = np.empty((3, n_tuple))
# Create array for offsets
offsets = np.empty(len(self.energy_out), dtype=int)
interpolation = np.empty(len(self.energy_out), dtype=int)
n_discrete_lines = np.empty(len(self.energy_out), dtype=int)
offset_e = 0
offset_mu = 0
# Populate offsets and eout array
for i, d in enumerate(self.energy_out):
n = len(d)
offsets[i] = offset_e
if isinstance(d, Mixture):
discrete, continuous = d.distribution
n_discrete_lines[i] = m = len(discrete)
interpolation[i] = 1 if continuous.interpolation == 'histogram' else 2
eout[0, offset_e:offset_e+m] = discrete.x
eout[1, offset_e:offset_e+m] = discrete.p
eout[2, offset_e:offset_e+m] = discrete.c
eout[0, offset_e+m:offset_e+n] = continuous.x
eout[1, offset_e+m:offset_e+n] = continuous.p
eout[2, offset_e+m:offset_e+n] = continuous.c
else:
if isinstance(d, Tabular):
n_discrete_lines[i] = 0
interpolation[i] = 1 if d.interpolation == 'histogram' else 2
elif isinstance(d, Discrete):
n_discrete_lines[i] = n
interpolation[i] = 1
eout[0, offset_e:offset_e+n] = d.x
eout[1, offset_e:offset_e+n] = d.p
eout[2, offset_e:offset_e+n] = d.c
for j, mu_ij in enumerate(mu_tabular[i]):
if isinstance(mu_ij, Discrete):
eout[3, offset_e+j] = 0
else:
eout[3, offset_e+j] = 1 if mu_ij.interpolation == 'histogram' else 2
eout[4, offset_e+j] = offset_mu
n_mu = len(mu_ij)
mu[0, offset_mu:offset_mu+n_mu] = mu_ij.x
mu[1, offset_mu:offset_mu+n_mu] = mu_ij.p
mu[2, offset_mu:offset_mu+n_mu] = mu_ij.c
offset_mu += n_mu
offset_e += n
# Create dataset for outgoing energy distributions
dset = group.create_dataset('energy_out', data=eout)
# Write interpolation on outgoing energy as attribute
dset.attrs['offsets'] = offsets
dset.attrs['interpolation'] = interpolation
dset.attrs['n_discrete_lines'] = n_discrete_lines
# Create dataset for outgoing angle distributions
group.create_dataset('mu', data=mu)
@classmethod
def from_hdf5(cls, group):
"""Generate correlated angle-energy distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.CorrelatedAngleEnergy
Correlated angle-energy distribution
"""
interp_data = group['energy'].attrs['interpolation']
energy_breakpoints = interp_data[0, :]
energy_interpolation = interp_data[1, :]
energy = group['energy'].value
offsets = group['energy_out'].attrs['offsets']
interpolation = group['energy_out'].attrs['interpolation']
n_discrete_lines = group['energy_out'].attrs['n_discrete_lines']
dset_eout = group['energy_out'].value
energy_out = []
dset_mu = group['mu'].value
mu = []
n_energy = len(energy)
for i in range(n_energy):
# Determine length of outgoing energy distribution and number of
# discrete lines
offset_e = offsets[i]
if i < n_energy - 1:
n = offsets[i+1] - offset_e
else:
n = dset_eout.shape[1] - offset_e
m = n_discrete_lines[i]
# Create discrete distribution if lines are present
if m > 0:
x = dset_eout[0, offset_e:offset_e+m]
p = dset_eout[1, offset_e:offset_e+m]
eout_discrete = Discrete(x, p)
eout_discrete.c = dset_eout[2, offset_e:offset_e+m]
p_discrete = eout_discrete.c[-1]
# Create continuous distribution
if m < n:
interp = INTERPOLATION_SCHEME[interpolation[i]]
x = dset_eout[0, offset_e+m:offset_e+n]
p = dset_eout[1, offset_e+m:offset_e+n]
eout_continuous = Tabular(x, p, interp, ignore_negative=True)
eout_continuous.c = dset_eout[2, offset_e+m:offset_e+n]
# If both continuous and discrete are present, create a mixture
# distribution
if m == 0:
eout_i = eout_continuous
elif m == n:
eout_i = eout_discrete
else:
eout_i = Mixture([p_discrete, 1. - p_discrete],
[eout_discrete, eout_continuous])
# Read angular distributions
mu_i = []
for j in range(n):
# Determine interpolation scheme
interp_code = int(dset_eout[3, offsets[i] + j])
# Determine offset and length
offset_mu = int(dset_eout[4, offsets[i] + j])
if offsets[i] + j < dset_eout.shape[1] - 1:
n_mu = int(dset_eout[4, offsets[i] + j + 1]) - offset_mu
else:
n_mu = dset_mu.shape[1] - offset_mu
# Get data
x = dset_mu[0, offset_mu:offset_mu+n_mu]
p = dset_mu[1, offset_mu:offset_mu+n_mu]
c = dset_mu[2, offset_mu:offset_mu+n_mu]
if interp_code == 0:
mu_ij = Discrete(x, p)
else:
mu_ij = Tabular(x, p, INTERPOLATION_SCHEME[interp_code],
ignore_negative=True)
mu_ij.c = c
mu_i.append(mu_ij)
offset_mu += n_mu
energy_out.append(eout_i)
mu.append(mu_i)
return cls(energy_breakpoints, energy_interpolation,
energy, energy_out, mu)
@classmethod
def from_ace(cls, ace, idx, ldis):
"""Generate correlated angle-energy distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
idx : int
Index in XSS array of the start of the energy distribution data
(LDIS + LOCC - 1)
ldis : int
Index in XSS array of the start of the energy distribution block
(e.g. JXS[11])
Returns
-------
openmc.data.CorrelatedAngleEnergy
Correlated angle-energy distribution
"""
# Read number of interpolation regions and incoming energies
n_regions = int(ace.xss[idx])
n_energy_in = int(ace.xss[idx + 1 + 2*n_regions])
# Get interpolation information
idx += 1
if n_regions > 0:
breakpoints = ace.xss[idx:idx + n_regions].astype(int)
interpolation = ace.xss[idx + n_regions:idx + 2*n_regions].astype(int)
else:
breakpoints = np.array([n_energy_in])
interpolation = np.array([2])
# Incoming energies at which distributions exist
idx += 2*n_regions + 1
energy = ace.xss[idx:idx + n_energy_in]*EV_PER_MEV
# Location of distributions
idx += n_energy_in
loc_dist = ace.xss[idx:idx + n_energy_in].astype(int)
# Initialize list of distributions
energy_out = []
mu = []
# Read each outgoing energy distribution
for i in range(n_energy_in):
idx = ldis + loc_dist[i] - 1
# intt = interpolation scheme (1=hist, 2=lin-lin)
INTTp = int(ace.xss[idx])
intt = INTTp % 10
n_discrete_lines = (INTTp - intt)//10
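            # e.g. INTTp == 22 decodes to intt == 2 (linear-linear) with
            # 2 discrete lines preceding the continuous distribution.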
if intt not in (1, 2):
warn("Interpolation scheme for continuous tabular distribution "
"is not histogram or linear-linear.")
intt = 2
# Secondary energy distribution
n_energy_out = int(ace.xss[idx + 1])
data = ace.xss[idx + 2:idx + 2 + 4*n_energy_out].copy()
data.shape = (4, n_energy_out)
data[0,:] *= EV_PER_MEV
# Create continuous distribution
eout_continuous = Tabular(data[0][n_discrete_lines:],
data[1][n_discrete_lines:]/EV_PER_MEV,
INTERPOLATION_SCHEME[intt],
ignore_negative=True)
eout_continuous.c = data[2][n_discrete_lines:]
if np.any(data[1][n_discrete_lines:] < 0.0):
warn("Correlated angle-energy distribution has negative "
"probabilities.")
# If discrete lines are present, create a mixture distribution
if n_discrete_lines > 0:
eout_discrete = Discrete(data[0][:n_discrete_lines],
data[1][:n_discrete_lines])
eout_discrete.c = data[2][:n_discrete_lines]
if n_discrete_lines == n_energy_out:
eout_i = eout_discrete
else:
p_discrete = min(sum(eout_discrete.p), 1.0)
eout_i = Mixture([p_discrete, 1. - p_discrete],
[eout_discrete, eout_continuous])
else:
eout_i = eout_continuous
energy_out.append(eout_i)
lc = data[3].astype(int)
# Secondary angular distributions
mu_i = []
for j in range(n_energy_out):
if lc[j] > 0:
idx = ldis + abs(lc[j]) - 1
intt = int(ace.xss[idx])
n_cosine = int(ace.xss[idx + 1])
data = ace.xss[idx + 2:idx + 2 + 3*n_cosine]
data.shape = (3, n_cosine)
mu_ij = Tabular(data[0], data[1], INTERPOLATION_SCHEME[intt])
mu_ij.c = data[2]
else:
# Isotropic distribution
mu_ij = Uniform(-1., 1.)
mu_i.append(mu_ij)
# Add cosine distributions for this incoming energy to list
mu.append(mu_i)
return cls(breakpoints, interpolation, energy, energy_out, mu)
@classmethod
def from_endf(cls, file_obj):
"""Generate correlated angle-energy distribution from an ENDF evaluation
Parameters
----------
file_obj : file-like object
ENDF file positioned at the start of a section for a correlated
angle-energy distribution
Returns
-------
openmc.data.CorrelatedAngleEnergy
Correlated angle-energy distribution
"""
params, tab2 = get_tab2_record(file_obj)
lep = params[3]
ne = params[5]
energy = np.zeros(ne)
n_discrete_energies = np.zeros(ne, dtype=int)
energy_out = []
mu = []
for i in range(ne):
items, values = get_list_record(file_obj)
energy[i] = items[1]
n_discrete_energies[i] = items[2]
# TODO: separate out discrete lines
n_angle = items[3]
n_energy_out = items[5]
values = np.asarray(values)
values.shape = (n_energy_out, n_angle + 2)
# Outgoing energy distribution at the i-th incoming energy
eout_i = values[:,0]
eout_p_i = values[:,1]
energy_out_i = Tabular(eout_i, eout_p_i, INTERPOLATION_SCHEME[lep],
ignore_negative=True)
energy_out.append(energy_out_i)
# Legendre coefficients used for angular distributions
mu_i = []
for j in range(n_energy_out):
mu_i.append(Legendre(values[j,1:]))
mu.append(mu_i)
return cls(tab2.breakpoints, tab2.interpolation, energy,
energy_out, mu)
|
sio2project/oioioi
|
refs/heads/master
|
oioioi/oi/__init__.py
|
12133432
| |
psiq/gdsfactory
|
refs/heads/master
|
gdsdiff/__init__.py
|
12133432
| |
diox/olympia
|
refs/heads/master
|
docs/extensions/__init__.py
|
12133432
| |
liorvh/beeswithmachineguns
|
refs/heads/master
|
beeswithmachineguns/__init__.py
|
12133432
| |
EDUlib/edx-ora2
|
refs/heads/master
|
openassessment/assessment/worker/__init__.py
|
12133432
| |
siutanwong/scikit-learn
|
refs/heads/master
|
sklearn/gaussian_process/tests/__init__.py
|
12133432
| |
kuleshov/deep-learning-models
|
refs/heads/master
|
util/__init__.py
|
12133432
| |
frishberg/django
|
refs/heads/master
|
tests/modeladmin/models.py
|
37
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, models.CASCADE, related_name='main_concerts')
opening_band = models.ForeignKey(Band, models.CASCADE, related_name='opening_concerts', blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField(default=False)
pub_date = models.DateTimeField()
band = models.ForeignKey(Band, models.CASCADE)
# This field is intentionally 2 characters long (#16080).
no = models.IntegerField(verbose_name="Number", blank=True, null=True)
def decade_published_in(self):
return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel, models.CASCADE)
|
Sorsly/subtle
|
refs/heads/master
|
google-cloud-sdk/platform/gsutil/third_party/boto/boto/ec2/cloudwatch/alarm.py
|
134
|
# Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from datetime import datetime
from boto.ec2.cloudwatch.listelement import ListElement
from boto.ec2.cloudwatch.dimension import Dimension
from boto.compat import json
from boto.compat import six
class MetricAlarms(list):
def __init__(self, connection=None):
"""
Parses a list of MetricAlarms.
"""
list.__init__(self)
self.connection = connection
def startElement(self, name, attrs, connection):
if name == 'member':
metric_alarm = MetricAlarm(connection)
self.append(metric_alarm)
return metric_alarm
def endElement(self, name, value, connection):
pass
class MetricAlarm(object):
OK = 'OK'
ALARM = 'ALARM'
INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'
_cmp_map = {
'>=': 'GreaterThanOrEqualToThreshold',
'>': 'GreaterThanThreshold',
'<': 'LessThanThreshold',
'<=': 'LessThanOrEqualToThreshold',
}
_rev_cmp_map = dict((v, k) for (k, v) in six.iteritems(_cmp_map))
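    # e.g. passing comparison='>=' stores 'GreaterThanOrEqualToThreshold';
    # _rev_cmp_map converts the operator name back when parsing responses.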
def __init__(self, connection=None, name=None, metric=None,
namespace=None, statistic=None, comparison=None,
threshold=None, period=None, evaluation_periods=None,
unit=None, description='', dimensions=None,
alarm_actions=None, insufficient_data_actions=None,
ok_actions=None):
"""
Creates a new Alarm.
:type name: str
:param name: Name of alarm.
:type metric: str
:param metric: Name of alarm's associated metric.
:type namespace: str
:param namespace: The namespace for the alarm's metric.
:type statistic: str
:param statistic: The statistic to apply to the alarm's associated
metric.
Valid values: SampleCount|Average|Sum|Minimum|Maximum
:type comparison: str
:param comparison: Comparison used to compare statistic with threshold.
Valid values: >= | > | < | <=
:type threshold: float
:param threshold: The value against which the specified statistic
is compared.
:type period: int
:param period: The period in seconds over which the specified
statistic is applied.
:type evaluation_periods: int
:param evaluation_periods: The number of periods over which data is
compared to the specified threshold.
:type unit: str
:param unit: Allowed Values are:
Seconds|Microseconds|Milliseconds,
Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes,
Bits|Kilobits|Megabits|Gigabits|Terabits,
Percent|Count|
Bytes/Second|Kilobytes/Second|Megabytes/Second|
Gigabytes/Second|Terabytes/Second,
Bits/Second|Kilobits/Second|Megabits/Second,
Gigabits/Second|Terabits/Second|Count/Second|None
:type description: str
:param description: Description of MetricAlarm
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
Example: {
'InstanceId': ['i-0123456', 'i-0123457'],
'LoadBalancerName': 'test-lb'
}
:type alarm_actions: list of strs
:param alarm_actions: A list of the ARNs of the actions to take in
ALARM state
:type insufficient_data_actions: list of strs
:param insufficient_data_actions: A list of the ARNs of the actions to
take in INSUFFICIENT_DATA state
:type ok_actions: list of strs
:param ok_actions: A list of the ARNs of the actions to take in OK state
"""
self.name = name
self.connection = connection
self.metric = metric
self.namespace = namespace
self.statistic = statistic
if threshold is not None:
self.threshold = float(threshold)
else:
self.threshold = None
self.comparison = self._cmp_map.get(comparison)
if period is not None:
self.period = int(period)
else:
self.period = None
if evaluation_periods is not None:
self.evaluation_periods = int(evaluation_periods)
else:
self.evaluation_periods = None
self.actions_enabled = None
self.alarm_arn = None
self.last_updated = None
self.description = description
self.dimensions = dimensions
self.state_reason = None
self.state_value = None
self.unit = unit
self.alarm_actions = alarm_actions
self.insufficient_data_actions = insufficient_data_actions
self.ok_actions = ok_actions
def __repr__(self):
return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric,
self.statistic,
self.comparison,
self.threshold)
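    # A minimal usage sketch (names and values are illustrative only),
    # assuming a boto CloudWatchConnection is available as `conn`:
    #
    #   alarm = MetricAlarm(name='cpu-high', metric='CPUUtilization',
    #                       namespace='AWS/EC2', statistic='Average',
    #                       comparison='>=', threshold=90.0, period=300,
    #                       evaluation_periods=2)
    #   conn.create_alarm(alarm)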
def startElement(self, name, attrs, connection):
if name == 'AlarmActions':
self.alarm_actions = ListElement()
return self.alarm_actions
elif name == 'InsufficientDataActions':
self.insufficient_data_actions = ListElement()
return self.insufficient_data_actions
elif name == 'OKActions':
self.ok_actions = ListElement()
return self.ok_actions
elif name == 'Dimensions':
self.dimensions = Dimension()
return self.dimensions
else:
pass
def endElement(self, name, value, connection):
if name == 'ActionsEnabled':
self.actions_enabled = value
elif name == 'AlarmArn':
self.alarm_arn = value
elif name == 'AlarmConfigurationUpdatedTimestamp':
self.last_updated = value
elif name == 'AlarmDescription':
self.description = value
elif name == 'AlarmName':
self.name = value
elif name == 'ComparisonOperator':
setattr(self, 'comparison', self._rev_cmp_map[value])
elif name == 'EvaluationPeriods':
self.evaluation_periods = int(value)
elif name == 'MetricName':
self.metric = value
elif name == 'Namespace':
self.namespace = value
elif name == 'Period':
self.period = int(value)
elif name == 'StateReason':
self.state_reason = value
elif name == 'StateValue':
self.state_value = value
elif name == 'Statistic':
self.statistic = value
elif name == 'Threshold':
self.threshold = float(value)
elif name == 'Unit':
self.unit = value
else:
setattr(self, name, value)
def set_state(self, value, reason, data=None):
""" Temporarily sets the state of an alarm.
:type value: str
:param value: OK | ALARM | INSUFFICIENT_DATA
:type reason: str
:param reason: Reason alarm set (human readable).
:type data: str
:param data: Reason data (will be jsonified).
"""
return self.connection.set_alarm_state(self.name, reason, value, data)
def update(self):
return self.connection.update_alarm(self)
def enable_actions(self):
return self.connection.enable_alarm_actions([self.name])
def disable_actions(self):
return self.connection.disable_alarm_actions([self.name])
def describe_history(self, start_date=None, end_date=None, max_records=None,
history_item_type=None, next_token=None):
return self.connection.describe_alarm_history(self.name, start_date,
end_date, max_records,
history_item_type,
next_token)
def add_alarm_action(self, action_arn=None):
"""
Adds an alarm action, represented as an SNS topic, to this alarm.
        What to do when the alarm is triggered.
:type action_arn: str
:param action_arn: SNS topics to which notification should be
sent if the alarm goes to state ALARM.
"""
if not action_arn:
return # Raise exception instead?
self.actions_enabled = 'true'
self.alarm_actions.append(action_arn)
def add_insufficient_data_action(self, action_arn=None):
"""
Adds an insufficient_data action, represented as an SNS topic, to
this alarm. What to do when the insufficient_data state is reached.
:type action_arn: str
:param action_arn: SNS topics to which notification should be
sent if the alarm goes to state INSUFFICIENT_DATA.
"""
if not action_arn:
return
self.actions_enabled = 'true'
self.insufficient_data_actions.append(action_arn)
def add_ok_action(self, action_arn=None):
"""
Adds an ok action, represented as an SNS topic, to this alarm. What
to do when the ok state is reached.
:type action_arn: str
:param action_arn: SNS topics to which notification should be
                           sent if the alarm goes to state OK.
"""
if not action_arn:
return
self.actions_enabled = 'true'
self.ok_actions.append(action_arn)
def delete(self):
self.connection.delete_alarms([self.name])
class AlarmHistoryItem(object):
def __init__(self, connection=None):
self.connection = connection
def __repr__(self):
return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'HistoryData':
self.data = json.loads(value)
elif name == 'HistoryItemType':
            self.item_type = value
elif name == 'HistorySummary':
self.summary = value
elif name == 'Timestamp':
try:
self.timestamp = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
|
Hoekz/hackness-monster
|
refs/heads/master
|
venv/lib/python2.7/site-packages/click/_compat.py
|
66
|
import re
import io
import os
import sys
import codecs
from weakref import WeakKeyDictionary
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith('win')
DEFAULT_COLUMNS = 80
_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')
def get_filesystem_encoding():
return sys.getfilesystemencoding() or sys.getdefaultencoding()
def _make_text_stream(stream, encoding, errors):
if encoding is None:
encoding = get_best_encoding(stream)
if errors is None:
errors = 'replace'
return _NonClosingTextIOWrapper(stream, encoding, errors,
line_buffering=True)
def is_ascii_encoding(encoding):
"""Checks if a given encoding is ascii."""
try:
return codecs.lookup(encoding).name == 'ascii'
except LookupError:
return False
def get_best_encoding(stream):
"""Returns the default stream encoding if not found."""
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
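# For example, under a misconfigured POSIX locale such as LC_ALL=C a stream
# typically reports an 'ascii' encoding; get_best_encoding() substitutes
# 'utf-8' so non-ASCII output still has a chance of working.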
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __init__(self, stream, encoding, errors, **extra):
self._stream = stream = _FixupStream(stream)
io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
# The io module is a place where the Python 3 text behavior
# was forced upon Python 2, so we need to unbreak
# it to look like Python 2.
if PY2:
def write(self, x):
if isinstance(x, str) or is_bytes(x):
try:
self.flush()
except Exception:
pass
return self.buffer.write(str(x))
return io.TextIOWrapper.write(self, x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __del__(self):
try:
self.detach()
except Exception:
pass
def isatty(self):
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
class _FixupStream(object):
"""The new io interface needs more from streams than streams
traditionally implement. As such, this fix-up code is necessary in
some circumstances.
"""
def __init__(self, stream):
self._stream = stream
def __getattr__(self, name):
return getattr(self._stream, name)
def read1(self, size):
f = getattr(self._stream, 'read1', None)
if f is not None:
return f(size)
# We only dispatch to readline instead of read in Python 2 as we
        # do not want to cause problems with the different implementation
# of line buffering.
if PY2:
return self._stream.readline(size)
return self._stream.read(size)
def readable(self):
x = getattr(self._stream, 'readable', None)
if x is not None:
return x()
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
x = getattr(self._stream, 'writable', None)
if x is not None:
return x()
try:
self._stream.write('')
except Exception:
try:
self._stream.write(b'')
except Exception:
return False
return True
def seekable(self):
x = getattr(self._stream, 'seekable', None)
if x is not None:
return x()
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
if PY2:
text_type = unicode
bytes = str
raw_input = raw_input
string_types = (str, unicode)
iteritems = lambda x: x.iteritems()
range_type = xrange
def is_bytes(x):
return isinstance(x, (buffer, bytearray))
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# For Windows, we need to force stdout/stdin/stderr to binary if it's
# fetched for that. This obviously is not the most correct way to do
# it as it changes global state. Unfortunately, there does not seem to
# be a clear better way to do it as just reopening the file in binary
# mode does not change anything.
#
# An option would be to do what Python 3 does and to open the file as
# binary only, patch it back to the system, and then use a wrapper
# stream that converts newlines. It's not quite clear what's the
# correct option here.
#
# This code also lives in _winconsole for the fallback to the console
# emulation stream.
if WIN:
import msvcrt
def set_binary_mode(f):
try:
fileno = f.fileno()
except Exception:
pass
else:
msvcrt.setmode(fileno, os.O_BINARY)
return f
else:
set_binary_mode = lambda x: x
def isidentifier(x):
return _identifier_re.search(x) is not None
def get_binary_stdin():
return set_binary_mode(sys.stdin)
def get_binary_stdout():
return set_binary_mode(sys.stdout)
def get_binary_stderr():
return set_binary_mode(sys.stderr)
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdin, encoding, errors)
def get_text_stdout(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stdout, encoding, errors)
def get_text_stderr(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _make_text_stream(sys.stderr, encoding, errors)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
return value
else:
import io
text_type = str
raw_input = input
string_types = (str,)
range_type = range
isidentifier = lambda x: x.isidentifier()
iteritems = lambda x: iter(x.items())
def is_bytes(x):
return isinstance(x, (bytes, memoryview, bytearray))
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
# closed. In this case, we assume the default.
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_reader(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
    # This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if its encoding is ASCII."""
# If the stream does not have an encoding set, we assume it's set
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
# but this at least will force Click to recover somehow.
return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise, it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors):
if _is_binary_reader(text_reader, False):
binary_reader = text_reader
else:
# If there is no target encoding set, we need to verify that the
# reader is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_reader):
return text_reader
if _is_compatible_text_stream(text_reader, encoding, errors):
return text_reader
# If the reader has no encoding, we try to find the underlying
# binary reader for it. If that fails because the environment is
        # misconfigured, we silently fall back to the same reader because
        # this situation is too common. In that case, mojibake is better
        # than exceptions.
binary_reader = _find_binary_reader(text_reader)
if binary_reader is None:
return text_reader
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_reader, encoding, errors)
def _force_correct_text_writer(text_writer, encoding, errors):
if _is_binary_writer(text_writer, False):
binary_writer = text_writer
else:
# If there is no target encoding set, we need to verify that the
# writer is not actually misconfigured.
if encoding is None and not _stream_is_misconfigured(text_writer):
return text_writer
if _is_compatible_text_stream(text_writer, encoding, errors):
return text_writer
# If the writer has no encoding, we try to find the underlying
# binary writer for it. If that fails because the environment is
        # misconfigured, we silently fall back to the same writer because
        # this situation is too common. In that case, mojibake is better
        # than exceptions.
binary_writer = _find_binary_writer(text_writer)
if binary_writer is None:
return text_writer
# At this point, we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _make_text_stream(binary_writer, encoding, errors)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError('Was not able to determine binary '
'stream for sys.stderr.')
return writer
def get_text_stdin(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors)
def get_text_stdout(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors)
def get_text_stderr(encoding=None, errors=None):
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors)
def filename_to_ui(value):
if isinstance(value, bytes):
value = value.decode(get_filesystem_encoding(), 'replace')
else:
value = value.encode('utf-8', 'surrogateescape') \
.decode('utf-8', 'replace')
return value
def get_streerror(e, default=None):  # note: misspelling of "strerror" kept as-is
if hasattr(e, 'strerror'):
msg = e.strerror
else:
if default is not None:
msg = default
else:
msg = str(e)
if isinstance(msg, bytes):
msg = msg.decode('utf-8', 'replace')
return msg
def open_stream(filename, mode='r', encoding=None, errors='strict',
atomic=False):
# Standard streams first. These are simple because they don't need
# special handling for the atomic flag. It's entirely ignored.
if filename == '-':
if 'w' in mode:
if 'b' in mode:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if 'b' in mode:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
if encoding is None:
return open(filename, mode), True
return io.open(filename, mode, encoding=encoding, errors=errors), True
# Some usability stuff for atomic writes
if 'a' in mode:
raise ValueError(
'Appending to an existing file is not supported, because that '
'would involve an expensive `copy`-operation to a temporary '
'file. Open the file in normal `w`-mode and copy explicitly '
'if that\'s what you\'re after.'
)
if 'x' in mode:
raise ValueError('Use the `overwrite`-parameter instead.')
if 'w' not in mode:
raise ValueError('Atomic writes only make sense with `w`-mode.')
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import tempfile
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
prefix='.__atomic-write')
if encoding is not None:
f = io.open(fd, mode, encoding=encoding, errors=errors)
else:
f = os.fdopen(fd, mode)
return _AtomicFile(f, tmp_filename, filename), True
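# Minimal usage sketch for open_stream (hypothetical filename): the second
# return value says whether the caller owns the stream and should close it;
# '-' maps to stdin/stdout, which must not be closed.
#
#     f, should_close = open_stream('out.txt', 'w', atomic=True)
#     try:
#         f.write(u'hello')
#     finally:
#         if should_close:
#             f.close()  # for atomic writes this moves the temp file in place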
# Used in a destructor call, needs extra protection from interpreter cleanup.
if hasattr(os, 'replace'):
_replace = os.replace
_can_replace = True
else:
_replace = os.rename
_can_replace = not WIN
class _AtomicFile(object):
def __init__(self, f, tmp_filename, real_filename):
self._f = f
self._tmp_filename = tmp_filename
self._real_filename = real_filename
self.closed = False
@property
def name(self):
return self._real_filename
def close(self, delete=False):
if self.closed:
return
self._f.close()
if not _can_replace:
try:
os.remove(self._real_filename)
except OSError:
pass
_replace(self._tmp_filename, self._real_filename)
self.closed = True
def __getattr__(self, name):
return getattr(self._f, name)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close(delete=exc_type is not None)
def __repr__(self):
return repr(self._f)
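# Usage sketch: _AtomicFile is also a context manager. Note that close()
# currently ignores its ``delete`` flag, so the temporary file is moved into
# place even when __exit__ passes delete=True after an exception.
#
#     with _AtomicFile(f, tmp_filename, real_filename) as af:
#         af.write(u'data')  # proxied to the wrapped file via __getattr__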
auto_wrap_for_ansi = None
colorama = None
get_winterm_size = None
def strip_ansi(value):
return _ansi_re.sub('', value)
def should_strip_ansi(stream=None, color=None):
if color is None:
if stream is None:
stream = sys.stdin
return not isatty(stream)
return not color
# If we're on Windows, we provide transparent integration through
# colorama. This will make ANSI colors through the echo function
# work automatically.
if WIN:
    # Windows consoles wrap (and can emit a spurious newline) once the
    # last column is written, so keep the default one column short of 80.
DEFAULT_COLUMNS = 79
from ._winconsole import _get_windows_console_stream
def _get_argv_encoding():
import locale
return locale.getpreferredencoding()
if PY2:
def raw_input(prompt=''):
sys.stderr.flush()
if prompt:
stdout = _default_text_stdout()
stdout.write(prompt)
stdin = _default_text_stdin()
return stdin.readline().rstrip('\r\n')
try:
import colorama
except ImportError:
pass
else:
_ansi_stream_wrappers = WeakKeyDictionary()
def auto_wrap_for_ansi(stream, color=None):
"""This function wraps a stream so that calls through colorama
are issued to the win32 console API to recolor on demand. It
            also resets the colors if a write call is interrupted, so the
            console is not left in a broken state afterwards.
"""
try:
cached = _ansi_stream_wrappers.get(stream)
except Exception:
cached = None
if cached is not None:
return cached
strip = should_strip_ansi(stream, color)
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
rv = ansi_wrapper.stream
_write = rv.write
def _safe_write(s):
try:
return _write(s)
except:
ansi_wrapper.reset_all()
raise
rv.write = _safe_write
try:
_ansi_stream_wrappers[stream] = rv
except Exception:
pass
return rv
def get_winterm_size():
win = colorama.win32.GetConsoleScreenBufferInfo(
colorama.win32.STDOUT).srWindow
return win.Right - win.Left, win.Bottom - win.Top
else:
def _get_argv_encoding():
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
_get_windows_console_stream = lambda *x: None
def term_len(x):
return len(strip_ansi(x))
def isatty(stream):
try:
return stream.isatty()
except Exception:
return False
def _make_cached_stream_func(src_func, wrapper_func):
cache = WeakKeyDictionary()
def func():
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
cache[stream] = rv
except Exception:
pass
return rv
return func
_default_text_stdin = _make_cached_stream_func(
lambda: sys.stdin, get_text_stdin)
_default_text_stdout = _make_cached_stream_func(
lambda: sys.stdout, get_text_stdout)
_default_text_stderr = _make_cached_stream_func(
lambda: sys.stderr, get_text_stderr)
binary_streams = {
'stdin': get_binary_stdin,
'stdout': get_binary_stdout,
'stderr': get_binary_stderr,
}
text_streams = {
'stdin': get_text_stdin,
'stdout': get_text_stdout,
'stderr': get_text_stderr,
}
|
aaniket/LP
|
refs/heads/master
|
test/test.py
|
2
|
from src.trumpscript.compiler import Compiler
from src.trumpscript.tokenizer import Tokenizer
__author__ = 'github.com/samshadwell'
def test_tokenize_file(filename, expected):
"""
Parse the file and verify that the types are what we expect
:param expected: the expected sequence of type codes coming from the parser
:param filename: the file to read and parse
    :return: True if the parsed tokens match the expected sequence, False otherwise
"""
tokens = Tokenizer.tokenize(filename)
if len(tokens) != len(expected):
print("Tokens and expected are different lengths\n")
return False
for idx in range(len(expected)):
if tokens[idx]['type'] != expected[idx]:
print("Difference at index: " + str(idx) + "\n")
print("Expected: " + str(expected[idx]))
print("Received: " + str(tokens[idx]))
return False
print("Tokenizer tests pass\n")
return True
def test_compile(filename):
Compiler().compile(filename)
# Bad tests. They break things (but on purpose)
# test_compile("test_files/debatevsdark.txt") # Infinite loop
# test_compile("test_files/not_english.txt") # Not english
# test_compile("test_files/not_patriotic.txt") # America is great error
# test_compile("test_files/nonterm_quote.txt") # Topical error
# The few test files that actually work
test_compile("test_files/math.txt") # Math
test_compile("test_files/debate_vs_rubio.txt") # Simple hello world
test_compile("test_files/debate_vs_hillary.txt")
test_compile("test_files/debatesvdebates.txt") # Complex hello world
test_compile("test_files/haiku.txt") #Haiku
test_compile("test_files/huge_test.tr") #Huuuuge?
test_compile("test_files/fizz_buzz.txt") # Fizzbuzz
# test_tokenize_file("test_files/toupee.txt", [T_Make, T_Word, T_Num,
# T_While, T_LParen, T_Word, T_Less, T_Num, T_RParen,
# T_Print, T_LParen, T_Num, T_Minus, T_Word, T_RParen,
# T_Make, T_Word, T_LParen, T_Word, T_Plus, T_Num, T_RParen])
#
# test_tokenize_file("test_files/test_1.txt", [T_Make, T_Word, T_LParen, T_Not, T_False, T_RParen,
# T_If, T_Word, T_Is, T_True, T_LBrace,
# T_Word, T_Print, T_Word, T_Quote, T_RBrace])
|
rackerlabs/arborlabs_client
|
refs/heads/master
|
solumclient/openstack/common/__init__.py
|
233
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
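# After this call, ``from six.moves import mox`` resolves to the legacy
# ``mox`` package on Python 2 and to ``mox3.mox`` on Python 3.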
|
tdr130/zulip
|
refs/heads/master
|
analytics/management/commands/active_user_stats.py
|
116
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.models import UserPresence, UserActivity
from zerver.lib.utils import statsd, statsd_key
from datetime import datetime, timedelta
from collections import defaultdict
class Command(BaseCommand):
help = """Sends active user statistics to statsd.
Run as a cron job that runs every 10 minutes."""
def handle(self, *args, **options):
        # Get all user presence rows from the last week, padded by 30 minutes
        # to cover the idle-detection window used below
cutoff = datetime.now() - timedelta(minutes=30, hours=168)
users = UserPresence.objects.select_related().filter(timestamp__gt=cutoff)
# Calculate 10min, 2hrs, 12hrs, 1day, 2 business days (TODO business days), 1 week bucket of stats
hour_buckets = [0.16, 2, 12, 24, 48, 168]
user_info = defaultdict(dict)
for last_presence in users:
if last_presence.status == UserPresence.IDLE:
known_active = last_presence.timestamp - timedelta(minutes=30)
else:
known_active = last_presence.timestamp
for bucket in hour_buckets:
                if bucket not in user_info[last_presence.user_profile.realm.domain]:
user_info[last_presence.user_profile.realm.domain][bucket] = []
if datetime.now(known_active.tzinfo) - known_active < timedelta(hours=bucket):
user_info[last_presence.user_profile.realm.domain][bucket].append(last_presence.user_profile.email)
for realm, buckets in user_info.items():
print("Realm %s" % realm)
for hr, users in sorted(buckets.items()):
print("\tUsers for %s: %s" % (hr, len(users)))
statsd.gauge("users.active.%s.%shr" % (statsd_key(realm, True), statsd_key(hr, True)), len(users))
# Also do stats for how many users have been reading the app.
users_reading = UserActivity.objects.select_related().filter(query="/json/update_message_flags")
user_info = defaultdict(dict)
for activity in users_reading:
for bucket in hour_buckets:
                if bucket not in user_info[activity.user_profile.realm.domain]:
user_info[activity.user_profile.realm.domain][bucket] = []
if datetime.now(activity.last_visit.tzinfo) - activity.last_visit < timedelta(hours=bucket):
user_info[activity.user_profile.realm.domain][bucket].append(activity.user_profile.email)
for realm, buckets in user_info.items():
print("Realm %s" % realm)
for hr, users in sorted(buckets.items()):
print("\tUsers reading for %s: %s" % (hr, len(users)))
statsd.gauge("users.reading.%s.%shr" % (statsd_key(realm, True), statsd_key(hr, True)), len(users))
|
timlinux/inasafe
|
refs/heads/develop
|
safe/metadata/metadata_db_io.py
|
2
|
# coding=utf-8
"""Metadata DB IO implementation."""
import logging
import os
import sqlite3 as sqlite
from sqlite3 import OperationalError
# noinspection PyPackageRequirements
from PyQt4.QtCore import QObject
from safe.common.exceptions import (
HashNotFoundError, UnsupportedProviderError)
from safe.definitions.default_settings import inasafe_default_settings
from safe.utilities.settings import setting
__copyright__ = "Copyright 2015, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
LOGGER = logging.getLogger('InaSAFE')
class MetadataDbIO(QObject):
"""Class for doing metadata read/write operations on the local DB
The local sqlite db is used for supporting metadata for remote
datasources.
.. versionadded:: 3.2
"""
def __init__(self):
"""Constructor for the metadataDbIO object."""
QObject.__init__(self)
# path to sqlite db path
self.metadata_db_path = None
self.setup_metadata_db_path()
self.connection = None
def set_metadata_db_path(self, path):
"""Set the path for the metadata database (sqlite).
The file will be used to search for metadata for non local datasets.
:param path: A valid path to a sqlite database. The database does
not need to exist already, but the user should be able to write
to the path provided.
:type path: str
"""
self.metadata_db_path = str(path)
# methods below here should be considered private
@staticmethod
def default_metadata_db_path():
"""Helper to get the default path for the metadata file.
        :returns: The default location of the metadata database, taken
            from the default settings.
:rtype: str
"""
return inasafe_default_settings['keywordCachePath']
def setup_metadata_db_path(self):
"""Helper to set the active path for the metadata.
        Called at init time; you can override this path by calling
        set_metadata_db_path.
        Sets self.metadata_db_path to the path stored in the settings,
        falling back to the default location if the user has never
        specified one.
"""
self.metadata_db_path = str(
setting('keywordCachePath', expected_type=unicode))
def open_connection(self):
"""Open an sqlite connection to the metadata database.
By default the metadata database will be used in the plugin dir,
        unless an explicit path has been set using set_metadata_db_path, or
overridden in QSettings. If the db does not exist it will
be created.
:raises: An sqlite.Error is raised if anything goes wrong
"""
self.connection = None
base_directory = os.path.dirname(self.metadata_db_path)
if not os.path.exists(base_directory):
try:
os.mkdir(base_directory)
            except OSError:  # os.mkdir raises OSError, not IOError
LOGGER.exception(
'Could not create directory for metadata cache.')
raise
try:
self.connection = sqlite.connect(self.metadata_db_path)
except (OperationalError, sqlite.Error):
LOGGER.exception('Failed to open metadata cache database.')
raise
def close_connection(self):
"""Close the active sqlite3 connection."""
if self.connection is not None:
self.connection.close()
self.connection = None
def get_cursor(self):
"""Get a cursor for the active connection.
The cursor can be used to execute arbitrary queries against the
database. This method also checks that the metadata table exists in
the schema, and if not, it creates it.
:returns: A valid cursor opened against the connection.
        :rtype: sqlite3.Cursor
:raises: An sqlite.Error will be raised if anything goes wrong.
"""
if self.connection is None:
try:
self.open_connection()
except OperationalError:
raise
try:
cursor = self.connection.cursor()
cursor.execute('SELECT SQLITE_VERSION()')
data = cursor.fetchone()
LOGGER.debug("SQLite version: %s" % data)
# Check if we have some tables, if not create them
sql = 'select sql from sqlite_master where type = \'table\';'
cursor.execute(sql)
data = cursor.fetchone()
LOGGER.debug("Tables: %s" % data)
if data is None:
LOGGER.debug('No tables found')
sql = (
'create table metadata (hash varchar(32) primary key,'
'json text, xml text);')
LOGGER.debug(sql)
cursor.execute(sql)
# data = cursor.fetchone()
cursor.fetchone()
else:
LOGGER.debug('metadata table already exists')
return cursor
except sqlite.Error, e:
LOGGER.debug("Error %s:" % e.args[0])
raise
@staticmethod
def are_metadata_file_based(layer):
"""Check if metadata should be read/written to file or our metadata db.
Determine which metadata lookup system to use (file base or cache db)
based on the layer's provider type. True indicates we should use the
datasource as a file and look for a metadata file, False and we look
in the metadata db.
        :param layer: The layer for which we want to know how the metadata are stored.
:type layer: QgsMapLayer
:returns: True if metadata are stored in a file next to the dataset,
            False if the dataset is remote, e.g. a database.
:rtype: bool
:raises: UnsupportedProviderError
"""
try:
provider_type = str(layer.providerType())
except AttributeError:
raise UnsupportedProviderError(
'Could not determine type for provider: %s' %
layer.__class__.__name__)
provider_dict = {
'ogr': True,
'gdal': True,
'gpx': False,
'wms': False,
'spatialite': False,
'delimitedtext': False,
'postgres': False}
file_based_metadata = False
if provider_type in provider_dict:
file_based_metadata = provider_dict[provider_type]
return file_based_metadata
@staticmethod
def hash_for_datasource(data_source):
"""Given a data_source, return its hash.
:param data_source: The data_source name from a layer.
:type data_source: str
:returns: An md5 hash for the data source name.
:rtype: str
"""
import hashlib
hash_value = hashlib.md5()
hash_value.update(data_source)
hash_value = hash_value.hexdigest()
return hash_value
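    # Example: hash_for_datasource("dbname='osm' host=localhost") returns the
    # 32-character hex md5 digest of that string, matching the varchar(32)
    # primary key of the metadata table created in get_cursor().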
def delete_metadata_for_uri(self, uri):
"""Delete metadata for a URI in the metadata database.
A hash will be constructed from the supplied uri and a lookup made
in a local SQLITE database for the metadata. If there is an existing
record for the hash, the entire record will be erased.
.. seealso:: write_metadata_for_uri, read_metadata_for_uri
:param uri: A layer uri. e.g. ```dbname=\'osm\' host=localhost
            port=5432 user=\'foo\' password=\'bar\' sslmode=disable key=\'id\'
srid=4326```
:type uri: str
"""
hash_value = self.hash_for_datasource(uri)
try:
cursor = self.get_cursor()
            # remove any row matching our hash (parameterised to avoid
            # building SQL by string concatenation)
            sql = 'delete from metadata where hash = ?;'
            cursor.execute(sql, (hash_value,))
self.connection.commit()
except sqlite.Error, e:
LOGGER.debug("SQLITE Error %s:" % e.args[0])
self.connection.rollback()
except Exception, e:
LOGGER.debug("Error %s:" % e.args[0])
self.connection.rollback()
raise
finally:
self.close_connection()
def write_metadata_for_uri(self, uri, json=None, xml=None):
"""Write metadata for a URI into the metadata database. All the
metadata for the uri should be written in a single operation.
A hash will be constructed from the supplied uri and a lookup made
in a local SQLite database for the metadata. If there is an existing
record it will be updated, if not, a new one will be created.
.. seealso:: read_metadata_from_uri, delete_metadata_for_uri
:param uri: A layer uri. e.g. ```dbname=\'osm\' host=localhost
port=5432 user=\'foo\' password=\'bar\' sslmode=disable
key=\'id\' srid=4326```
:type uri: str
:param json: The metadata to write (which should be provided as a
JSON str).
:type json: str
:param xml: The metadata to write (which should be provided as a
XML str).
:type xml: str
"""
hash_value = self.hash_for_datasource(uri)
try:
cursor = self.get_cursor()
# now see if we have any data for our hash
            sql = 'select json, xml from metadata where hash = ?;'
            cursor.execute(sql, (hash_value,))
data = cursor.fetchone()
if data is None:
# insert a new rec
# cursor.execute('insert into metadata(hash) values(:hash);',
# {'hash': hash_value})
cursor.execute(
'insert into metadata(hash, json, xml ) '
'values(:hash, :json, :xml);',
{'hash': hash_value, 'json': json, 'xml': xml})
self.connection.commit()
else:
# update existing rec
cursor.execute(
'update metadata set json=?, xml=? where hash = ?;',
(json, xml, hash_value))
self.connection.commit()
except sqlite.Error:
LOGGER.exception('Error writing metadata to SQLite db %s' %
self.metadata_db_path)
# See if we can roll back.
if self.connection is not None:
self.connection.rollback()
raise
finally:
self.close_connection()
def read_metadata_from_uri(self, uri, metadata_format):
"""Try to get metadata from the DB entry associated with a URI.
This is used for layers that are non local layer (e.g. postgresql
connection) and so we need to retrieve the metadata from the sqlite
metadata db.
A hash will be constructed from the supplied uri and a lookup made
in a local SQLITE database for the metadata. If there is an existing
        record it will be returned; if not, an error will be raised.
.. seealso:: write_metadata_for_uri, delete_metadata_for_uri
:param uri: A layer uri. e.g. ```dbname=\'osm\' host=localhost
port=5432 user=\'foo\' password=\'bar\' sslmode=disable
key=\'id\' srid=4326```
:type uri: str
:param metadata_format: The format of the metadata to retrieve.
Valid types are: 'json', 'xml'
:type metadata_format: str
:returns: A string containing the retrieved metadata
        :raises: HashNotFoundError if the metadata is not found.
"""
allowed_formats = ['json', 'xml']
if metadata_format not in allowed_formats:
message = 'Metadata format %s is not valid. Valid types: %s' % (
metadata_format, allowed_formats)
raise RuntimeError('%s' % message)
hash_value = self.hash_for_datasource(uri)
try:
self.open_connection()
except OperationalError:
raise
try:
cursor = self.get_cursor()
# now see if we have any data for our hash
sql = (
'select %s from metadata where hash = \'%s\';' % (
metadata_format, hash_value))
cursor.execute(sql)
data = cursor.fetchone()
if data is None:
raise HashNotFoundError('No hash found for %s' % hash_value)
data = data[0] # first field
            # get the metadata string out of the DB
metadata = str(data)
return metadata
        except sqlite.Error, e:
            # note: sqlite errors are swallowed here; the caller gets None
            LOGGER.debug("Error %s:" % e.args[0])
except Exception, e:
LOGGER.debug("Error %s:" % e.args[0])
raise
finally:
self.close_connection()
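# Minimal usage sketch (hypothetical URI; assumes a configured InaSAFE/QGIS
# environment):
#     db_io = MetadataDbIO()
#     db_io.write_metadata_for_uri(uri, json='{"title": "roads"}')
#     metadata = db_io.read_metadata_from_uri(uri, 'json')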
|
CUCWD/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/oauth_dispatch/views.py
|
8
|
"""
Views that dispatch processing of OAuth requests to django-oauth2-provider or
django-oauth-toolkit as appropriate.
"""
from __future__ import unicode_literals
import hashlib
import json
from Cryptodome.PublicKey import RSA
from django.conf import settings
from django.urls import reverse
from django.http import JsonResponse
from django.views.generic import View
from edx_oauth2_provider import views as dop_views # django-oauth2-provider views
from jwkest.jwk import RSAKey
from oauth2_provider import models as dot_models # django-oauth-toolkit
from oauth2_provider import views as dot_views
from ratelimit import ALL
from ratelimit.mixins import RatelimitMixin
from openedx.core.djangoapps.auth_exchange import views as auth_exchange_views
from openedx.core.lib.token_utils import JwtBuilder
from . import adapters
from .dot_overrides import views as dot_overrides_views
from .toggles import ENFORCE_JWT_SCOPES
class _DispatchingView(View):
"""
Base class that route views to the appropriate provider view. The default
behavior routes based on client_id, but this can be overridden by redefining
`select_backend()` if particular views need different behavior.
"""
# pylint: disable=no-member
dot_adapter = adapters.DOTAdapter()
dop_adapter = adapters.DOPAdapter()
def get_adapter(self, request):
"""
Returns the appropriate adapter based on the OAuth client linked to the request.
"""
if dot_models.Application.objects.filter(client_id=self._get_client_id(request)).exists():
return self.dot_adapter
else:
return self.dop_adapter
def dispatch(self, request, *args, **kwargs):
"""
Dispatch the request to the selected backend's view.
"""
backend = self.select_backend(request)
view = self.get_view_for_backend(backend)
return view(request, *args, **kwargs)
def select_backend(self, request):
"""
Given a request that specifies an oauth `client_id`, return the adapter
for the appropriate OAuth handling library. If the client_id is found
in a django-oauth-toolkit (DOT) Application, use the DOT adapter,
otherwise use the django-oauth2-provider (DOP) adapter, and allow the
calls to fail normally if the client does not exist.
"""
return self.get_adapter(request).backend
def get_view_for_backend(self, backend):
"""
Return the appropriate view from the requested backend.
"""
if backend == self.dot_adapter.backend:
return self.dot_view.as_view()
elif backend == self.dop_adapter.backend:
return self.dop_view.as_view()
else:
raise KeyError('Failed to dispatch view. Invalid backend {}'.format(backend))
def _get_client_id(self, request):
"""
Return the client_id from the provided request
"""
if request.method == u'GET':
return request.GET.get('client_id')
else:
return request.POST.get('client_id')
class AccessTokenView(RatelimitMixin, _DispatchingView):
"""
Handle access token requests.
"""
dot_view = dot_views.TokenView
dop_view = dop_views.AccessTokenView
ratelimit_key = 'openedx.core.djangoapps.util.ratelimit.real_ip'
ratelimit_rate = settings.RATELIMIT_RATE
ratelimit_block = True
ratelimit_method = ALL
def dispatch(self, request, *args, **kwargs):
response = super(AccessTokenView, self).dispatch(request, *args, **kwargs)
if response.status_code == 200 and request.POST.get('token_type', '').lower() == 'jwt':
client_id = self._get_client_id(request)
adapter = self.get_adapter(request)
expires_in, scopes, user = self._decompose_access_token_response(adapter, response)
issuer, secret, audience, filters, is_client_restricted = self._get_client_specific_claims(
client_id,
adapter
)
content = {
'access_token': JwtBuilder(
user,
secret=secret,
issuer=issuer,
).build_token(
scopes,
expires_in,
aud=audience,
additional_claims={
'filters': filters,
'is_restricted': is_client_restricted,
},
),
'expires_in': expires_in,
'token_type': 'JWT',
'scope': ' '.join(scopes),
}
response.content = json.dumps(content)
return response
def _decompose_access_token_response(self, adapter, response):
""" Decomposes the access token in the request to an expiration date, scopes, and User. """
content = json.loads(response.content)
access_token = content['access_token']
scope = content['scope']
scopes = scope.split(' ')
user = adapter.get_access_token(access_token).user
expires_in = content['expires_in']
return expires_in, scopes, user
def _get_client_specific_claims(self, client_id, adapter):
""" Get claims that are specific to the client. """
# If JWT scope enforcement is enabled, we need to sign tokens
# given to restricted application with a separate secret which
# other IDAs do not have access to. This prevents restricted
# applications from getting access to API endpoints available
# on other IDAs which have not yet been protected with the
# scope-related DRF permission classes. Once all endpoints have
# been protected we can remove this if/else and go back to using
# a single secret.
# TODO: ARCH-162
is_client_restricted = adapter.is_client_restricted(client_id)
if ENFORCE_JWT_SCOPES.is_enabled() and is_client_restricted:
issuer_setting = 'RESTRICTED_APPLICATION_JWT_ISSUER'
else:
issuer_setting = 'DEFAULT_JWT_ISSUER'
jwt_issuer = getattr(settings, issuer_setting)
filters = adapter.get_authorization_filters(client_id)
return jwt_issuer['ISSUER'], jwt_issuer['SECRET_KEY'], jwt_issuer['AUDIENCE'], filters, is_client_restricted
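    # Sketch of the issuer settings shape assumed by the lookups above (keys
    # inferred from this method; the values here are placeholders):
    #     DEFAULT_JWT_ISSUER = {
    #         'ISSUER': 'https://example.com/oauth2',
    #         'SECRET_KEY': '...',
    #         'AUDIENCE': '...',
    #     }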
class AuthorizationView(_DispatchingView):
"""
Part of the authorization flow.
"""
dop_view = dop_views.Capture
dot_view = dot_overrides_views.EdxOAuth2AuthorizationView
class AccessTokenExchangeView(_DispatchingView):
"""
Exchange a third party auth token.
"""
dop_view = auth_exchange_views.DOPAccessTokenExchangeView
dot_view = auth_exchange_views.DOTAccessTokenExchangeView
class RevokeTokenView(_DispatchingView):
"""
Dispatch to the RevokeTokenView of django-oauth-toolkit
"""
dot_view = dot_views.RevokeTokenView
class ProviderInfoView(View):
def get(self, request, *args, **kwargs):
data = {
'issuer': settings.JWT_AUTH['JWT_ISSUER'],
'authorization_endpoint': request.build_absolute_uri(reverse('authorize')),
'token_endpoint': request.build_absolute_uri(reverse('access_token')),
'end_session_endpoint': request.build_absolute_uri(reverse('logout')),
'token_endpoint_auth_methods_supported': ['client_secret_post'],
# NOTE (CCB): This is not part of the OpenID Connect standard. It is added here since we
# use JWS for our access tokens.
'access_token_signing_alg_values_supported': ['RS512', 'HS256'],
'scopes_supported': ['openid', 'profile', 'email'],
'claims_supported': ['sub', 'iss', 'name', 'given_name', 'family_name', 'email'],
'jwks_uri': request.build_absolute_uri(reverse('jwks')),
}
response = JsonResponse(data)
return response
class JwksView(View):
@staticmethod
def serialize_rsa_key(key):
kid = hashlib.md5(key.encode('utf-8')).hexdigest()
key = RSAKey(kid=kid, key=RSA.importKey(key), use='sig', alg='RS512')
return key.serialize(private=False)
def get(self, request, *args, **kwargs):
secret_keys = []
if settings.JWT_PRIVATE_SIGNING_KEY:
secret_keys.append(settings.JWT_PRIVATE_SIGNING_KEY)
# NOTE: We provide the expired keys in case there are unexpired access tokens
# that need to have their signatures verified.
if settings.JWT_EXPIRED_PRIVATE_SIGNING_KEYS:
secret_keys += settings.JWT_EXPIRED_PRIVATE_SIGNING_KEYS
return JsonResponse({
'keys': [self.serialize_rsa_key(key) for key in secret_keys if key],
})
|
hubert667/AIR
|
refs/heads/master
|
build/celery/build/lib.linux-i686-2.7/celery/tests/contrib/test_rdb.py
|
2
|
from __future__ import absolute_import
import errno
import socket
from celery.contrib.rdb import (
Rdb,
debugger,
set_trace,
)
from celery.tests.case import Case, Mock, WhateverIO, patch, skip_if_pypy
class SockErr(socket.error):
errno = None
class test_Rdb(Case):
@patch('celery.contrib.rdb.Rdb')
def test_debugger(self, Rdb):
x = debugger()
self.assertTrue(x)
self.assertIs(x, debugger())
@patch('celery.contrib.rdb.debugger')
@patch('celery.contrib.rdb._frame')
def test_set_trace(self, _frame, debugger):
self.assertTrue(set_trace(Mock()))
self.assertTrue(set_trace())
self.assertTrue(debugger.return_value.set_trace.called)
@patch('celery.contrib.rdb.Rdb.get_avail_port')
@skip_if_pypy
def test_rdb(self, get_avail_port):
sock = Mock()
get_avail_port.return_value = (sock, 8000)
sock.accept.return_value = (Mock(), ['helu'])
out = WhateverIO()
rdb = Rdb(out=out)
self.assertTrue(get_avail_port.called)
self.assertIn('helu', out.getvalue())
# set_quit
with patch('sys.settrace') as settrace:
rdb.set_quit()
settrace.assert_called_with(None)
# set_trace
with patch('celery.contrib.rdb.Pdb.set_trace') as pset:
with patch('celery.contrib.rdb._frame'):
rdb.set_trace()
rdb.set_trace(Mock())
pset.side_effect = SockErr
pset.side_effect.errno = errno.ECONNRESET
rdb.set_trace()
pset.side_effect.errno = errno.ENOENT
with self.assertRaises(SockErr):
rdb.set_trace()
# _close_session
rdb._close_session()
# do_continue
rdb.set_continue = Mock()
rdb.do_continue(Mock())
rdb.set_continue.assert_called_with()
# do_quit
rdb.set_quit = Mock()
rdb.do_quit(Mock())
rdb.set_quit.assert_called_with()
@patch('socket.socket')
@skip_if_pypy
def test_get_avail_port(self, sock):
out = WhateverIO()
sock.return_value.accept.return_value = (Mock(), ['helu'])
Rdb(out=out)
with patch('celery.contrib.rdb.current_process') as curproc:
curproc.return_value.name = 'PoolWorker-10'
Rdb(out=out)
err = sock.return_value.bind.side_effect = SockErr()
err.errno = errno.ENOENT
with self.assertRaises(SockErr):
Rdb(out=out)
err.errno = errno.EADDRINUSE
with self.assertRaises(Exception):
Rdb(out=out)
called = [0]
def effect(*a, **kw):
try:
if called[0] > 50:
return True
raise err
finally:
called[0] += 1
sock.return_value.bind.side_effect = effect
Rdb(out=out)
|
mkieszek/odoo
|
refs/heads/master
|
addons/website_membership/controllers/__init__.py
|
7372
|
import main
|
jejimenez/django
|
refs/heads/master
|
tests/migrations/models.py
|
386
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class CustomModelBase(models.base.ModelBase):
pass
class ModelWithCustomBase(six.with_metaclass(CustomModelBase, models.Model)):
pass
@python_2_unicode_compatible
class UnicodeModel(models.Model):
title = models.CharField('ÚÑÍ¢ÓÐÉ', max_length=20, default='“Ðjáñgó”')
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
verbose_name = 'úñí©óðé µóðéø'
verbose_name_plural = 'úñí©óðé µóðéøß'
def __str__(self):
return self.title
class Unserializable(object):
"""
An object that migration doesn't know how to serialize.
"""
pass
class UnserializableModel(models.Model):
title = models.CharField(max_length=20, default=Unserializable())
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
class UnmigratedModel(models.Model):
"""
A model that is in a migration-less app (which this app is
if its migrations directory has not been repointed)
"""
pass
class EmptyManager(models.Manager):
use_in_migrations = True
class FoodQuerySet(models.query.QuerySet):
pass
class BaseFoodManager(models.Manager):
def __init__(self, a, b, c=1, d=2):
super(BaseFoodManager, self).__init__()
self.args = (a, b, c, d)
class FoodManager(BaseFoodManager.from_queryset(FoodQuerySet)):
use_in_migrations = True
class NoMigrationFoodManager(BaseFoodManager.from_queryset(FoodQuerySet)):
pass
|
hip-odoo/odoo
|
refs/heads/10.0
|
addons/website_slides/models/res_config.py
|
24
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class website_config_settings(models.TransientModel):
_inherit = "website.config.settings"
website_slide_google_app_key = fields.Char(string='Google Doc Key')
@api.model
def get_default_website_slide_google_app_key(self, fields):
website_slide_google_app_key = False
if 'website_slide_google_app_key' in fields:
website_slide_google_app_key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
return {
'website_slide_google_app_key': website_slide_google_app_key
}
@api.multi
def set_website_slide_google_app_key(self):
for wizard in self:
self.env['ir.config_parameter'].sudo().set_param('website_slides.google_app_key', wizard.website_slide_google_app_key)
|
theiviaxx/Frog
|
refs/heads/master
|
frog/views/badge.py
|
1
|
##################################################################################################
# Copyright (c) 2020 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
import json
import pathlib
from django.http import JsonResponse
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import login_required, permission_required
from frog.common import Result
from frog.models import Badge, Tag
from frog import getRoot
from frog.uploader import handle_uploaded_file
@login_required
@require_http_methods(["GET", "POST", "DELETE"])
def index(request, badge_id=None):
if request.method == "GET":
return get(request)
elif request.method == "POST":
return post(request)
elif request.method == "DELETE":
return delete(request, badge_id)
def get(request):
res = Result()
for badge in Badge.objects.all():
res.append(badge.json())
return JsonResponse(res.asDict())
@permission_required('frog.change_badge')
def post(request):
res = Result()
data = json.loads(request.POST["body"])
tag = Tag.objects.get(name=data['tag'])
badge = Badge.objects.get_or_create(tag=tag)[0]
if request.FILES.get("image"):
incomingfilename = pathlib.Path(request.FILES["image"].name)
filename = '{}{}'.format(tag.name, incomingfilename.suffix)
dest = getRoot() / "badges" / filename
if not dest.parent.exists():
dest.parent.makedirs_p()
handle_uploaded_file(dest, request.FILES["image"])
badge.image = "badges/{}".format(filename)
if badge:
badge.save()
res.append(badge.json())
else:
res.isError = True
res.message = "No badge found"
return JsonResponse(res.asDict())
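# Request shape handled by post() above (sketch): the POST field "body" holds
# a JSON string such as '{"tag": "approved"}', optionally accompanied by a
# multipart upload under FILES["image"].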
@permission_required('frog.change_badge')
def delete(request, badge_id):
res = Result()
badge = Badge.objects.get(pk=badge_id)
badge.delete()
return JsonResponse(res.asDict())
|
dpiers/coderang-meteor
|
refs/heads/master
|
public/jsrepl/extern/python/reloop-closured/lib/python2.7/antigravity.py
|
235
|
import webbrowser
webbrowser.open("http://xkcd.com/353/")
|
rogerthat-platform/gae-plugin-framework
|
refs/heads/master
|
src/framework/server/framework/wsgi.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
import time
from Cookie import BaseCookie
from urllib import urlencode
import webapp2
from google.appengine.api import urlfetch
from bizz.session import validate_session
from framework.bizz.authentication import set_user
from framework.errors.plugins import PluginNotFoundException
from framework.i18n_utils import get_available_languages, DEFAULT_LANGUAGE
from framework.plugin_loader import get_auth_plugin
from framework.setup_functions import start_suppressing
from framework.utils.cookie import parse_cookie
from mcfw.consts import DEBUG
AUTH_LVL_PUBLIC = 'public'
AUTH_LVL_PRIVATE = 'private'
AUTH_LVL_ADMIN = 'admin'
AUTH_LVLS = (AUTH_LVL_PUBLIC, AUTH_LVL_PRIVATE, AUTH_LVL_ADMIN)
# Copied from webob.Request
def _get_cookies(environ):
"""
Return a *plain* dictionary of cookies as found in the request.
"""
source = environ.get('HTTP_COOKIE', '')
if 'webob._parsed_cookies' in environ:
vars_, var_source = environ['webob._parsed_cookies']
if var_source == source:
return vars_
vars_ = {}
if source:
cookies = BaseCookie()
cookies.load(source)
for name in cookies:
vars_[name] = cookies[name].value
environ['webob._parsed_cookies'] = (vars_, source)
return vars_
# End Copied from webob.Request
def _get_browser_language(environ):
lang = environ.get('HTTP_ACCEPT_LANGUAGE', DEFAULT_LANGUAGE)
if lang not in get_available_languages():
lang = DEFAULT_LANGUAGE
return lang
class WSGIApplication(webapp2.WSGIApplication):
def __init__(self, handlers, name=None, auth=None):
super(WSGIApplication, self).__init__(handlers, debug=DEBUG)
self.name = name
self.auth = auth
def __call__(self, environ, start_response):
return self._call_in_context(environ, start_response)
def _call_in_context(self, environ, start_response):
if not self.auth:
            raise Exception('Authentication level not set for %s' % environ['PATH_INFO'])
if self.auth not in AUTH_LVLS:
            raise Exception('Unknown authentication level %s for %s' % (self.auth, environ['PATH_INFO']))
session_, user_id = self.get_user(environ)
if self.auth == AUTH_LVL_PRIVATE:
if not user_id:
return self.authenticate(environ, start_response)
browser_language = _get_browser_language(environ)
set_user(user_id, session_, u'%s' % browser_language)
if environ['PATH_INFO'] == '/_ah/queue/deferred':
start_suppressing()
urlfetch.set_default_fetch_deadline(30)
if DEBUG:
start_time = time.time()
result = webapp2.WSGIApplication.__call__(self, environ, start_response)
took_time = time.time() - start_time
logging.info('{0} - {1} - {2:.3f}s'.format(environ['PATH_INFO'], self.name, took_time))
return result
return webapp2.WSGIApplication.__call__(self, environ, start_response)
def get_user(self, environ):
cookies = _get_cookies(environ)
try:
auth_plugin = get_auth_plugin()
# Session handled by auth plugin (optional)
session = auth_plugin.get_session(environ)
if session:
return session, session.user_id
# Fallback to cookie authentication in case auth plugin didn't return a session
            if auth_plugin.get_cookie_name() not in cookies:
return None, None
except PluginNotFoundException:
return None, None
secret = parse_cookie(cookies[auth_plugin.get_cookie_name()])
if not secret:
return None, None
session_, user_id = validate_session(secret)
if not session_:
return None, None
return session_, user_id
def authenticate(self, environ, start_response):
query_string = environ['QUERY_STRING'] if environ['QUERY_STRING'] else environ['PATH_INFO']
environ['QUERY_STRING'] = urlencode((('continue', environ['PATH_INFO'] + '?' + query_string),))
environ['PATH_INFO'] = u'/login_required'
logging.info('Not authenticated redirecting to %s', environ['PATH_INFO'])
return webapp2.WSGIApplication.__call__(self, environ, start_response)
|
saknis/upelis
|
refs/heads/master
|
siteoff2.py
|
1
|
#!/usr/bin/env python
#
import os
import datetime
from google.appengine.api import users
from google.appengine.ext import db
class DinCode(db.Model):
user = db.UserProperty()
time = db.DateTimeProperty(auto_now_add=True)
ipadresas = db.StringProperty()
codename = db.StringProperty(required=True)
codetext = db.TextProperty()
try:
codelist = db.GqlQuery("SELECT * FROM DinCode WHERE codename = :1", "start")
for code in codelist:
code.delete()
codelist = db.GqlQuery("SELECT * FROM DinCode WHERE codename = :1", "disable")
for code in codelist:
code.delete()
except Exception:
    # klaida ("error" in Lithuanian): cleanup failed; deliberately ignored
    klaida = True
now = datetime.datetime.now()
code = DinCode(codename="start")
code.time = now
code.ipadresas = os.environ['REMOTE_ADDR']
code.codetext = "False"
code.user = users.get_current_user()
code.put()
coded = DinCode(codename="disable")
coded.time = now
coded.ipadresas = os.environ['REMOTE_ADDR']
coded.codetext = "<html><body>Disable</body><html>"
coded.user = users.get_current_user()
coded.put()
|
klusark/android_external_chromium_org
|
refs/heads/cm-11.0
|
tools/telemetry/unittest_data/discoverable_classes/dummy_profile_creator.py
|
29
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import profile_creator
class DummyProfileCreator(profile_creator.ProfileCreator):
def CreateProfile(self):
pass
|
comicxmz001/LeetCode
|
refs/heads/master
|
Python/74_Search2DMatrix.py
|
1
|
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if len(matrix) == 0:
return False
if len(matrix[0]) == 0:
return False
        # Staircase search from the top-right corner: moving down increases
        # the current value, moving left decreases it, so every comparison
        # discards a whole row or column. O(m + n) time overall.
        row = 0
        col = len(matrix[0]) - 1
while row < len(matrix) and col >= 0:
if matrix[row][col] < target:
row += 1
elif matrix[row][col] > target:
col -= 1
else:
return True
return False
n = 51
m = [[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]]
print Solution().searchMatrix(m, n)
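# Worked trace for target 51 above: start at the top-right value 7; 7 < 51 so
# drop to row 1 (20), then row 2 (50); 50 < 51 pushes row past the last index,
# so the loop ends and False is printed.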
|
alessandrod/txloadbalancer
|
refs/heads/master
|
txlb/test/__init__.py
|
12133432
| |
shubhdev/openedx
|
refs/heads/master
|
common/djangoapps/external_auth/__init__.py
|
12133432
| |
tbeadle/django
|
refs/heads/master
|
tests/backends/__init__.py
|
12133432
| |
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/testData/formatter/beforeTopLevelClass.py
|
83
|
from unittest import TestCase
class MyTest(TestCase):
def test_pass(self):
self.assertEqual(1 + 1, 2)
|
shridharmishra4/rename
|
refs/heads/master
|
newdesign/new.py
|
1
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'new.ui'
#
# Created: Tue Sep 24 04:55:01 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(501, 662)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.lineEdit = QtGui.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(0, 0, 401, 31))
self.lineEdit.setReadOnly(True)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.browse = QtGui.QPushButton(self.centralwidget)
self.browse.setGeometry(QtCore.QRect(400, 0, 101, 27))
self.browse.setFlat(True)
self.browse.setObjectName(_fromUtf8("browse"))
self.scrollold = QtGui.QScrollArea(self.centralwidget)
self.scrollold.setGeometry(QtCore.QRect(0, 30, 251, 561))
self.scrollold.setWidgetResizable(True)
self.scrollold.setObjectName(_fromUtf8("scrollold"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 249, 559))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.scrollold.setWidget(self.scrollAreaWidgetContents)
self.verticalScrollBar = QtGui.QScrollBar(self.centralwidget)
self.verticalScrollBar.setGeometry(QtCore.QRect(250, 30, 21, 561))
self.verticalScrollBar.setOrientation(QtCore.Qt.Vertical)
self.verticalScrollBar.setObjectName(_fromUtf8("verticalScrollBar"))
self.scrollnew = QtGui.QScrollArea(self.centralwidget)
self.scrollnew.setGeometry(QtCore.QRect(270, 30, 231, 561))
self.scrollnew.setWidgetResizable(True)
self.scrollnew.setObjectName(_fromUtf8("scrollnew"))
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 229, 559))
self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
self.scrollnew.setWidget(self.scrollAreaWidgetContents_2)
self.rename = QtGui.QPushButton(self.centralwidget)
self.rename.setGeometry(QtCore.QRect(0, 590, 501, 27))
self.rename.setFlat(True)
self.rename.setObjectName(_fromUtf8("rename"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 501, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.lineEdit, QtCore.SIGNAL(_fromUtf8("textChanged(QString)")), self.lineEdit.selectAll)
QtCore.QObject.connect(self.browse, QtCore.SIGNAL(_fromUtf8("clicked()")), self.browse.show)
QtCore.QObject.connect(self.rename, QtCore.SIGNAL(_fromUtf8("clicked()")), self.rename.click)
QtCore.QObject.connect(self.rename, QtCore.SIGNAL(_fromUtf8("clicked()")), self.rename.show)
QtCore.QObject.connect(self.scrollnew, QtCore.SIGNAL(_fromUtf8("customContextMenuRequested(QPoint)")), self.scrollnew.show)
QtCore.QObject.connect(self.scrollold, QtCore.SIGNAL(_fromUtf8("customContextMenuRequested(QPoint)")), self.scrollold.show)
QtCore.QObject.connect(self.verticalScrollBar, QtCore.SIGNAL(_fromUtf8("sliderMoved(int)")), self.verticalScrollBar.show)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Renamer!", None))
self.browse.setText(_translate("MainWindow", "Browse", None))
self.rename.setText(_translate("MainWindow", "Rename", None))
|
xiandiancloud/edxplaltfom-xusong
|
refs/heads/master
|
lms/djangoapps/courseware/features/certificates.py
|
20
|
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from lettuce.django import django_url
from course_modes.models import CourseMode
from nose.tools import assert_equal
UPSELL_LINK_CSS = '.message-upsell a.action-upgrade[href*="edx/999/Certificates"]'
def create_cert_course():
world.clear_courses()
org = 'edx'
number = '999'
name = 'Certificates'
course_id = '{org}/{number}/{name}'.format(
org=org, number=number, name=name)
world.scenario_dict['course_id'] = course_id
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org=org, number=number, display_name=name)
audit_mode = world.CourseModeFactory.create(
course_id=course_id,
mode_slug='audit',
mode_display_name='audit course',
min_price=0,
)
assert isinstance(audit_mode, CourseMode)
    verified_mode = world.CourseModeFactory.create(
course_id=course_id,
mode_slug='verified',
mode_display_name='verified cert course',
min_price=16,
suggested_prices='32,64,128',
currency='usd',
)
    assert isinstance(verified_mode, CourseMode)
def register():
url = 'courses/{org}/{number}/{name}/about'.format(
org='edx', number='999', name='Certificates')
world.browser.visit(django_url(url))
world.css_click('section.intro a.register')
assert world.is_css_present('section.wrapper h3.title')
@step(u'the course has an honor mode')
def the_course_has_an_honor_mode(step):
create_cert_course()
honor_mode = world.CourseModeFactory.create(
course_id=world.scenario_dict['course_id'],
mode_slug='honor',
mode_display_name='honor mode',
min_price=0,
)
assert isinstance(honor_mode, CourseMode)
@step(u'I select the audit track$')
def select_the_audit_track(step):
create_cert_course()
register()
btn_css = 'input[value="Select Audit"]'
world.wait(1) # TODO remove this after troubleshooting JZ
world.css_find(btn_css)
world.css_click(btn_css)
def select_contribution(amount=32):
radio_css = 'input[value="{}"]'.format(amount)
world.css_click(radio_css)
assert world.css_find(radio_css).selected
def click_verified_track_button():
world.wait_for_ajax_complete()
btn_css = 'input[value="Select Certificate"]'
world.css_click(btn_css)
@step(u'I select the verified track for upgrade')
def select_verified_track_upgrade(step):
select_contribution(32)
world.wait_for_ajax_complete()
btn_css = 'input[value="Upgrade Your Registration"]'
world.css_click(btn_css)
# TODO: might want to change this depending on the changes for upgrade
assert world.is_css_present('section.progress')
@step(u'I select the verified track$')
def select_the_verified_track(step):
create_cert_course()
register()
select_contribution(32)
click_verified_track_button()
assert world.is_css_present('section.progress')
@step(u'I should see the course on my dashboard$')
def should_see_the_course_on_my_dashboard(step):
course_css = 'li.course-item'
assert world.is_css_present(course_css)
@step(u'I go to step "([^"]*)"$')
def goto_next_step(step, step_num):
btn_css = {
'1': '#face_next_button',
'2': '#face_next_link',
'3': '#photo_id_next_link',
'4': '#pay_button',
}
next_css = {
'1': 'div#wrapper-facephoto.carousel-active',
'2': 'div#wrapper-idphoto.carousel-active',
'3': 'div#wrapper-review.carousel-active',
'4': 'div#wrapper-review.carousel-active',
}
world.css_click(btn_css[step_num])
# Pressing the button will advance the carousel to the next item
# and give the wrapper div the "carousel-active" class
assert world.css_find(next_css[step_num])
@step(u'I capture my "([^"]*)" photo$')
def capture_my_photo(step, name):
# Hard coded red dot image
image_data = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=='
snapshot_script = "$('#{}_image')[0].src = '{}';".format(name, image_data)
# Mirror the javascript of the photo_verification.html page
world.browser.execute_script(snapshot_script)
world.browser.execute_script("$('#{}_capture_button').hide();".format(name))
world.browser.execute_script("$('#{}_reset_button').show();".format(name))
world.browser.execute_script("$('#{}_approve_button').show();".format(name))
assert world.css_find('#{}_approve_button'.format(name))
@step(u'I approve my "([^"]*)" photo$')
def approve_my_photo(step, name):
button_css = {
'face': 'div#wrapper-facephoto li.control-approve',
'photo_id': 'div#wrapper-idphoto li.control-approve',
}
wrapper_css = {
'face': 'div#wrapper-facephoto',
'photo_id': 'div#wrapper-idphoto',
}
# Make sure that the carousel is in the right place
assert world.css_has_class(wrapper_css[name], 'carousel-active')
assert world.css_find(button_css[name])
# HACK: for now don't bother clicking the approve button for
# id_photo, because it is sending you back to Step 1.
# Come back and figure it out later. JZ Aug 29 2013
    if name == 'face':
world.css_click(button_css[name])
# Make sure you didn't advance the carousel
assert world.css_has_class(wrapper_css[name], 'carousel-active')
@step(u'I select a contribution amount$')
def select_contribution_amount(step):
select_contribution(32)
@step(u'I confirm that the details match$')
def confirm_details_match(step):
# First you need to scroll down on the page
# to make the element visible?
# Currently chrome is failing with ElementNotVisibleException
world.browser.execute_script("window.scrollTo(0,1024)")
cb_css = 'input#confirm_pics_good'
world.css_click(cb_css)
assert world.css_find(cb_css).checked
@step(u'I am at the payment page')
def at_the_payment_page(step):
world.wait_for_present('input[name=transactionSignature]')
@step(u'I submit valid payment information$')
def submit_payment(step):
# First make sure that the page is done if it still executing
# an ajax query.
world.wait_for_ajax_complete()
button_css = 'input[value=Submit]'
world.css_click(button_css)
@step(u'I have submitted face and ID photos$')
def submitted_face_and_id_photos(step):
step.given('I am logged in')
step.given('I select the verified track')
step.given('I go to step "1"')
step.given('I capture my "face" photo')
step.given('I approve my "face" photo')
step.given('I go to step "2"')
step.given('I capture my "photo_id" photo')
step.given('I approve my "photo_id" photo')
step.given('I go to step "3"')
@step(u'I have submitted photos to verify my identity')
def submitted_photos_to_verify_my_identity(step):
step.given('I have submitted face and ID photos')
step.given('I select a contribution amount')
step.given('I confirm that the details match')
step.given('I go to step "4"')
@step(u'I submit my photos and confirm')
def submit_photos_and_confirm(step):
step.given('I go to step "1"')
step.given('I capture my "face" photo')
step.given('I approve my "face" photo')
step.given('I go to step "2"')
step.given('I capture my "photo_id" photo')
step.given('I approve my "photo_id" photo')
step.given('I go to step "3"')
step.given('I select a contribution amount')
step.given('I confirm that the details match')
step.given('I go to step "4"')
@step(u'I see that my payment was successful')
def see_that_my_payment_was_successful(step):
title = world.css_find('div.wrapper-content-main h3.title')
assert_equal(title.text, u'Congratulations! You are now verified on edX.')
@step(u'I navigate to my dashboard')
def navigate_to_my_dashboard(step):
world.css_click('span.avatar')
assert world.css_find('section.my-courses')
@step(u'I see the course on my dashboard')
def see_the_course_on_my_dashboard(step):
course_link_css = 'section.my-courses a[href*="edx/999/Certificates"]'
assert world.is_css_present(course_link_css)
@step(u'I see the upsell link on my dashboard')
def see_upsell_link_on_my_dashboard(step):
course_link_css = UPSELL_LINK_CSS
assert world.is_css_present(course_link_css)
@step(u'I do not see the upsell link on my dashboard')
def do_not_see_upsell_link_on_my_dashboard(step):
course_link_css = UPSELL_LINK_CSS
assert world.is_css_not_present(course_link_css)
@step(u'I select the upsell link on my dashboard')
def select_upsell_link_on_my_dashboard(step):
# expand the upsell section
world.css_click('.message-upsell')
course_link_css = UPSELL_LINK_CSS
# click the actual link
world.css_click(course_link_css)
@step(u'I see that I am on the verified track')
def see_that_i_am_on_the_verified_track(step):
id_verified_css = 'li.course-item article.course.verified'
assert world.is_css_present(id_verified_css)
@step(u'I leave the flow and return$')
def leave_the_flow_and_return(step):
world.visit('verify_student/verified/edx/999/Certificates/')
@step(u'I am at the verified page$')
def see_the_payment_page(step):
assert world.css_find('button#pay_button')
@step(u'I edit my name$')
def edit_my_name(step):
btn_css = 'a.retake-photos'
world.css_click(btn_css)
@step(u'I select the honor code option$')
def select_the_honor_code_option(step):
register()
link_css = 'h5 i.expandable-icon'
world.css_click(link_css)
cb_css = 'input#honor-code'
world.css_click(cb_css)
btn_css = 'input[value="Select Certificate"]'
world.css_click(btn_css)
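# A hypothetical .feature snippet exercising the steps defined above:
#   Given I have submitted photos to verify my identity
#   When I submit valid payment information
#   Then I see that my payment was successful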
|
duqiao/django
|
refs/heads/master
|
django/core/management/commands/dbshell.py
|
467
|
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
class Command(BaseCommand):
help = ("Runs the command-line client for specified database, or the "
"default database if none is provided.")
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to '
'open a shell. Defaults to the "default" database.')
def handle(self, **options):
connection = connections[options.get('database')]
try:
connection.client.runshell()
except OSError:
# Note that we're assuming OSError means that the client program
# isn't installed. There's a possibility OSError would be raised
# for some other reason, in which case this error message would be
# inaccurate. Still, this message catches the common case.
raise CommandError('You appear not to have the %r program installed or on your path.' %
connection.client.executable_name)
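# Usage sketch (assumes a configured Django project; the flag matches
# add_arguments above):
#   python manage.py dbshell --database=default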
|
madmack/i747_kernel_ics
|
refs/heads/modified_dhd
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
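# Test specifications are colon-separated lines which the parser below reads as
#   command : opcode : threadid : data
# e.g. "C: schedfifo: 0: 80" issues a command and "W: locked: 0: 0" waits for a
# status value (examples are illustrative, inferred from the parser, not taken
# from a real testfile).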
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
vizual54/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/scipy/optimize/_minpack.py
|
53
|
import sys
if sys.platform == 'cli':
import clr
clr.AddReference('optimize')
from scipy__optimize___minpack import _lmdif, _chkder, _hybrd, _hybrj, _lmder
from scipy__optimize___minpack import *
|
atplanet/ansible-modules-extras
|
refs/heads/devel
|
cloud/lxc/__init__.py
|
12133432
| |
MohammedWasim/scikit-learn
|
refs/heads/master
|
sklearn/tree/tests/__init__.py
|
12133432
| |
Regner/will
|
refs/heads/master
|
will/storage/__init__.py
|
12133432
| |
mrrookes/arbitools
|
refs/heads/master
|
build/lib/arbitools/__init__.py
|
12133432
| |
xujun10110/golismero
|
refs/heads/master
|
thirdparty_libs/django/conf/locale/lt/__init__.py
|
12133432
| |
atul-bhouraskar/django
|
refs/heads/master
|
tests/reverse_lookup/models.py
|
282
|
"""
Reverse lookups
This demonstrates the reverse lookup features of the database API.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class User(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Poll(models.Model):
question = models.CharField(max_length=200)
creator = models.ForeignKey(User, models.CASCADE)
def __str__(self):
return self.question
@python_2_unicode_compatible
class Choice(models.Model):
name = models.CharField(max_length=100)
poll = models.ForeignKey(Poll, models.CASCADE, related_name="poll_choice")
related_poll = models.ForeignKey(Poll, models.CASCADE, related_name="related_choice")
def __str__(self):
return self.name
|
akionakamura/scikit-learn
|
refs/heads/master
|
benchmarks/bench_multilabel_metrics.py
|
86
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
CLOUGH/info3180-project-3
|
refs/heads/master
|
server/lib/werkzeug/contrib/lint.py
|
318
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string('wsgi.error.write()', s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
            self.write(line)
def close(self):
warn(WSGIWarning('application closed the error stream!'),
stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string('write()', s)
        self._write(s)
self._chunks.append(len(s))
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
self._next = iter(iterator).next
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def next(self):
if self.closed:
warn(WSGIWarning('iterated over closed app_iter'),
stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(WSGIWarning('Application returned before it '
'started the response'), stacklevel=2)
check_string('application iterator items', rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, 'close'):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get('content-length', type=int)
if status_code == 304:
for key, value in headers:
key = key.lower()
if key not in ('expires', 'content-location') and \
is_entity_header(key):
warn(HTTPWarning('entity header %r found in 304 '
'response' % key))
if bytes_sent:
warn(HTTPWarning('304 responses must not have a body'))
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
if bytes_sent:
warn(HTTPWarning('%r responses must not have a body' %
status_code))
elif content_length is not None and content_length != bytes_sent:
warn(WSGIWarning('Content-Length and the number of bytes '
'sent to the client do not match.'))
def __del__(self):
if not self.closed:
try:
warn(WSGIWarning('Iterator was garbage collected before '
'it was closed.'))
except Exception:
pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
    Among other things it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(WSGIWarning('WSGI environment is not a standard python dict.'),
stacklevel=4)
for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once'):
if key not in environ:
warn(WSGIWarning('required environment key %r not found'
% key), stacklevel=3)
if environ['wsgi.version'] != (1, 0):
warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
stacklevel=3)
script_name = environ.get('SCRIPT_NAME', '')
if script_name and script_name[:1] != '/':
warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
% script_name), stacklevel=3)
path_info = environ.get('PATH_INFO', '')
if path_info[:1] != '/':
warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
% path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
if len(status) < 4 or status[3] != ' ':
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status), stacklevel=3)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning('header list is not a list'), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Headers must be a list of 2-item tuples'),
stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning('header items must be strings'),
stacklevel=3)
if name.lower() == 'status':
warn(WSGIWarning('The status header is not supported due to '
'conflicts with the CGI spec.'),
stacklevel=3)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get('etag')
if etag is not None:
if etag.startswith('w/'):
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
location = headers.get('location')
if location is not None:
if not urlparse(location).netloc:
warn(HTTPWarning('absolute URLs required for location header'),
stacklevel=4)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(WSGIWarning('application returned string. Response will '
'send character for character to the client '
'which will kill the performance. Return a '
'list or iterable instead.'), stacklevel=3)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
if kwargs:
warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
stacklevel=2)
environ, start_response = args
self.check_environ(environ)
environ['wsgi.input'] = InputStream(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
# hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length
environ['wsgi.file_wrapper'] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
if kwargs:
warn(WSGIWarning('no keyword arguments allowed.'))
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers,
exc_info)
return GuardedWrite(start_response(status, headers, exc_info),
chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
|
AeroNotix/algostructure
|
refs/heads/master
|
Python/btree.py
|
1
|
import random
import sys
sys.setrecursionlimit(2000000)
class BinaryTree(object):
def __init__(self):
self.value = None
self.left = None
self.right = None
def add(self, element):
        if self.value is None:  # explicit None check so a stored 0 is not treated as an empty node
self.left = BinaryTree()
self.right = BinaryTree()
self.value = element
return
if element > self.value:
return self.right.add(element)
return self.left.add(element)
def walk(self, li=None):
if li is None:
li = []
        if self.value is None:
return li
if self.left:
self.left.walk(li)
li.append(self.value)
if self.right:
self.right.walk(li)
return li
if __name__ == '__main__':
purepython = BinaryTree()
for x in range(1000000):
purepython.add(random.randint(1,x+1))
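    # Sanity check (illustrative addition): an in-order walk of a binary
    # search tree yields its values in sorted order.
    values = purepython.walk()
    assert values == sorted(values)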
|
StrikeForceZero/PJSip-CSharp
|
refs/heads/master
|
tests/pjsua/scripts-call/150_srtp_2_3.py
|
57
|
# $Id: 150_srtp_2_3.py 3334 2010-10-05 16:32:04Z nanang $
#
from inc_cfg import *
test_param = TestParam(
"Callee=mandatory SRTP, caller=optional (with duplicated offer) SRTP",
[
InstanceParam("callee", "--null-audio --use-srtp=2 --srtp-secure=0 --max-calls=1"),
InstanceParam("caller", "--null-audio --use-srtp=3 --srtp-secure=0 --max-calls=1")
]
)
|
atvcaptain/enigma2
|
refs/heads/6.5
|
lib/python/Components/PluginComponent.py
|
1
|
from __future__ import print_function
import os
from shutil import rmtree
from bisect import insort
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS
from Tools.Import import my_import
from Tools.Profile import profile
from Plugins.Plugin import PluginDescriptor
import keymapparser
class PluginComponent:
firstRun = True
restartRequired = False
def __init__(self):
self.plugins = {}
self.pluginList = [ ]
self.installedPluginList = [ ]
self.setPluginPrefix("Plugins.")
self.resetWarnings()
def setPluginPrefix(self, prefix):
self.prefix = prefix
def addPlugin(self, plugin):
if self.firstRun or not plugin.needsRestart:
self.pluginList.append(plugin)
for x in plugin.where:
insort(self.plugins.setdefault(x, []), plugin)
if x == PluginDescriptor.WHERE_AUTOSTART:
plugin(reason=0)
else:
self.restartRequired = True
def removePlugin(self, plugin):
self.pluginList.remove(plugin)
for x in plugin.where:
self.plugins[x].remove(plugin)
if x == PluginDescriptor.WHERE_AUTOSTART:
plugin(reason=1)
def readPluginList(self, directory):
"""enumerates plugins"""
new_plugins = []
for c in os.listdir(directory):
directory_category = os.path.join(directory, c)
if not os.path.isdir(directory_category):
continue
for pluginname in os.listdir(directory_category):
if pluginname == "__pycache__":
continue
path = os.path.join(directory_category, pluginname)
if os.path.isdir(path):
profile('plugin '+pluginname)
try:
plugin = my_import('.'.join(["Plugins", c, pluginname, "plugin"]))
plugins = plugin.Plugins(path=path)
except Exception as exc:
print("Plugin ", c + "/" + pluginname, "failed to load:", exc)
						# suppress errors due to missing plugin.py* files (badly removed plugin)
						for fn in ('plugin.py', 'plugin.pyc', 'plugin.pyo'):
if os.path.exists(os.path.join(path, fn)):
self.warnings.append( (c + "/" + pluginname, str(exc)) )
from traceback import print_exc
print_exc()
break
else:
if not pluginname == "WebInterface":
print("Plugin probably removed, but not cleanly in", path)
print("trying to remove:", path)
rmtree(path)
continue
# allow single entry not to be a list
if not isinstance(plugins, list):
plugins = [ plugins ]
for p in plugins:
p.path = path
p.updateIcon(path)
new_plugins.append(p)
keymap = os.path.join(path, "keymap.xml")
if fileExists(keymap):
try:
keymapparser.readKeymap(keymap)
except Exception as exc:
print("keymap for plugin %s/%s failed to load: " % (c, pluginname), exc)
self.warnings.append( (c + "/" + pluginname, str(exc)) )
# build a diff between the old list of plugins and the new one
# internally, the "fnc" argument will be compared with __eq__
plugins_added = [p for p in new_plugins if p not in self.pluginList]
plugins_removed = [p for p in self.pluginList if not p.internal and p not in new_plugins]
#ignore already installed but reloaded plugins
for p in plugins_removed:
for pa in plugins_added:
if pa.path == p.path and pa.where == p.where:
pa.needsRestart = False
for p in plugins_removed:
self.removePlugin(p)
for p in plugins_added:
if self.firstRun or p.needsRestart is False:
self.addPlugin(p)
else:
for installed_plugin in self.installedPluginList:
if installed_plugin.path == p.path:
if installed_plugin.where == p.where:
p.needsRestart = False
self.addPlugin(p)
if self.firstRun:
self.firstRun = False
self.installedPluginList = self.pluginList
def getPlugins(self, where):
"""Get list of plugins in a specific category"""
if not isinstance(where, list):
# if not a list, we're done quickly, because the
# lists are already sorted
return self.plugins.get(where, [])
res = []
# Efficiently merge two sorted lists together, though this
# appears to never be used in code anywhere...
for x in where:
for p in self.plugins.get(x, []):
insort(res, p)
return res
def getPluginsForMenu(self, menuid):
res = [ ]
for p in self.getPlugins(PluginDescriptor.WHERE_MENU):
res += p(menuid)
return res
def getDescriptionForMenuEntryID(self, menuid, entryid):
for p in self.getPlugins(PluginDescriptor.WHERE_MENU):
if p(menuid) and isinstance(p(menuid), (list, tuple)):
if p(menuid)[0][2] == entryid:
return p.description
def clearPluginList(self):
self.pluginList = []
self.plugins = {}
def reloadPlugins(self, dummy=False):
self.clearPluginList()
self.readPluginList(resolveFilename(SCOPE_PLUGINS))
def shutdown(self):
for p in self.pluginList[:]:
self.removePlugin(p)
def resetWarnings(self):
self.warnings = [ ]
def getNextWakeupTime(self, getPluginIdent=False):
wakeup = -1
pident = ""
for p in self.pluginList:
current = p.getWakeupTime()
if current > -1 and (wakeup > current or wakeup == -1):
wakeup = current
pident = str(p.name) + " | " + str(p.path and p.path.split('/')[-1])
if getPluginIdent:
return int(wakeup), pident
return int(wakeup)
plugins = PluginComponent()
|
marcsans/cnn-physics-perception
|
refs/heads/master
|
phy/lib/python2.7/site-packages/theano/tensor/slinalg.py
|
6
|
import logging
import warnings
from six.moves import xrange
import numpy
try:
import scipy.linalg
imported_scipy = True
except ImportError:
# some ops (e.g. Cholesky, Solve, A_Xinv_b) won't work
imported_scipy = False
from theano import tensor
import theano.tensor
from theano.tensor import as_tensor_variable
from theano.gof import Op, Apply
logger = logging.getLogger(__name__)
MATRIX_STRUCTURES = (
'general',
'symmetric',
'lower_triangular',
'upper_triangular',
'hermitian',
'banded',
'diagonal',
'toeplitz')
class Cholesky(Op):
"""
Return a triangular matrix square root of positive semi-definite `x`.
L = cholesky(X, lower=True) implies dot(L, L.T) == X.
"""
# TODO: inplace
# TODO: for specific dtypes
# TODO: LAPACK wrapper with in-place behavior, for solve also
__props__ = ('lower', 'destructive')
def __init__(self, lower=True):
self.lower = lower
self.destructive = False
def infer_shape(self, node, shapes):
return [shapes[0]]
def make_node(self, x):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Cholesky op")
x = as_tensor_variable(x)
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
x = inputs[0]
z = outputs[0]
z[0] = scipy.linalg.cholesky(x, lower=self.lower).astype(x.dtype)
def grad(self, inputs, gradients):
return [CholeskyGrad(self.lower)(inputs[0], self(inputs[0]),
gradients[0])]
cholesky = Cholesky()
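# Usage sketch (symbolic, per the docstring above): for a positive
# semi-definite matrix variable X,
#   L = cholesky(X)              # lower-triangular, dot(L, L.T) == X
#   U = Cholesky(lower=False)(X)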
class CholeskyGrad(Op):
"""
"""
__props__ = ('lower', 'destructive')
def __init__(self, lower=True):
self.lower = lower
self.destructive = False
def make_node(self, x, l, dz):
x = as_tensor_variable(x)
l = as_tensor_variable(l)
dz = as_tensor_variable(dz)
assert x.ndim == 2
assert l.ndim == 2
assert dz.ndim == 2
assert l.owner.op.lower == self.lower, (
"lower/upper mismatch between Cholesky op and CholeskyGrad op"
)
return Apply(self, [x, l, dz], [x.type()])
def perform(self, node, inputs, outputs):
"""
Implements the "reverse-mode" gradient [1]_ for the
Cholesky factorization of a positive-definite matrix.
References
----------
.. [1] S. P. Smith. "Differentiation of the Cholesky Algorithm".
Journal of Computational and Graphical Statistics,
Vol. 4, No. 2 (Jun.,1995), pp. 134-147
http://www.jstor.org/stable/1390762
"""
x = inputs[0]
L = inputs[1]
dz = inputs[2]
dx = outputs[0]
N = x.shape[0]
if self.lower:
F = numpy.tril(dz)
for k in xrange(N - 1, -1, -1):
for j in xrange(k + 1, N):
for i in xrange(j, N):
F[i, k] -= F[i, j] * L[j, k]
F[j, k] -= F[i, j] * L[i, k]
for j in xrange(k + 1, N):
F[j, k] /= L[k, k]
F[k, k] -= L[j, k] * F[j, k]
F[k, k] /= (2 * L[k, k])
else:
F = numpy.triu(dz)
for k in xrange(N - 1, -1, -1):
for j in xrange(k + 1, N):
for i in xrange(j, N):
F[k, i] -= F[j, i] * L[k, j]
F[k, j] -= F[j, i] * L[k, i]
for j in xrange(k + 1, N):
F[k, j] /= L[k, k]
F[k, k] -= L[k, j] * F[k, j]
F[k, k] /= (2 * L[k, k])
dx[0] = F
def infer_shape(self, node, shapes):
return [shapes[0]]
class Solve(Op):
"""
Solve a system of linear equations.
"""
__props__ = ('A_structure', 'lower', 'overwrite_A', 'overwrite_b')
def __init__(self,
A_structure='general',
lower=False,
overwrite_A=False,
overwrite_b=False):
if A_structure not in MATRIX_STRUCTURES:
raise ValueError('Invalid matrix structure argument', A_structure)
self.A_structure = A_structure
self.lower = lower
self.overwrite_A = overwrite_A
self.overwrite_b = overwrite_b
def __repr__(self):
return 'Solve{%s}' % str(self._props())
def make_node(self, A, b):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Solve op")
A = as_tensor_variable(A)
b = as_tensor_variable(b)
assert A.ndim == 2
assert b.ndim in [1, 2]
otype = tensor.tensor(
broadcastable=b.broadcastable,
dtype=(A * b).dtype)
return Apply(self, [A, b], [otype])
def perform(self, node, inputs, output_storage):
A, b = inputs
if self.A_structure == 'lower_triangular':
rval = scipy.linalg.solve_triangular(
A, b, lower=True)
elif self.A_structure == 'upper_triangular':
rval = scipy.linalg.solve_triangular(
A, b, lower=False)
else:
rval = scipy.linalg.solve(A, b)
output_storage[0][0] = rval
# computes shape of x where x = inv(A) * b
def infer_shape(self, node, shapes):
Ashape, Bshape = shapes
rows = Ashape[1]
if len(Bshape) == 1: # b is a Vector
return [(rows,)]
else:
cols = Bshape[1] # b is a Matrix
return [(rows, cols)]
solve = Solve() # general solve
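# Usage sketch: x = solve(A, b) builds the symbolic solution of A x = b;
# e.g. Solve(A_structure='lower_triangular')(A, b) makes perform() dispatch
# to scipy.linalg.solve_triangular instead of scipy.linalg.solve.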
# TODO : SolveTriangular
# TODO: Optimizations to replace multiplication by matrix inverse
# with solve() Op (still unwritten)
class Eigvalsh(Op):
"""
Generalized eigenvalues of a Hermitian positive definite eigensystem.
"""
__props__ = ('lower',)
def __init__(self, lower=True):
assert lower in [True, False]
self.lower = lower
def make_node(self, a, b):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Eigvalsh op")
if b == theano.tensor.NoneConst:
a = as_tensor_variable(a)
assert a.ndim == 2
out_dtype = theano.scalar.upcast(a.dtype)
w = theano.tensor.vector(dtype=out_dtype)
return Apply(self, [a], [w])
else:
a = as_tensor_variable(a)
b = as_tensor_variable(b)
assert a.ndim == 2
assert b.ndim == 2
out_dtype = theano.scalar.upcast(a.dtype, b.dtype)
w = theano.tensor.vector(dtype=out_dtype)
return Apply(self, [a, b], [w])
def perform(self, node, inputs, outputs):
(w,) = outputs
if len(inputs) == 2:
w[0] = scipy.linalg.eigvalsh(a=inputs[0], b=inputs[1], lower=self.lower)
else:
w[0] = scipy.linalg.eigvalsh(a=inputs[0], b=None, lower=self.lower)
def grad(self, inputs, g_outputs):
a, b = inputs
gw, = g_outputs
return EigvalshGrad(self.lower)(a, b, gw)
def infer_shape(self, node, shapes):
n = shapes[0][0]
return [(n,)]
class EigvalshGrad(Op):
"""
Gradient of generalized eigenvalues of a Hermitian positive definite
eigensystem.
"""
# Note: This Op (EigvalshGrad), should be removed and replaced with a graph
# of theano ops that is constructed directly in Eigvalsh.grad.
# But this can only be done once scipy.linalg.eigh is available as an Op
# (currently the Eigh uses numpy.linalg.eigh, which doesn't let you
# pass the right-hand-side matrix for a generalized eigenproblem.) See the
# discussion on github at
# https://github.com/Theano/Theano/pull/1846#discussion-diff-12486764
__props__ = ('lower',)
def __init__(self, lower=True):
assert lower in [True, False]
self.lower = lower
if lower:
self.tri0 = numpy.tril
self.tri1 = lambda a: numpy.triu(a, 1)
else:
self.tri0 = numpy.triu
self.tri1 = lambda a: numpy.tril(a, -1)
def make_node(self, a, b, gw):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the GEigvalsh op")
a = as_tensor_variable(a)
b = as_tensor_variable(b)
gw = as_tensor_variable(gw)
assert a.ndim == 2
assert b.ndim == 2
assert gw.ndim == 1
out_dtype = theano.scalar.upcast(a.dtype, b.dtype, gw.dtype)
out1 = theano.tensor.matrix(dtype=out_dtype)
out2 = theano.tensor.matrix(dtype=out_dtype)
return Apply(self, [a, b, gw], [out1, out2])
def perform(self, node, inputs, outputs):
(a, b, gw) = inputs
w, v = scipy.linalg.eigh(a, b, lower=self.lower)
gA = v.dot(numpy.diag(gw).dot(v.T))
gB = - v.dot(numpy.diag(gw * w).dot(v.T))
# See EighGrad comments for an explanation of these lines
out1 = self.tri0(gA) + self.tri1(gA).T
out2 = self.tri0(gB) + self.tri1(gB).T
outputs[0][0] = numpy.asarray(out1, dtype=node.outputs[0].dtype)
outputs[1][0] = numpy.asarray(out2, dtype=node.outputs[1].dtype)
def infer_shape(self, node, shapes):
return [shapes[0], shapes[1]]
def eigvalsh(a, b, lower=True):
return Eigvalsh(lower)(a, b)
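# Usage sketch: w = eigvalsh(a, b) gives the generalized eigenvalues of the
# pair (a, b); passing b=theano.tensor.NoneConst selects the ordinary
# eigenvalue problem (see the branch in Eigvalsh.make_node above).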
def kron(a, b):
""" Kronecker product.
Same as scipy.linalg.kron(a, b).
Parameters
----------
a: array_like
b: array_like
Returns
-------
array_like with a.ndim + b.ndim - 2 dimensions
Notes
-----
numpy.kron(a, b) != scipy.linalg.kron(a, b)!
They don't have the same shape and order when
a.ndim != b.ndim != 2.
"""
a = tensor.as_tensor_variable(a)
b = tensor.as_tensor_variable(b)
if (a.ndim + b.ndim <= 2):
raise TypeError('kron: inputs dimensions must sum to 3 or more. '
'You passed %d and %d.' % (a.ndim, b.ndim))
o = tensor.outer(a, b)
o = o.reshape(tensor.concatenate((a.shape, b.shape)),
a.ndim + b.ndim)
shf = o.dimshuffle(0, 2, 1, * list(range(3, o.ndim)))
if shf.ndim == 3:
shf = o.dimshuffle(1, 0, 2)
o = shf.flatten()
else:
o = shf.reshape((o.shape[0] * o.shape[2],
o.shape[1] * o.shape[3]) +
tuple(o.shape[i] for i in xrange(4, o.ndim)))
return o
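# Usage sketch: for 2-d inputs the result shape multiplies elementwise,
# e.g. kron of a (2, 3) and a (4, 5) matrix yields an (8, 15) matrix,
# matching scipy.linalg.kron.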
class Expm(Op):
"""
Compute the matrix exponential of a square array.
"""
__props__ = ()
def make_node(self, A):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Expm op")
A = as_tensor_variable(A)
assert A.ndim == 2
expm = theano.tensor.matrix(dtype=A.dtype)
return Apply(self, [A, ], [expm, ])
def perform(self, node, inputs, outputs):
(A,) = inputs
(expm,) = outputs
expm[0] = scipy.linalg.expm(A)
def grad(self, inputs, outputs):
(A,) = inputs
(g_out,) = outputs
return [ExpmGrad()(A, g_out)]
def infer_shape(self, node, shapes):
return [shapes[0]]
class ExpmGrad(Op):
"""
Gradient of the matrix exponential of a square array.
"""
__props__ = ()
def make_node(self, A, gw):
assert imported_scipy, (
"Scipy not available. Scipy is needed for the Expm op")
A = as_tensor_variable(A)
assert A.ndim == 2
out = theano.tensor.matrix(dtype=A.dtype)
return Apply(self, [A, gw], [out, ])
def infer_shape(self, node, shapes):
return [shapes[0]]
def perform(self, node, inputs, outputs):
# Kalbfleisch and Lawless, J. Am. Stat. Assoc. 80 (1985) Equation 3.4
# Kind of... You need to do some algebra from there to arrive at
# this expression.
(A, gA) = inputs
(out,) = outputs
w, V = scipy.linalg.eig(A, right=True)
U = scipy.linalg.inv(V).T
exp_w = numpy.exp(w)
X = numpy.subtract.outer(exp_w, exp_w) / numpy.subtract.outer(w, w)
numpy.fill_diagonal(X, exp_w)
Y = U.dot(V.T.dot(gA).dot(U) * X).dot(V.T)
with warnings.catch_warnings():
warnings.simplefilter("ignore", numpy.ComplexWarning)
out[0] = Y.astype(A.dtype)
expm = Expm()
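# Usage sketch (symbolic): e = expm(M) evaluates scipy.linalg.expm in
# perform() and is differentiable through ExpmGrad above.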
|
crunchmail/munch-core
|
refs/heads/master
|
src/munch/urls.py
|
1
|
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls.i18n import i18n_patterns
import rest_framework.urls
from rest_framework_jwt.views import refresh_jwt_token
import munch.apps.abuse.urls
import munch.apps.users.urls
import munch.apps.hosted.urls
import munch.apps.optouts.urls
import munch.apps.tracking.urls
from munch.core.views import api_root
from munch.apps.users.api.v1.views import ObtainJSONWebToken
api_urlpatterns_v1 = [
url(r'^$', api_root, name='api-root'),
]
api_urlpatterns_v2 = [
url(r'^$', api_root, name='api-root'),
]
urlpatterns = [
url(r'^auth/', include(
rest_framework.urls, namespace='rest_framework')),
url(r'^api-token-auth', ObtainJSONWebToken.as_view()),
url(r'^api-token-refresh', refresh_jwt_token),
url(r'^abuse/', include(munch.apps.abuse.urls)),
url(r'^t/', include(munch.apps.tracking.urls)),
url(r'^h/', include(munch.apps.optouts.urls)),
url(r'^account/', include(munch.apps.users.urls)),
url(r'', include(munch.apps.hosted.urls)),
url(r'^v1/', include(api_urlpatterns_v1, namespace='v1')),
# url(r'^v2/', include(api_urlpatterns_v2, namespace='v2'))
]
urlpatterns += i18n_patterns(url(r'^admin/', admin.site.urls))
|
Partoo/scrapy
|
refs/heads/master
|
scrapy/interfaces.py
|
15
|
from zope.interface import Interface
class ISpiderManager(Interface):
def from_settings(settings):
"""Returns an instance of the class for the given settings"""
def load(spider_name):
"""Returns the Spider class for the given spider name. If the spider
name is not found, it must raise a KeyError."""
def list():
"""Return a list with the names of all spiders available in the
project"""
def find_by_request(request):
"""Returns the list of spiders names that can handle the given request"""
|
whiteclover/white
|
refs/heads/master
|
white/lang/en_GB/page.py
|
4
|
t = {
'page': 'Pages',
'create_page': 'Create a new page',
'nopages_desc': 'You don\'t have any pages.',
'redirect': 'Redirect',
# form fields
'redirect_url': 'Redirect Url',
'redirect_missing': 'Please enter a valid url',
'title': 'Page title',
'title_explain': '',
'title_missing': 'Please enter a page title',
'content': 'Content',
'content_explain': 'Your page\'s content. Uses Markdown.',
'show_in_menu': 'Show In Menu',
'show_in_menu_explain': '',
'name': 'Name',
'name_explain': '',
'slug': 'Slug',
'slug_explain': 'Slug uri to identify your page, should only contain ascii characters',
'slug_missing': 'Please enter a slug uri, slugs can only contain ascii characters',
'slug_duplicate': 'Slug already exists',
'slug_invalid': 'Slug must contain letters',
'status': 'Status',
'status_explain': '',
'parent': 'Parent',
'parent_explain': '',
# messages
'updated': 'Your page was updated.',
'created': 'Your page was created.',
'deleted': 'Your page was deleted.'
}
|
yohanko88/gem5-DC
|
refs/heads/master
|
src/mem/ruby/network/BasicRouter.py
|
53
|
# Copyright (c) 2011 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from ClockedObject import ClockedObject
class BasicRouter(ClockedObject):
type = 'BasicRouter'
cxx_header = "mem/ruby/network/BasicRouter.hh"
router_id = Param.Int("ID in relation to other routers")
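# Hypothetical config snippet: topologies typically instantiate one router
# per node, e.g.
#   routers = [BasicRouter(router_id=i) for i in range(4)]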
|
heathy/ProjectEuler
|
refs/heads/master
|
projecteuler/tests/__init__.py
|
12133432
| |
omwomotieno/tunza_v3
|
refs/heads/work_branch
|
reports/migrations/__init__.py
|
12133432
| |
Khushmeet/mlsite
|
refs/heads/master
|
app/load.py
|
2
|
import numpy as np
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
from collections import Counter
import tweepy
import boto3.session
import _pickle
import h5py
import gc
import tempfile
session = boto3.session.Session(region_name='ap-south-1')
s3client = session.client('s3', config=boto3.session.Config(signature_version='s3v4', region_name='ap-south-1'),
aws_access_key_id='**',
aws_secret_access_key='**+T')
def most_common(lst):
return max(set(lst), key=lst.count)
def load_from_s3(key):
    response = s3client.get_object(Bucket='mlsite-bucket', Key=key)
    body = response['Body']
    if '.h5' in key:
        # h5py/Keras need a real file: boto3 returns a StreamingBody, which
        # open() cannot consume, so spool it to a temporary file first
        with tempfile.NamedTemporaryFile(suffix='.h5', delete=False) as tmp:
            tmp.write(body.read())
        detector = load_model(tmp.name)
    else:
        detector = _pickle.loads(body.read())
    return detector
def load_offline(path):
    with open(path, 'rb') as f:
        dump = _pickle.load(f)
    return dump
word2index = load_offline('app/static/models/word2index.pkl')
vectorizer = load_offline('app/static/models/vectorizer.pkl')
def init_model():
lstm_model = load_model('app/static/models/lstm.h5')
cnn_model = load_model('app/static/models/cnn.h5')
cnn_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
lstm_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
graph = tf.get_default_graph()
return lstm_model, cnn_model, graph
lmodel, cnn, graph = init_model()
logistic = load_offline('app/static/models/logisticreg.pkl')
adaboost = load_offline('app/static/models/adaboost.pkl')
bernoulli = load_offline('app/static/models/bernoullinb.pkl')
decisiontree = load_offline('app/static/models/decisiontree.pkl')
gradientboost = load_offline('app/static/models/gradientboost.pkl')
knn = load_offline('app/static/models/knn.pkl')
randomforest = load_offline('app/static/models/randomforest.pkl')
multinomialnb = load_offline('app/static/models/multinomialnb.pkl')
svm10 = load_offline('app/static/models/svm10.pkl')
auth = tweepy.OAuthHandler('hXJ8TwQzVya3yYwQN1GNvGNNp', 'diX9CFVOOfWNli2KTAYY13vZVJgw1sYlEeOTxsLsEb2x73oI8S')
auth.set_access_token('2155329456-53H1M9QKqlQbEkLExgVgkeallweZ9N74Aigm9Kh',
'waDPwamuPkYHFLdVNZ5YF2SNWuYfGHDVFue6bEbEGjTZb')
api = tweepy.API(auth)
def clean(query):
return vectorizer.transform([query])
# def pencode(text):
# vector = np.zeros(12429)
# for i, word in enumerate(text.split(' ')):
# try:
# vector[word2index[word]] = 1
# except KeyError:
# vector[i] = 0
# return vector
def lencode(text):
vector = []
for word in text.split(' '):
try:
vector.append(word2index[word])
except KeyError:
vector.append(0)
padded_seq = pad_sequences([vector], maxlen=100, value=0.)
return padded_seq
def word_feats(text):
return dict([(word, True) for word in text.split(' ')])
def predictor(query):
clean_query = clean(query)
ada = adaboost.predict(clean_query)
ber = bernoulli.predict(clean_query)
lg = logistic.predict(clean_query)
dt = decisiontree.predict(clean_query)
gb = gradientboost.predict(clean_query.toarray())
knnp = knn.predict(clean_query)
rf = randomforest.predict(clean_query)
mnb = multinomialnb.predict(clean_query)
svm = svm10.predict(clean_query)
with graph.as_default():
lout = lmodel.predict(lencode(query))
cnn_out = cnn.predict(lencode(query))
lout = np.argmax(lout, axis=1)
cnn_out = np.argmax(cnn_out, axis=1)
return [ada.tolist()[0],
ber.tolist()[0],
dt.tolist()[0],
gb.tolist()[0],
knnp.tolist()[0],
rf.tolist()[0],
mnb.tolist()[0],
lg.tolist()[0],
svm.tolist()[0],
lout.tolist()[0],
cnn_out.tolist()[0]]
def get_most_count(x):
return Counter(x).most_common()[0][0]
def processing_results(query):
predict_list = []
line_sentiment = []
for t in query:
p = predictor(t)
line_sentiment.append(most_common(p))
predict_list.append(p)
data = {'AdaBoost': 0,
'BernoulliNB': 0,
'DecisionTree': 0,
'GradientBoost': 0,
'KNNeighbors': 0,
'RandomForest': 0,
'MultinomialNB': 0,
'Logistic Regression': 0,
'SVM': 0,
'LSTM network': 0,
'Convolutional Neural Network': 0}
# overal per sentence
predict_list = np.array(predict_list)
i = 0
for key in data:
data[key] = get_most_count(predict_list[:, i])
i += 1
# all the sentences with 3 emotions
predict_list = predict_list.tolist()
emotion_sents = [0, 0, 0]
for p in predict_list:
if most_common(p) == 0:
emotion_sents[0] += 1
elif most_common(p) == 1:
emotion_sents[1] += 1
else:
emotion_sents[2] += 1
# overall score
score = most_common(list(data.values()))
gc.collect()
return data, emotion_sents, score, line_sentiment, query, len(query)
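# Usage sketch (hypothetical input):
#   data, per_sentence, score, lines, q, n = \
#       processing_results(["great movie", "terrible plot"])
# 'data' maps each model name to its majority vote across the sentences,
# 'per_sentence' tallies the three emotion classes, 'score' is the overall vote.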
|
ramaganapathy1/AMuDA-Ir-back-end
|
refs/heads/master
|
vEnv/lib/python2.7/site-packages/werkzeug/routing.py
|
87
|
# -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
By default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible with the given
method (for example there were only rules for `GET` and `HEAD` and the
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import difflib
import re
import uuid
import posixpath
from pprint import pformat
from threading import Lock
from werkzeug.urls import url_encode, url_quote, url_join
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed, \
BadHost
from werkzeug._internal import _get_environ, _encode_idna
from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \
text_type, string_types, native_string_result, \
implements_to_string, wsgi_decoding_dance
from werkzeug.datastructures import ImmutableDict, MultiDict
from werkzeug.utils import cached_property
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE | re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in '"\'':
value = value[1:-1]
return text_type(value)
def parse_converter_args(argstr):
argstr += ','
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group('stringval')
if value is None:
value = item.group('value')
value = _pythonize(value)
if not item.group('name'):
args.append(value)
else:
name = item.group('name')
kwargs[name] = value
return tuple(args), kwargs
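# Examples (derived from the regex above):
#   parse_converter_args("length=2") == ((), {'length': 2})
#   parse_converter_args("2, 'x'") == ((2, 'x'), {})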
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
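# Example: list(parse_rule('/browse/<int:id>/')) yields
#   (None, None, '/browse/'), ('int', None, 'id'), (None, None, '/')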
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
`strict_slashes` are activated and an url that requires a trailing slash.
The attribute `new_url` contains the absolute destination url.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException):
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
@implements_to_string
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method, adapter=None):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.adapter = adapter
@cached_property
def suggested(self):
return self.closest_rule(self.adapter)
def closest_rule(self, adapter):
def _score_rule(rule):
return sum([
0.98 * difflib.SequenceMatcher(
None, rule.endpoint, self.endpoint
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods)
])
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=_score_rule)
def __str__(self):
message = []
message.append('Could not build url for endpoint %r' % self.endpoint)
if self.method:
message.append(' (%r)' % self.method)
if self.values:
message.append(' with values %r' % sorted(self.values.keys()))
message.append('.')
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if self.method and self.method not in self.suggested.methods:
message.append(' Did you mean to use methods %r?' % sorted(
self.suggested.methods
))
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
' Did you forget to specify values %r?' %
sorted(missing_values)
)
else:
message.append(
' Did you mean %r instead?' % self.suggested.endpoint
)
return u''.join(message)
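# An illustrative sketch (hypothetical endpoint names): when building fails,
# the resulting BuildError suggests the closest known rule, scored mostly by
# difflib name similarity:
#
#   m = Map([Rule('/', endpoint='index')])
#   m.bind('example.com').build('indx')
#   # raises BuildError: "Could not build url for endpoint 'indx'.
#   #                     Did you mean 'index' instead?"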
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here a small example for such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
        Set this to True and the rule will never match but will create a URL
        that can be built. This is useful if you have resources on a subdomain
        or folder that are not handled by the WSGI application (like static
        data).
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None, alias=False, host=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
if isinstance(methods, str):
raise TypeError('param `methods` should be `Iterable[str]`, not `str`')
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""
Return an unbound copy of this rule.
        This can be useful if you want to reuse an already bound URL for
        another map. See ``get_empty_kwargs`` to override what keyword
        arguments are provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
"""
Provides kwargs for instantiating empty copy with empty()
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(defaults=defaults, subdomain=self.subdomain,
methods=self.methods, build_only=self.build_only,
endpoint=self.endpoint, strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to, alias=self.alias,
host=self.host)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError('the converter %r does not exist' % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, 'rule not bound'
if self.map.host_matching:
domain_rule = self.host or ''
else:
domain_rule = self.subdomain or ''
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
def _build_regex(rule):
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split('/'):
if part:
self._weights.append((0, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(
variable, converter, c_args, c_kwargs)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append((1, convobj.weight))
self.arguments.add(str(variable))
_build_regex(domain_rule)
regex_parts.append('\\|')
self._trace.append((False, '|'))
_build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
if not self.is_leaf:
self._trace.append((False, '/'))
if self.build_only:
return
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path, method=None):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__') and \
(method is None or self.methods is None or
method in self.methods):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
        If building doesn't work for some reason `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(url_quote(to_bytes(data, self.map.charset), safe='/:|+'))
domain_part, url = (u''.join(tmp)).split(u'|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += u'?' + url_encode(query_vars, charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return domain_part, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if method is not None and self.methods is not None \
and method not in self.methods:
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
        # in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. The more complex rules come first so the second argument is the
negative length of the number of weights.
3. lastly we order by the actual weights.
:internal:
"""
return bool(self.arguments), -len(self._weights), self._weights
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return self.alias and 1 or 0, -len(self.arguments), \
-len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u'<%s (unbound)>' % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u'<%s>' % data)
else:
tmp.append(data)
return u'<%s %s%s -> %s>' % (
self.__class__.__name__,
repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'),
self.methods is not None
and u' (%s)' % u', '.join(self.methods)
or u'',
self.endpoint
)
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, charset=self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
    only one path segment. Thus the string cannot include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
or equal 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-' \
r'[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}'
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter,
'uuid': UUIDConverter,
}
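# An illustrative sketch (hypothetical converter, not part of this module):
# the mapping above can be extended per map through the `converters` argument,
# e.g. a converter that matches comma-separated lists:
#
#   class ListConverter(BaseConverter):
#       regex = '[^/]+(?:,[^/]+)*'
#
#       def to_python(self, value):
#           return value.split(',')
#
#       def to_url(self, value):
#           return ','.join(value)
#
#   url_map = Map([Rule('/tags/<list:tags>', endpoint='tags')],
#                 converters={'list': ListConverter})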
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
    .. versionadded:: 0.5
        `sort_parameters` and `sort_key` were added.
    .. versionadded:: 0.7
        `encoding_errors` and `host_matching` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None,
query_args=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
        not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError('host matching enabled and a '
'subdomain was provided')
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
try:
server_name = _encode_idna(server_name)
except UnicodeError:
raise BadHost()
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as the `server_name`, with the subdomain
        feature disabled.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
        objects. Additionally, `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if 'HTTP_HOST' in environ:
wsgi_server_name = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' \
and wsgi_server_name.endswith(':80'):
wsgi_server_name = wsgi_server_name[:-3]
elif environ['wsgi.url_scheme'] == 'https' \
and wsgi_server_name.endswith(':443'):
wsgi_server_name = wsgi_server_name[:-4]
else:
wsgi_server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + environ['SERVER_PORT']
wsgi_server_name = wsgi_server_name.lower()
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string('SCRIPT_NAME')
path_info = _get_wsgi_string('PATH_INFO')
query_args = _get_wsgi_string('QUERY_STRING')
return Map.bind(self, server_name, script_name,
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], path_info,
query_args=query_args)
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args=None):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u'/'):
script_name += u'/'
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here a small example for the dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False,
query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
          matching. A `NotFound` exception is also a WSGI application you
          can call to get a default "page not found" page (it happens to be
          the same object as `werkzeug.exceptions.NotFound`).
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
          attribute. This exception is used to notify you about a redirect
          that Werkzeug requests from your WSGI application. This is for
          example the case if you request ``/foo`` although the correct URL
          is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u'%s|%s' % (
self.map.host_matching and self.server_name or self.subdomain,
path_info and '/%s' % path_info.lstrip('/')
)
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path, method)
except RequestSlash:
raise RequestRedirect(self.make_redirect_url(
url_quote(path_info, self.map.charset,
safe='/:|+') + '/', query_args))
except RequestAliasRedirect as e:
raise RequestRedirect(self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv,
query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(url_join('%s://%s%s%s' % (
self.url_scheme or 'http',
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
        if the URL matches, or `False` if it does not.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method='--')
except MethodNotAllowed as e:
return e.valid_methods
        except HTTPException:
pass
return []
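    # An illustrative sketch (hypothetical rule): only rules that declare an
    # explicit `methods` list contribute here; a rule without one matches any
    # method, so no MethodNotAllowed is raised and the result stays empty.
    #
    #   m = Map([Rule('/submit', endpoint='submit', methods=['POST'])])
    #   m.bind('example.com').allowed_methods('/submit')   # ['POST']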
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain in case host matching is disabled or
a full host name.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, 'ascii')
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, 'ascii')
return (subdomain and subdomain + u'.' or u'') + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
            # every rule that comes after this one, including ourselves,
            # has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and \
r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(
path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme or 'http',
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
path_info.lstrip('/')),
suffix
))
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(endpoint, values, method, append_unknown=False,
force_external=True)
if query_args:
url += '?' + self.encode_query_args(query_args)
assert url != path, 'detected invalid alias setting. No canonical ' \
'URL found'
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
        which, if you set it to `True`, will force external URLs. By default
        external URLs (which include the server name) will only be used if
        the target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
        to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
valueiter = iteritems(values, multi=True)
else:
valueiter = iteritems(values)
values = dict((k, v) for k, v in valueiter if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name) or
(not self.map.host_matching and domain_part == self.subdomain)
):
return str(url_join(self.script_name, './' + path.lstrip('/')))
return str('%s//%s%s/%s' % (
self.url_scheme + ':' if self.url_scheme else '',
host,
self.script_name[:-1],
path.lstrip('/')
))
|
tigersirvine/occtigerscricket
|
refs/heads/master
|
django/contrib/localflavor/ro/forms.py
|
85
|
# -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
from __future__ import absolute_import
from django.contrib.localflavor.ro.ro_counties import COUNTIES_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
"""
A Romanian fiscal identity code (CIF) field
For CIF validation algorithm see http://www.validari.ro/cui.html
"""
default_error_messages = {
'invalid': _("Enter a valid CIF."),
}
def __init__(self, max_length=10, min_length=2, *args, **kwargs):
super(ROCIFField, self).__init__(r'^(RO)?[0-9]{2,10}', max_length,
min_length, *args, **kwargs)
def clean(self, value):
"""
CIF validation
"""
value = super(ROCIFField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# strip RO part
if value[0:2] == 'RO':
value = value[2:]
key = '753217532'[::-1]
value = value[::-1]
key_iter = iter(key)
checksum = 0
for digit in value[1:]:
            checksum += int(digit) * int(next(key_iter))
checksum = checksum * 10 % 11
if checksum == 10:
checksum = 0
if checksum != int(value[0]):
raise ValidationError(self.error_messages['invalid'])
return value[::-1]
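# Worked example (illustrative value, not a real taxpayer ID): for the input
# u'159' the code above reverses it to '951', takes '9' as the check digit and
# weights the remaining digits with the reversed key '235712357':
#   checksum = 5*2 + 1*3 = 13;  13*10 % 11 = 9  -> equals the check digit,
# so the value validates and u'159' is returned.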
class ROCNPField(RegexField):
"""
A Romanian personal identity code (CNP) field
For CNP validation algorithm see http://www.validari.ro/cnp.html
"""
default_error_messages = {
'invalid': _("Enter a valid CNP."),
}
def __init__(self, max_length=13, min_length=13, *args, **kwargs):
super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length,
min_length, *args, **kwargs)
def clean(self, value):
"""
CNP validations
"""
value = super(ROCNPField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# check birthdate digits
import datetime
try:
datetime.date(int(value[1:3]),int(value[3:5]),int(value[5:7]))
        except ValueError:
raise ValidationError(self.error_messages['invalid'])
# checksum
key = '279146358279'
checksum = 0
value_iter = iter(value)
for digit in key:
            checksum += int(digit) * int(next(value_iter))
checksum %= 11
if checksum == 10:
checksum = 1
if checksum != int(value[12]):
raise ValidationError(self.error_messages['invalid'])
return value
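# Worked example (synthetic CNP, for illustration only): for '1900101000006'
# the weighted sum with key '279146358279' over the first twelve digits is
#   2*1 + 7*9 + 4*1 + 3*1 = 72 (all other products are zero);  72 % 11 = 6,
# which equals the thirteenth digit, so the value validates.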
class ROCountyField(Field):
"""
A form field that validates its input is a Romanian county name or
abbreviation. It normalizes the input to the standard vehicle registration
abbreviation for the given county
WARNING: This field will only accept names written with diacritics; consider
    using ROCountySelect if this behavior is unacceptable for you.
Example:
Argeş => valid
Arges => invalid
"""
default_error_messages = {
'invalid': u'Enter a Romanian county code or name.',
}
def clean(self, value):
super(ROCountyField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().upper()
except AttributeError:
pass
# search for county code
for entry in COUNTIES_CHOICES:
if value in entry:
return value
# search for county name
normalized_CC = []
for entry in COUNTIES_CHOICES:
normalized_CC.append((entry[0],entry[1].upper()))
for entry in normalized_CC:
if entry[1] == value:
return entry[0]
raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
"""
A Select widget that uses a list of Romanian counties (judete) as its
choices.
"""
def __init__(self, attrs=None):
super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
"""
Romanian International Bank Account Number (IBAN) field
For Romanian IBAN validation algorithm see http://validari.ro/iban.html
"""
default_error_messages = {
'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
}
def __init__(self, max_length=40, min_length=24, *args, **kwargs):
super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Strips - and spaces, performs country code and checksum validation
"""
value = super(ROIBANField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.replace('-','')
value = value.replace(' ','')
value = value.upper()
if value[0:2] != 'RO':
raise ValidationError(self.error_messages['invalid'])
numeric_format = ''
for char in value[4:] + value[0:4]:
if char.isalpha():
numeric_format += str(ord(char) - 55)
else:
numeric_format += char
if int(numeric_format) % 97 != 1:
raise ValidationError(self.error_messages['invalid'])
return value
class ROPhoneNumberField(RegexField):
"""Romanian phone number field"""
default_error_messages = {
'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
}
def __init__(self, max_length=20, min_length=10, *args, **kwargs):
super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Strips -, (, ) and spaces. Checks the final length.
"""
value = super(ROPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.replace('-','')
value = value.replace('(','')
value = value.replace(')','')
value = value.replace(' ','')
if len(value) != 10:
raise ValidationError(self.error_messages['invalid'])
return value
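# Illustrative usage (made-up number): separators are stripped before the
# length check, so u'(021) 234-5678' becomes u'0212345678' (ten digits) and
# validates, while an input with nine digits raises ValidationError.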
class ROPostalCodeField(RegexField):
"""Romanian postal code field."""
default_error_messages = {
'invalid': _('Enter a valid postal code in the format XXXXXX'),
}
def __init__(self, max_length=6, min_length=6, *args, **kwargs):
super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
max_length, min_length, *args, **kwargs)
|
bepatient-fr/ikaaro
|
refs/heads/0.78
|
ikaaro/datatypes.py
|
3
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2008 Juan David Ibáñez Palomar <jdavid@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from base64 import decodestring, encodestring
from datetime import date, timedelta
from marshal import dumps, loads
from urllib import quote, unquote
from zlib import compress, decompress
# Import from itools
from itools.core import freeze, guess_type
from itools.datatypes import DataType, Date, Enumerate, String
from itools.fs import FileName
from itools.gettext import MSG
from itools.html import stream_to_str_as_xhtml, stream_to_str_as_html
from itools.html import xhtml_doctype, sanitize_stream, stream_is_empty
from itools.xml import XMLParser, is_xml_stream
"""This module defines some datatypes used in ikaaro, whose inclusion in
itools is not yet clear.
"""
encoding_map = {
'gzip': 'application/x-gzip',
'bzip2': 'application/x-bzip2'}
def guess_mimetype(filename, default):
"""Override itools function 'guess_type' to intercept the encoding.
"""
name, extension, language = FileName.decode(filename)
filename = FileName.encode((name, extension, None))
mimetype, encoding = guess_type(filename)
return encoding_map.get(encoding, mimetype or default)
class FileDataType(DataType):
"""FIXME This datatype is special in that it does not deserializes from
a byte string, but from a tuple. Some day we should find a correct
solution.
"""
@staticmethod
def encode(value):
"""Cannot preload anything in a file input.
"""
return None
@staticmethod
def decode(data):
"""Find out the resource class (the mimetype sent by the browser can be
minimalistic).
"""
filename, mimetype, body = data
mimetype = guess_mimetype(filename, mimetype)
return filename, mimetype, body
class Password_Datatype(DataType):
@staticmethod
def decode(data):
return decodestring(unquote(data))
@staticmethod
def encode(value):
return quote(encodestring(value))
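# Illustrative round trip: base64 encoding adds a trailing newline, which is
# then percent-quoted, e.g.
#   Password_Datatype.encode('secret')        # 'c2VjcmV0%0A'
#   Password_Datatype.decode('c2VjcmV0%0A')   # 'secret'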
class ChoosePassword_Datatype(String):
@staticmethod
def is_valid(value):
return len(value) >= 4
class CopyCookie(DataType):
default = None, freeze([])
@staticmethod
def encode(value):
return quote(compress(dumps(value), 9))
@staticmethod
    def decode(data):
        return loads(decompress(unquote(data)))
class ExpireValue(DataType):
@staticmethod
def decode(value):
return timedelta(minutes=int(value))
@staticmethod
def encode(value):
return str(int(value.total_seconds() / 60))
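# Illustrative usage: values are persisted as whole minutes, e.g.
#   ExpireValue.decode('30')                   # timedelta(minutes=30)
#   ExpireValue.encode(timedelta(hours=1))     # '60'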
class BirthDate(Date):
pass
class HexadecimalColor(String):
@staticmethod
def is_valid(value):
return value.startswith('#') and len(value) == 7
###########################################################################
# HTML
###########################################################################
xhtml_namespaces = {None: 'http://www.w3.org/1999/xhtml'}
class XHTMLBody(DataType):
"""Read and write XHTML.
"""
sanitize_html = True
    @classmethod
    def decode(cls, data):
events = XMLParser(data, namespaces=xhtml_namespaces,
doctype=xhtml_doctype)
if cls.sanitize_html is True:
events = sanitize_stream(events)
return list(events)
@staticmethod
def encode(value):
if value is None:
return ''
return stream_to_str_as_xhtml(value)
@staticmethod
def is_empty(value):
return stream_is_empty(value)
class HTMLBody(XHTMLBody):
"""TinyMCE specifics: read as XHTML, rendered as HTML.
"""
@staticmethod
def encode(value):
if value is None:
return ''
if type(value) is unicode:
return value.encode('utf-8')
if not is_xml_stream(value):
value = value.get_body().get_content_elements()
return stream_to_str_as_html(value)
###########################################################################
# Enumerates
###########################################################################
days = {
0: MSG(u'Monday'),
1: MSG(u'Tuesday'),
2: MSG(u'Wednesday'),
3: MSG(u'Thursday'),
4: MSG(u'Friday'),
5: MSG(u'Saturday'),
6: MSG(u'Sunday')}
class DaysOfWeek(Enumerate):
options = [
{'name':'1', 'value': MSG(u'Monday'), 'shortname': 'MO'},
{'name':'2', 'value': MSG(u'Tuesday'), 'shortname': 'TU'},
{'name':'3', 'value': MSG(u'Wednesday'), 'shortname': 'WE'},
{'name':'4', 'value': MSG(u'Thursday'), 'shortname': 'TH'},
{'name':'5', 'value': MSG(u'Friday'), 'shortname': 'FR'},
{'name':'6', 'value': MSG(u'Saturday'), 'shortname': 'SA'},
{'name':'7', 'value': MSG(u'Sunday'), 'shortname': 'SU'}]
@classmethod
def get_shortname(cls, name):
for option in cls.options:
if option['name'] == name:
return option['shortname']
@classmethod
def get_name_by_shortname(cls, shortname):
for option in cls.options:
if option['shortname'] == shortname:
return option['name']
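# Illustrative usage: the two lookups are symmetric, e.g.
#   DaysOfWeek.get_shortname('1')              # 'MO'
#   DaysOfWeek.get_name_by_shortname('SU')     # '7'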
class Boolean3(Enumerate):
""" Boolean 3 states : Yes/No/Any useful on search form."""
default = ''
options = [
#{'name': '', 'value': u''},
{'name': '1', 'value': MSG(u'Yes')},
{'name': '0', 'value': MSG(u'No')}]
@staticmethod
def decode(value):
        if value == '':
return None
return bool(int(value))
@staticmethod
def encode(value):
if value is True:
return '1'
elif value is False:
return '0'
return None
@staticmethod
def is_valid(value):
return value in (True, False, None)
    @classmethod
    def get_namespace(cls, name):
if name in (True, False, None):
name = Boolean3.encode(name)
return Enumerate(options=cls.get_options()).get_namespace(name)
class IntegerRange(Enumerate):
count = 4
@classmethod
def get_options(cls):
return [
{'name': str(i), 'value': str(i)} for i in range(1, cls.count) ]
class Days(IntegerRange):
count = 32
class Months(Enumerate):
options = [
{'name': '1', 'value': MSG(u'January')},
{'name': '2', 'value': MSG(u'February')},
{'name': '3', 'value': MSG(u'March')},
{'name': '4', 'value': MSG(u'April')},
{'name': '5', 'value': MSG(u'May')},
{'name': '6', 'value': MSG(u'June')},
{'name': '7', 'value': MSG(u'July')},
{'name': '8', 'value': MSG(u'August')},
{'name': '9', 'value': MSG(u'September')},
{'name': '10', 'value': MSG(u'October')},
{'name': '11', 'value': MSG(u'November')},
{'name': '12', 'value': MSG(u'December')}]
class Years(Enumerate):
start = 1900
@classmethod
def get_options(cls):
return [ {'name': str(d), 'value': str(d)}
for d in range(cls.start, date.today().year) ]
|
LaboratoireMecaniqueLille/crappy
|
refs/heads/master
|
crappy/tool/gpucorrel.py
|
1
|
# coding:utf-8
import warnings
from math import ceil
import numpy as np
from .._global import OptionalModule
try:
import cv2
except (ModuleNotFoundError, ImportError):
cv2 = OptionalModule("opencv-python")
from .fields import get_field
try:
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
from pycuda.reduction import ReductionKernel
except ImportError:
cuda = OptionalModule("pycuda",
"PyCUDA and CUDA are necessary to use GPUCorrel")
SourceModule = OptionalModule("pycuda",
"PyCUDA and CUDA are necessary to use GPUCorrel")
gpuarray = OptionalModule("pycuda",
"PyCUDA and CUDA are necessary to use GPUCorrel")
ReductionKernel = OptionalModule("pycuda",
"PyCUDA and CUDA are necessary to use GPUCorrel")
context = None
def interp_nearest(ary, ny, nx):
"""Used to interpolate the mask for each stage."""
if ary.shape == (ny, nx):
return ary
y, x = ary.shape
rx = x / nx
ry = y / ny
out = np.empty((ny, nx), dtype=np.float32)
for j in range(ny):
for i in range(nx):
out[j, i] = ary[int(ry * j + .5), int(rx * i + .5)]
return out
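# Minimal sketch of the behaviour (hypothetical values, not executed here):
# downsampling a 4x4 array to 2x2 keeps the nearest sample of each cell,
# without any averaging:
#   m = np.arange(16, dtype=np.float32).reshape(4, 4)
#   interp_nearest(m, 2, 2)  # -> [[0., 2.], [8., 10.]]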
# =======================================================================#
# = =#
# = Class CorrelStage: =#
# = =#
# =======================================================================#
class CorrelStage:
"""Run a correlation routine on an image, at a given resolution.
Note:
Multiple instances of this class are used for the pyramidal correlation in
`Correl()`.
It can be used on its own, but it is not meant to be.
"""
num = 0 # To count the instances so they get a unique number (self.num)
def __init__(self, img_size, **kwargs):
self.num = CorrelStage.num
CorrelStage.num += 1
self.verbose = kwargs.get("verbose", 0)
self.debug(2, "Initializing with resolution", img_size)
self.h, self.w = img_size
self._ready = False
self.nbIter = kwargs.get("iterations", 5)
self.showDiff = kwargs.get("show_diff", False)
if self.showDiff:
try:
import cv2
except (ModuleNotFoundError, ImportError):
cv2 = OptionalModule("opencv-python")
cv2.namedWindow("Residual", cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
self.mul = kwargs.get("mul", 3)
# These two store the size of the last resampled array
# so that the output array is allocated only once (see resample_d)
self.rX, self.rY = -1, -1
# self.loop will be incremented every time get_disp is called
# It will be used to measure performance and output some info
self.loop = 0
# Allocating stuff #
# Grid and block for kernels called with the size of the image #
# All the images and arrays in the kernels will be in order (x,y)
self.grid = (int(ceil(self.w / 32)),
int(ceil(self.h / 32)))
self.block = (int(ceil(self.w / self.grid[0])),
int(ceil(self.h / self.grid[1])), 1)
self.debug(3, "Default grid:", self.grid, "block", self.block)
# We need the number of fields to allocate the G tables #
self.Nfields = kwargs.get("Nfields")
if self.Nfields is None:
self.Nfields = len(kwargs.get("fields")[0])
# Allocating everything we need #
self.devG = []
self.devFieldsX = []
self.devFieldsY = []
for i in range(self.Nfields):
# devG stores the G arrays (to compute the search direction)
self.devG.append(gpuarray.empty(img_size, np.float32))
# devFieldsX/Y store the fields value along X and Y
self.devFieldsX.append(gpuarray.empty((self.h, self.w), np.float32))
self.devFieldsY.append(gpuarray.empty((self.h, self.w), np.float32))
# devH Stores the Hessian matrix
self.H = np.zeros((self.Nfields, self.Nfields), np.float32)
# And devHi stores its invert
self.devHi = gpuarray.empty((self.Nfields, self.Nfields), np.float32)
# devOut is written with the difference of the images
self.devOut = gpuarray.empty((self.h, self.w), np.float32)
# devX stores the value of the parameters (what is actually computed)
self.devX = gpuarray.empty(self.Nfields, np.float32)
# To store the search direction
self.devVec = gpuarray.empty(self.Nfields, np.float32)
# To store the original image on the device
self.devOrig = gpuarray.empty(img_size, np.float32)
# To store the gradient along X of the original image on the device
self.devGradX = gpuarray.empty(img_size, np.float32)
# And along Y
self.devGradY = gpuarray.empty(img_size, np.float32)
# Locating the kernel file #
kernel_file = kwargs.get("kernel_file")
if kernel_file is None:
self.debug(2, "Kernel file not specified")
from crappy import __path__ as crappy_path
kernel_file = crappy_path[0] + "/data/kernels.cu"
# Reading kernels and compiling module #
with open(kernel_file, "r") as f:
self.debug(3, "Sourcing module")
self.mod = SourceModule(f.read() % (self.w, self.h, self.Nfields))
# Assigning functions to the kernels #
# These kernels are defined in data/kernels.cu
self._resampleOrigKrnl = self.mod.get_function('resampleO')
self._resampleKrnl = self.mod.get_function('resample')
self._gradientKrnl = self.mod.get_function('gradient')
self._makeGKrnl = self.mod.get_function('makeG')
self._makeDiff = self.mod.get_function('makeDiff')
self._dotKrnl = self.mod.get_function('myDot')
self._addKrnl = self.mod.get_function('kadd')
# These ones use pyCuda reduction module to generate efficient kernels
self._mulRedKrnl = ReductionKernel(np.float32, neutral="0",
reduce_expr="a+b", map_expr="x[i]*y[i]",
arguments="float *x, float *y")
self._leastSquare = ReductionKernel(np.float32, neutral="0",
reduce_expr="a+b", map_expr="x[i]*x[i]",
arguments="float *x")
# We could have used mulRedKrnl(x, x), but this is probably faster
# Getting texture references #
self.tex = self.mod.get_texref('tex')
self.tex_d = self.mod.get_texref('tex_d')
self.texMask = self.mod.get_texref('texMask')
# Setting proper flags #
# All textures use normalized coordinates except for the mask
for t in [self.tex, self.tex_d]:
t.set_flags(cuda.TRSF_NORMALIZED_COORDINATES)
for t in [self.tex, self.tex_d, self.texMask]:
t.set_filter_mode(cuda.filter_mode.LINEAR)
t.set_address_mode(0, cuda.address_mode.BORDER)
t.set_address_mode(1, cuda.address_mode.BORDER)
# Preparing kernels for less overhead when called #
self._resampleOrigKrnl.prepare("Pii", texrefs=[self.tex])
self._resampleKrnl.prepare("Pii", texrefs=[self.tex_d])
self._gradientKrnl.prepare("PP", texrefs=[self.tex])
self._makeDiff.prepare("PPPP",
texrefs=[self.tex, self.tex_d, self.texMask])
self._addKrnl.prepare("PfP")
# Reading original image if provided #
if kwargs.get("img") is not None:
self.set_orig(kwargs.get("img"))
# Reading fields if provided #
if kwargs.get("fields") is not None:
self.set_fields(*kwargs.get("fields"))
# Reading mask if provided #
if kwargs.get("mask") is not None:
self.set_mask(kwargs.get("mask"))
def debug(self, n, *s):
"""To print debug messages.
Note:
First argument is the level of the message.
The other arguments will be displayed only if `self.verbose` is greater
than or equal to this level.
Also, the flag and the indentation reflect respectively the origin and
the level of the message.
"""
if n <= self.verbose:
s2 = ()
for i in range(len(s)):
s2 += (str(s[i]).replace("\n", "\n" + (10 + n) * " "),)
print(" " * (n - 1) + "[Stage " + str(self.num) + "]", *s2)
def set_orig(self, img):
"""To set the original image from a given CPU or GPU array.
Warning:
If it is a GPU array, it will NOT be copied.
Note:
The most efficient method is to write directly over `self.devOrig` with
some kernel and then run :meth:`update_orig`.
"""
assert img.shape == (self.h, self.w), \
"Got a {} image in a {} correlation routine!".format(
img.shape, (self.h, self.w))
if isinstance(img, np.ndarray):
self.debug(3, "Setting original image from ndarray")
self.devOrig.set(img)
elif isinstance(img, gpuarray.GPUArray):
self.debug(3, "Setting original image from GPUArray")
self.devOrig = img
else:
self.debug(0, "Error ! Unknown type of data given to set_orig()")
raise ValueError
self.update_orig()
def update_orig(self):
"""Needs to be called after `self.img_d` has been written directly."""
self.debug(3, "Updating original image")
self.array = cuda.gpuarray_to_array(self.devOrig, 'C')
# 'C' order implies tex2D(x,y) will fetch matrix(y,x):
# this is where x and y are inverted to comply with the kernels order
self.tex.set_array(self.array)
self._compute_gradients()
self._ready = False
def _compute_gradients(self):
"""Wrapper to call the gradient kernel."""
self._gradientKrnl.prepared_call(self.grid, self.block,
self.devGradX.gpudata, self.devGradY.gpudata)
def prepare(self):
"""Computes all necessary tables to perform correlation.
Note:
This method must be called every time the original image or fields are
set.
If not done by the user, it will be done automatically when needed.
"""
if not hasattr(self, 'maskArray'):
self.debug(2, "No mask set when preparing, using a basic one, "
"with a border of 5% the dimension")
mask = np.zeros((self.h, self.w), np.float32)
mask[self.h // 20:-self.h // 20, self.w // 20:-self.w // 20] = 1
self.set_mask(mask)
if not self._ready:
if not hasattr(self, 'array'):
self.debug(1, "Tried to prepare but original texture is not set !")
elif not hasattr(self, 'fields'):
self.debug(1, "Tried to prepare but fields are not set !")
else:
self._make_g()
self._make_h()
self._ready = True
self.debug(3, "Ready!")
else:
self.debug(1, "Tried to prepare when unnecessary, doing nothing...")
def _make_g(self):
for i in range(self.Nfields):
# Change to prepared call ?
self._makeGKrnl(self.devG[i].gpudata, self.devGradX.gpudata,
self.devGradY.gpudata,
self.devFieldsX[i], self.devFieldsY[i],
block=self.block, grid=self.grid)
def _make_h(self):
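# H[i, j] = sum(G_i * G_j) over all pixels; H is symmetric, so only the
# lower triangle is computed below and mirrored to the upper one.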
for i in range(self.Nfields):
for j in range(i + 1):
self.H[i, j] = self._mulRedKrnl(self.devG[i], self.devG[j]).get()
if i != j:
self.H[j, i] = self.H[i, j]
self.debug(3, "Hessian:\n", self.H)
self.devHi.set(np.linalg.inv(self.H)) # *1e-3)
# Looks stupid but prevents a useless devHi copy if nothing is printed
if self.verbose >= 3:
self.debug(3, "Inverted Hessian:\n", self.devHi.get())
def resample_orig(self, new_y, new_x, dev_out):
"""To resample the original image.
Note:
Reads the original texture and writes the interpolated `new_x*new_y`
image to the `dev_out` array.
"""
grid = (int(ceil(new_x / 32)), int(ceil(new_y / 32)))
block = (int(ceil(new_x / grid[0])), int(ceil(new_y / grid[1])), 1)
self.debug(3, "Resampling Orig texture, grid:", grid, "block:", block)
self._resampleOrigKrnl.prepared_call(grid, block,
dev_out.gpudata,
np.int32(new_x), np.int32(new_y))
self.debug(3, "Resampled original texture to", dev_out.shape)
def resample_d(self, new_y, new_x):
"""Resamples `tex_d` and returns it in a `gpuarray`."""
if (self.rX, self.rY) != (np.int32(new_x), np.int32(new_y)):
self.rGrid = (int(ceil(new_x / 32)), int(ceil(new_y / 32)))
self.rBlock = (int(ceil(new_x / self.rGrid[0])),
int(ceil(new_y / self.rGrid[1])), 1)
self.rX, self.rY = np.int32(new_x), np.int32(new_y)
self.devROut = gpuarray.empty((new_y, new_x), np.float32)
self.debug(3, "Resampling img_d texture to", (new_y, new_x),
" grid:", self.rGrid, "block:", self.rBlock)
self._resampleKrnl.prepared_call(self.rGrid, self.rBlock,
self.devROut.gpudata,
self.rX, self.rY)
return self.devROut
def set_fields(self, fields_x, fields_y):
"""Method to give the fields to identify with the routine.
Note:
This is necessary only once and can be done multiple times, but the
routine have to be initialized with :meth:`prepare`, causing a slight
overhead.
Takes a :obj:`tuple` or :obj:`list` of 2 `(gpu)arrays[Nfields,x,y]` (one
for displacement along `x` and one along `y`).
"""
self.debug(2, "Setting fields")
if isinstance(fields_x, np.ndarray):
# devFieldsX/Y are lists of gpuarrays here: copy each field separately
for i in range(self.Nfields):
self.devFieldsX[i].set(fields_x[i])
self.devFieldsY[i].set(fields_y[i])
elif isinstance(fields_x, gpuarray.GPUArray):
self.devFieldsX = fields_x
self.devFieldsY = fields_y
self.fields = True
def set_image(self, img_d):
"""Set the image to compare with the original.
Note:
Calling this method is not necessary: you can do `.get_disp(image)`.
This will automatically call this method first.
"""
assert img_d.shape == (self.h, self.w), \
"Got a {} image in a {} correlation routine!".format(
img_d.shape, (self.h, self.w))
if isinstance(img_d, np.ndarray):
self.debug(3, "Creating texture from numpy array")
self.array_d = cuda.matrix_to_array(img_d, "C")
elif isinstance(img_d, gpuarray.GPUArray):
self.debug(3, "Creating texture from gpuarray")
self.array_d = cuda.gpuarray_to_array(img_d, "C")
else:
self.debug(0, "Error ! Unknown type of data given to .set_image()")
raise ValueError
self.tex_d.set_array(self.array_d)
self.devX.set(np.zeros(self.Nfields, dtype=np.float32))
def set_mask(self, mask):
self.debug(3, "Setting the mask")
assert mask.shape == (self.h, self.w), \
"Got a {} mask in a {} routine.".format(mask.shape, (self.h, self.w))
if not mask.dtype == np.float32:
self.debug(2, "Converting the mask to float32")
mask = mask.astype(np.float32)
if isinstance(mask, np.ndarray):
self.maskArray = cuda.matrix_to_array(mask, 'C')
elif isinstance(mask, gpuarray.GPUArray):
self.maskArray = cuda.gpuarray_to_array(mask, 'C')
else:
self.debug(0, "Error! Mask data type not understood")
raise ValueError
self.texMask.set_array(self.maskArray)
def set_disp(self, x):
assert x.shape == (self.Nfields,), \
"Incorrect initialization of the parameters"
if isinstance(x, gpuarray.GPUArray):
self.devX = x
elif isinstance(x, np.ndarray):
self.devX.set(x)
else:
self.debug(0, "Error! Unknown type of data given to "
"CorrelStage.set_disp")
raise ValueError
def write_diff_file(self):
self._makeDiff.prepared_call(self.grid, self.block,
self.devOut.gpudata,
self.devX.gpudata,
self.devFieldsX.gpudata,
self.devFieldsY.gpudata)
diff = (self.devOut.get() + 128).astype(np.uint8)
cv2.imwrite("/home/vic/diff/diff{}-{}.png"
.format(self.num, self.loop), diff)
def get_disp(self, img_d=None):
"""The method that actually computes the weight of the fields."""
self.debug(3, "Calling main routine")
self.loop += 1
# self.mul = 3
if not self._ready:
self.debug(2, "Wasn't ready ! Preparing...")
self.prepare()
if img_d is not None:
self.set_image(img_d)
assert hasattr(self, 'array_d'), \
"Did not set the image, use set_image() before calling get_disp \
or give the image as parameter."
self.debug(3, "Computing first diff table")
self._makeDiff.prepared_call(self.grid, self.block,
self.devOut.gpudata,
self.devX.gpudata,
self.devFieldsX.gpudata,
self.devFieldsY.gpudata)
self.res = self._leastSquare(self.devOut).get()
self.debug(3, "res:", self.res / 1e6)
# Iterating #
# Note: I know this section is dense and wrappers for kernel calls could
# have made things clearer, but function calls in python cause a
# non-negligible overhead and this is the critical part.
# The comments are here to guide you !
for i in range(self.nbIter):
self.debug(3, "Iteration", i)
for j in range(self.Nfields):
# Computing the gradient along each parameter
self.devVec[j] = self._mulRedKrnl(self.devG[j], self.devOut)
# Newton method: we multiply the gradient vector by the pre-inverted
# Hessian, devVec now contains the actual search direction.
self._dotKrnl(self.devHi, self.devVec,
grid=(1, 1), block=(self.Nfields, 1, 1))
# This line simply adds k times the search direction to devX
# with a really simple kernel (does self.devX += k*self.devVec)
self._addKrnl.prepared_call((1, 1), (self.Nfields, 1, 1),
self.devX.gpudata, self.mul,
self.devVec.gpudata)
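# In effect this is a damped Newton update: X <- X + mul * (H^-1 @ grad)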
# Do not get rid of this condition: it does not change the output, but
# without it the arguments would be evaluated, copying data from the
# device for nothing
if self.verbose >= 3:
self.debug(3, "Direction:", self.devVec.get())
self.debug(3, "New X:", self.devX.get())
# To get the new residual
self._makeDiff.prepared_call(self.grid, self.block,
self.devOut.gpudata,
self.devX.gpudata,
self.devFieldsX.gpudata,
self.devFieldsY.gpudata)
oldres = self.res
self.res = self._leastSquare(self.devOut).get()
# If we moved away, revert changes and stop iterating
if self.res >= oldres:
self.debug(3, "Diverting from the solution new res={} >= {}!"
.format(self.res / 1e6, oldres / 1e6))
self._addKrnl.prepared_call((1, 1), (self.Nfields, 1, 1),
self.devX.gpudata,
-self.mul,
self.devVec.gpudata)
self.res = oldres
self.debug(3, "Undone: X=", self.devX.get())
break
self.debug(3, "res:", self.res / 1e6)
# self.write_diff_file()
if self.showDiff:
cv2.imshow("Residual", (self.devOut.get() + 128).astype(np.uint8))
cv2.waitKey(1)
return self.devX.get()
# =======================================================================#
# = =#
# = Class Correl: =#
# = =#
# =======================================================================#
class GPUCorrel:
"""Identify the displacement between two images.
This class is the core of the Correl block. It is meant to be efficient
enough to run in real-time.
It relies on :class:`CorrelStage` to perform correlation on different scales.
Requirements:
- The computer must have a Nvidia video card with compute capability
`>= 3.0`
- `CUDA 5.0` or higher (only tested with `CUDA 7.5`)
- `pycuda 2014.1` or higher (only tested with pycuda `2016.1.1`)
Presentation:
This class takes a :obj:`list` of fields. These fields form the basis of
deformation in which the displacement will be identified. When given two
images, it will identify the displacement between the original and the
second image in this basis as closely as possible, by lowering the
squared residual using the provided fields.
This class is highly flexible and performs on GPU for faster operation.
Usage:
At initialization, Correl needs only one unnamed argument: the working
resolution (as a :obj:`tuple` of :obj:`int`), which is the resolution of
the images it will be given. All the images must have exactly these
dimensions. The dimensions must be given in this order: `(y,x)` (like
`openCV` images).
At initialization or after, this class takes a reference image. The
deformations on this image are supposed to be all equal to `0`.
It also needs a number of deformation fields (technically limited to `~500`
fields, probably much less depending on the resolution and the amount of
memory on the graphics card).
Finally, you need to provide the deformed image you want to process. It
will then identify parameters of the sum of fields that lowers the square
sum of differences between the original image and the second one displaced
with the resulting field.
This class will resample the images and perform identification on a lower
resolution, use the result to initialize the next stage, and so on until it
reaches the last stage. It will then return the computed parameters. The
number of levels can be set with ``levels=x``.
The latest parameters returned (if any) are used to initialize computation
when called again, to help identify large displacements. It is particularly
well suited to slow deformations.
To lower the residual, this program computes the gradient of each parameter
and uses Newton method to converge as fast as possible. The number of
iterations for the resolution can also be set.
Args:
img_size (:obj:`tuple`): tuple of 2 :obj:`int`, `(y,x)`, the working
resolution
verbose (:obj:`int`): Use ``verbose=x`` to choose the amount of information
printed to the console:
- `0`: Nothing except for errors
- `1`: Only important info and warnings
- `2`: Major info and a few values periodically (at a bearable rate)
- `3`: Tons of info including details of each iteration
Note that `verbose=3` REALLY slows the program down. To be used only for
debug.
fields (:obj:`list`): Use ``fields=[...]`` to set the fields. This can be
done later with :meth:`set_fields`, however in case when the fields are
set later, you need to add ``Nfields=x`` to specify at :meth:`__init__`
the number of expected fields in order to allocate all the necessary
memory on the device.
The fields should be given as a :obj:`list` of :obj:`tuple` of 2
`numpy.ndarrays` or `gpuarray.GPUArray` of the size of the image, each
array corresponds to the displacement in pixel along respectively `X` and
`Y`.
You can also use a :obj:`str` instead of the :obj:`tuple` for the common
fields:
- Rigid body and linear deformations:
- `'x'`: Movement along `X`
- `'y'`: Movement along `Y`
- `'r'`: Rotation (in the trigonometric direction)
- `'exx'`: Stretch along `X`
- `'eyy'`: Stretch along `Y`
- `'exy'`: Shear
- `'z'`: Zoom (dilatation) (`=exx+eyy`)
Note that you should not try to identify `exx`, `eyy` AND `z` at the
same time (one of them is redundant).
- Quadratic deformations:
These fields are more complicated to interpret but can be useful for
complicated solicitations such as biaxial stretch. `U` and `V`
represent the displacement along respectively `x` and `y`.
- `'uxx'`: `U(x,y) = x²`
- `'uyy'`: `U(x,y) = y²`
- `'uxy'`: `U(x,y) = xy`
- `'vxx'`: `V(x,y) = x²`
- `'vyy'`: `V(x,y) = y²`
- `'vxy'`: `V(x,y) = xy`
All of these default fields are normalized to have a max displacement of
`1` pixel and are centered in the middle of the image. They are generated
to have the size of your image.
You can mix strings and tuples at your convenience to perform your
identification.
Example:
::
fields=['x', 'y', (MyFieldX, MyFieldY)]
where `MyfieldX` and `MyfieldY` are numpy arrays with the same shape as
the images
Example of memory usage: On a 2048x2048 image, count roughly
`180 + 100*Nfields` MB of VRAM
img: The original image. It must be given as a 2D `numpy.ndarray`. This
block works with `dtype=np.float32`. If the `dtype` of the given image is
different, it will print a warning and the image will be converted. It
can be given at :meth:`__init__` with the kwarg ``img=MyImage`` or later
with ``set_orig(MyImage)``.
Note:
You can reset it whenever you want, even multiple times but it will
reset the def parameters to `0`.
Once fields and original image are set, there is a short preparation time
before correlation can be performed. You can do this preparation yourself
by using :meth:`prepare`. If not called, it will be done automatically
when necessary, inducing a slight overhead at the first call of
:meth:`get_disp` after setting/updating the fields or original image.
levels (:obj:`int`, optional): Number of levels of the pyramid. More levels
can help converging with large and quick deformations but may fail on
images without low spatial frequency. Fewer levels mean that the program
will run faster.
resampling_factor (:obj:`float`, optional): The resolution will be divided
by this parameter between each stage of the pyramid. A low value preserves
coherence between stages but is more expensive. A high value reaches small
resolutions in fewer levels and is faster, but be careful not to lose
consistency between stages.
iterations (:obj:`int`, optional): The MAXIMUM number of iterations to be
run before returning the values. Note that if the residual increases
before reaching `x` iterations, the block will return anyway.
mask (optional): To set the mask, to weight the zone of interest on the
images. It is particularly useful to prevent undesired effects on the
border of the images. If no mask is given, a rectangular mask will be
used, with border of `5%` the size of the image.
show_diff (:obj:`bool`, optional): Will open a :mod:`cv2` window and print
the difference between the original and the displaced image after
correlation. `128 Gray` means no difference, lighter means positive and
darker negative.
kernel_file (:obj:`str`, optional): Path to the file containing the CUDA
kernels. Defaults to `crappy_install_dir/data/kernels.cu`, where
`crappy_install_dir` is the root directory of the installation of crappy
(``crappy.__path__``).
mul (:obj:`float`, optional): This parameter is critical. The direction
will be multiplied by this scalar before being added to the solution. It
defines how "fast" we move towards the solution. A high value gives fast
convergence but risks going past the solution and diverging (the program
does not try to handle this, and if the residual rises, iterations will
stop immediately). A low value is probably more precise but slower and
may require more iterations.
After multiple tests, 3 was found to be a pretty acceptable value. Don't
hesitate to adapt it to your case. Use ``verbose=3`` and see if the
convergence is too slow or too fast.
Note:
The compared image can be given directly when querying the displacement
as a parameter to :meth:`get_disp` or before, with :meth:`set_image`. You
can provide it as a `np.ndarray` just like `orig`, or as a
`pycuda.gpuarray.GPUArray`.
"""
# Todo
"""
This section lists all the considered improvements for this program.
These features may NOT all be implemented in the future.
They are sorted by priority.
- Allow faster execution by executing the reduction only on a part
of the images (random or chosen)
- Add the possibility to return the value of the deformations `Exx` and
`Eyy` at a specific point
- Add a parameter to return values in `%`
- Add a filter to smooth/ignore incorrect values
- Allow a reset of the reference picture for simple deformations to
enhance robustness in case of large deformations or lighting changes
- Restart iterating from `0` once in a while to see if the residual is
lower. Can be useful to recover when diverged critically due to an
incorrect image (Shadow, obstruction, flash, camera failure, ...)
"""
def __init__(self, img_size, **kwargs):
global context
if 'context' in kwargs:
context = kwargs.pop('context')
else:
cuda.init()
try:
from pycuda.tools import make_default_context
except (ImportError, ModuleNotFoundError):
make_default_context = OptionalModule("pycuda",
"PyCUDA and CUDA are necessary "
"to use GPUCorrel")
context = make_default_context()
unknown = []
for k in kwargs.keys():
if k not in ['verbose', 'levels', 'resampling_factor', 'kernel_file',
'iterations', 'show_diff', 'Nfields', 'img',
'fields', 'mask', 'mul']:
unknown.append(k)
if len(unknown) != 0:
warnings.warn("Unrecognized parameter" + (
's: ' + str(unknown) if len(unknown) > 1 else ': ' + unknown[0]),
SyntaxWarning)
self.verbose = kwargs.get("verbose", 0)
self.debug(3, "You set the verbose level to the maximum.\n\
It may help finding bugs or tracking errors but it may also \
impact the program performance as it will print A LOT of \
output and add GPU->CPU copies only to print information.\n\
If it is not desired, consider lowering the verbosity: \
1 or 2 is a reasonable choice, \
0 won't show anything except for errors.")
self.levels = kwargs.get("levels", 5)
self.loop = 0
self.resamplingFactor = kwargs.get("resampling_factor", 2)
h, w = img_size
self.nbIter = kwargs.get("iterations", 4)
self.debug(1, "Initializing... Master resolution:", img_size,
"levels:", self.levels, "verbosity:", self.verbose)
# Computing dimensions of the different levels #
self.h, self.w = [], []
for i in range(self.levels):
self.h.append(int(round(h / (self.resamplingFactor ** i))))
self.w.append(int(round(w / (self.resamplingFactor ** i))))
if kwargs.get("Nfields") is not None:
self.Nfields = kwargs.get("Nfields")
else:
try:
self.Nfields = len(kwargs["fields"])
except KeyError:
self.debug(0, "Error! You must provide the number of fields at init. \
Add Nfields=x or directly set fields with fields=list/tuple")
raise ValueError
kernel_file = kwargs.get("kernel_file")
if kernel_file is None:
self.debug(3, "Kernel file not specified, using the one in crappy dir")
from crappy import __path__ as crappy_path
kernel_file = crappy_path[0] + "/data/kernels.cu"
self.debug(3, "Kernel file:", kernel_file)
# Creating a new instance of CorrelStage for each stage #
self.correl = []
for i in range(self.levels):
self.correl.append(CorrelStage((self.h[i], self.w[i]),
verbose=self.verbose,
Nfields=self.Nfields,
iterations=self.nbIter,
show_diff=(i == 0 and kwargs.get(
"show_diff", False)),
mul=kwargs.get("mul", 3),
kernel_file=kernel_file))
# Set original image if provided #
if kwargs.get("img") is not None:
self.set_orig(kwargs.get("img"))
s = """
texture<float, cudaTextureType2D, cudaReadModeElementType> texFx{0};
texture<float, cudaTextureType2D, cudaReadModeElementType> texFy{0};
__global__ void resample{0}(float* outX, float* outY, int x, int y)
{{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
if(idx < x && idy < y)
{{
outX[idy*x+idx] = tex2D(texFx{0},(float)idx/x, (float)idy/y);
outY[idy*x+idx] = tex2D(texFy{0},(float)idx/x, (float)idy/y);
}}
}}
"""
self.src = ""
for i in range(self.Nfields):
self.src += s.format(i)  # Adding textures for quick field resampling
self.mod = SourceModule(self.src)
self.texFx = []
self.texFy = []
self.resampleF = []
for i in range(self.Nfields):
self.texFx.append(self.mod.get_texref("texFx%d" % i))
self.texFy.append(self.mod.get_texref("texFy%d" % i))
self.resampleF.append(self.mod.get_function("resample%d" % i))
self.resampleF[i].prepare("PPii", texrefs=[self.texFx[i], self.texFy[i]])
for t in self.texFx + self.texFy:
t.set_flags(cuda.TRSF_NORMALIZED_COORDINATES)
t.set_filter_mode(cuda.filter_mode.LINEAR)
t.set_address_mode(0, cuda.address_mode.BORDER)
t.set_address_mode(1, cuda.address_mode.BORDER)
# Set fields if provided #
if kwargs.get("fields") is not None:
self.set_fields(kwargs.get("fields"))
if kwargs.get("mask") is not None:
self.set_mask(kwargs.get("mask"))
def get_fields(self, y=None, x=None):
"""Returns the fields, resampled to size `(y,x)`."""
if x is None or y is None:
y = self.h[0]
x = self.w[0]
out_x = gpuarray.empty((self.Nfields, y, x), np.float32)
out_y = gpuarray.empty((self.Nfields, y, x), np.float32)
grid = (int(ceil(x / 32)), int(ceil(y / 32)))
block = (int(ceil(x / grid[0])), int(ceil(y / grid[1])), 1)
for i in range(self.Nfields):
self.resampleF[i].prepared_call(grid, block,
out_x[i, :, :].gpudata,
out_y[i, :, :].gpudata,
np.int32(x), np.int32(y))
return out_x, out_y
def debug(self, n, *s):
"""To print debug info.
First argument is the level of the message.
It will be displayed only if `self.verbose` is greater than or equal to it.
"""
if n <= self.verbose:
print(" " * (n - 1) + "[Correl]", *s)
def set_orig(self, img):
"""To set the original image.
This is the reference with which the second image will be compared.
"""
self.debug(2, "updating original image")
assert isinstance(img, np.ndarray), "Image must be a numpy array"
assert len(img.shape) == 2, "Image must have 2 dimensions (got {})" \
.format(len(img.shape))
assert img.shape == (self.h[0], self.w[0]), "Wrong size!"
if img.dtype != np.float32:
warnings.warn("Correl() takes arrays with dtype np.float32 \
to allow GPU computing (got {}). Converting to float32."
.format(img.dtype), RuntimeWarning)
img = img.astype(np.float32)
self.correl[0].set_orig(img)
for i in range(1, self.levels):
self.correl[i - 1].resample_orig(self.h[i], self.w[i],
self.correl[i].devOrig)
self.correl[i].update_orig()
def set_fields(self, fields):
assert self.Nfields == len(fields), \
"Cannot change the number of fields on the go!"
# Choosing the right function to copy
if isinstance(fields[0], str) or isinstance(fields[0][0], np.ndarray):
to_array = cuda.matrix_to_array
elif isinstance(fields[0][0], gpuarray.GPUArray):
to_array = cuda.gpuarray_to_array
else:
self.debug(0, "Error ! Incorrect fields argument. \
See docstring of Correl")
raise ValueError
# These lists store the arrays for the fields texture
# (to be interpolated quickly for each stage)
self.fieldsXArray = []
self.fieldsYArray = []
for i in range(self.Nfields):
if isinstance(fields[i], str):
fields[i] = get_field(fields[i].lower(),
self.h[0], self.w[0])
self.fieldsXArray.append(to_array(fields[i][0], "C"))
self.texFx[i].set_array(self.fieldsXArray[i])
self.fieldsYArray.append(to_array(fields[i][1], "C"))
self.texFy[i].set_array(self.fieldsYArray[i])
for i in range(self.levels):
self.correl[i].set_fields(*self.get_fields(self.h[i], self.w[i]))
def prepare(self):
for c in self.correl:
c.prepare()
self.debug(2, "Ready!")
def save_all_images(self, name="out"):
try:
import cv2
except (ModuleNotFoundError, ImportError):
cv2 = OptionalModule("opencv-python")
self.debug(1, "Saving all images with the name", name + "X.png")
for i in range(self.levels):
out = self.correl[i].devOrig.get().astype(np.uint8)
cv2.imwrite(name + str(i) + ".png", out)
def set_image(self, img_d):
if img_d.dtype != np.float32:
warnings.warn("Correl() takes arrays with dtype np.float32 \
to allow GPU computing (got {}). Converting to float32."
.format(img_d.dtype), RuntimeWarning)
img_d = img_d.astype(np.float32)
self.correl[0].set_image(img_d)
for i in range(1, self.levels):
self.correl[i].set_image(
self.correl[i - 1].resample_d(self.correl[i].h, self.correl[i].w))
def set_mask(self, mask):
for i in range(self.levels):
self.correl[i].set_mask(interp_nearest(mask, self.h[i], self.w[i]))
def get_disp(self, img_d=None):
"""To get the displacement.
This will perform the correlation routine on each stage, initializing each
stage with the values from the previous one, and will return the computed
parameters as a list.
"""
self.loop += 1
if img_d is not None:
self.set_image(img_d)
try:
disp = self.last / (self.resamplingFactor ** self.levels)
except AttributeError:
disp = np.array([0] * self.Nfields, dtype=np.float32)
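# Walk the pyramid from the coarsest stage to the finest, scaling the
# estimate to the resolution of each stage before refining it there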
for i in reversed(range(self.levels)):
disp *= self.resamplingFactor
self.correl[i].set_disp(disp)
disp = self.correl[i].get_disp()
self.last = disp
# Every 10 images, print the values (if debug >=2)
if self.loop % 10 == 0:
self.debug(2, "Loop", self.loop, ", values:", self.correl[0].devX.get(),
", res:", self.correl[0].res / 1e6)
return disp
def get_res(self, lvl=0):
"""Returns the last residual of the specified level (`0` by default).
Usually, the correlation is correct when `res < ~1e9-1e10`, but it really
depends on the images: you need to find the value that suits your own
images, depending on the resolution, contrast, correlation method, etc.
You can use :meth:`write_diff_file` to visualize the difference between the
two images after correlation.
"""
return self.correl[lvl].res
def write_diff_file(self, level=0):
"""To see the difference between the two images with the computed
parameters.
It writes a single channel picture named `"diff.png"` where `128 gray` is
exact equality, lighter pixels show positive difference and darker pixels
a negative difference. Useful to see if correlation succeeded and to
identify the origin of non convergence.
"""
self.correl[level].write_diff_file()
@staticmethod
def clean():
"""Needs to be called at the end, to destroy the context properly."""
context.pop()
|
photoninger/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/ping.py
|
65
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable Python is configured.
- This is NOT ICMP ping, this is just a trivial test module that requires Python on the remote-node.
- For Windows targets, use the M(win_ping) module instead.
- For Network targets, use the M(net_ping) module instead.
notes:
- For Windows targets, use the M(win_ping) module instead.
- For Network targets, use the M(net_ping) module instead.
options:
data:
description:
- Data to return for the C(ping) return value.
- If this parameter is set to C(crash), the module will cause an exception.
default: pong
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Test we can log on to 'webservers' and execute python with json lib.
# ansible webservers -m ping
# Example from an Ansible Playbook
- ping:
# Induce an exception to see what happens
- ping:
data: crash
'''
RETURN = '''
ping:
description: value provided with the data parameter
returned: success
type: string
sample: pong
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(type='str', default='pong'),
),
supports_check_mode=True
)
if module.params['data'] == 'crash':
raise Exception("boom")
result = dict(
ping=module.params['data'],
)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
edunham/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_handshake.py
|
452
|
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
same members as extension definitions in expected_list. Extension
definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_quoted_data(self):
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_quoted_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
partofthething/home-assistant
|
refs/heads/dev
|
tests/components/mazda/test_sensor.py
|
2
|
"""The sensor tests for the Mazda Connected Services integration."""
from homeassistant.components.mazda.const import DOMAIN
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
PRESSURE_PSI,
)
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from tests.components.mazda import init_integration
async def test_device_nickname(hass):
"""Test creation of the device when vehicle has a nickname."""
await init_integration(hass, use_nickname=True)
device_registry = await hass.helpers.device_registry.async_get_registry()
reg_device = device_registry.async_get_device(
identifiers={(DOMAIN, "JM000000000000000")},
)
assert reg_device.model == "2021 MAZDA3 2.5 S SE AWD"
assert reg_device.manufacturer == "Mazda"
assert reg_device.name == "My Mazda3"
async def test_device_no_nickname(hass):
"""Test creation of the device when vehicle has no nickname."""
await init_integration(hass, use_nickname=False)
device_registry = await hass.helpers.device_registry.async_get_registry()
reg_device = device_registry.async_get_device(
identifiers={(DOMAIN, "JM000000000000000")},
)
assert reg_device.model == "2021 MAZDA3 2.5 S SE AWD"
assert reg_device.manufacturer == "Mazda"
assert reg_device.name == "2021 MAZDA3 2.5 S SE AWD"
async def test_sensors(hass):
"""Test creation of the sensors."""
await init_integration(hass)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Fuel Remaining Percentage
state = hass.states.get("sensor.my_mazda3_fuel_remaining_percentage")
assert state
assert (
state.attributes.get(ATTR_FRIENDLY_NAME)
== "My Mazda3 Fuel Remaining Percentage"
)
assert state.attributes.get(ATTR_ICON) == "mdi:gas-station"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "87.0"
entry = entity_registry.async_get("sensor.my_mazda3_fuel_remaining_percentage")
assert entry
assert entry.unique_id == "JM000000000000000_fuel_remaining_percentage"
# Fuel Distance Remaining
state = hass.states.get("sensor.my_mazda3_fuel_distance_remaining")
assert state
assert (
state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Fuel Distance Remaining"
)
assert state.attributes.get(ATTR_ICON) == "mdi:gas-station"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_KILOMETERS
assert state.state == "381"
entry = entity_registry.async_get("sensor.my_mazda3_fuel_distance_remaining")
assert entry
assert entry.unique_id == "JM000000000000000_fuel_distance_remaining"
# Odometer
state = hass.states.get("sensor.my_mazda3_odometer")
assert state
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Odometer"
assert state.attributes.get(ATTR_ICON) == "mdi:speedometer"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_KILOMETERS
assert state.state == "2796"
entry = entity_registry.async_get("sensor.my_mazda3_odometer")
assert entry
assert entry.unique_id == "JM000000000000000_odometer"
# Front Left Tire Pressure
state = hass.states.get("sensor.my_mazda3_front_left_tire_pressure")
assert state
assert (
state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Front Left Tire Pressure"
)
assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
assert state.state == "35"
entry = entity_registry.async_get("sensor.my_mazda3_front_left_tire_pressure")
assert entry
assert entry.unique_id == "JM000000000000000_front_left_tire_pressure"
# Front Right Tire Pressure
state = hass.states.get("sensor.my_mazda3_front_right_tire_pressure")
assert state
assert (
state.attributes.get(ATTR_FRIENDLY_NAME)
== "My Mazda3 Front Right Tire Pressure"
)
assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
assert state.state == "35"
entry = entity_registry.async_get("sensor.my_mazda3_front_right_tire_pressure")
assert entry
assert entry.unique_id == "JM000000000000000_front_right_tire_pressure"
# Rear Left Tire Pressure
state = hass.states.get("sensor.my_mazda3_rear_left_tire_pressure")
assert state
assert (
state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Rear Left Tire Pressure"
)
assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
assert state.state == "33"
entry = entity_registry.async_get("sensor.my_mazda3_rear_left_tire_pressure")
assert entry
assert entry.unique_id == "JM000000000000000_rear_left_tire_pressure"
# Rear Right Tire Pressure
state = hass.states.get("sensor.my_mazda3_rear_right_tire_pressure")
assert state
assert (
state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Rear Right Tire Pressure"
)
assert state.attributes.get(ATTR_ICON) == "mdi:car-tire-alert"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PRESSURE_PSI
assert state.state == "33"
entry = entity_registry.async_get("sensor.my_mazda3_rear_right_tire_pressure")
assert entry
assert entry.unique_id == "JM000000000000000_rear_right_tire_pressure"
async def test_sensors_imperial_units(hass):
"""Test that the sensors work properly with imperial units."""
hass.config.units = IMPERIAL_SYSTEM
await init_integration(hass)
# Fuel Distance Remaining
state = hass.states.get("sensor.my_mazda3_fuel_distance_remaining")
assert state
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILES
assert state.state == "237"
# Odometer
state = hass.states.get("sensor.my_mazda3_odometer")
assert state
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILES
assert state.state == "1737"
|
shliujing/v2ex
|
refs/heads/master
|
html5lib/tests/test_stream.py
|
74
|
import support
import unittest, codecs
from html5lib.inputstream import HTMLInputStream
class HTMLInputStreamShortChunk(HTMLInputStream):
_defaultChunkSize = 2
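# A chunk size of 2 forces many chunk boundaries, so the tests below also
# exercise characters and newlines split across chunks.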
class HTMLInputStreamTest(unittest.TestCase):
def test_char_ascii(self):
stream = HTMLInputStream("'", encoding='ascii')
self.assertEquals(stream.charEncoding[0], 'ascii')
self.assertEquals(stream.char(), "'")
def test_char_null(self):
stream = HTMLInputStream("\x00")
self.assertEquals(stream.char(), u'\ufffd')
def test_char_utf8(self):
stream = HTMLInputStream(u'\u2018'.encode('utf-8'), encoding='utf-8')
self.assertEquals(stream.charEncoding[0], 'utf-8')
self.assertEquals(stream.char(), u'\u2018')
def test_char_win1252(self):
stream = HTMLInputStream(u"\xa9\xf1\u2019".encode('windows-1252'))
self.assertEquals(stream.charEncoding[0], 'windows-1252')
self.assertEquals(stream.char(), u"\xa9")
self.assertEquals(stream.char(), u"\xf1")
self.assertEquals(stream.char(), u"\u2019")
def test_bom(self):
stream = HTMLInputStream(codecs.BOM_UTF8 + "'")
self.assertEquals(stream.charEncoding[0], 'utf-8')
self.assertEquals(stream.char(), "'")
def test_utf_16(self):
stream = HTMLInputStream((' '*1025).encode('utf-16'))
self.assert_(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'], stream.charEncoding)
self.assertEquals(len(stream.charsUntil(' ', True)), 1025)
def test_newlines(self):
stream = HTMLInputStreamShortChunk(codecs.BOM_UTF8 + "a\nbb\r\nccc\rddddxe")
self.assertEquals(stream.position(), (1, 0))
self.assertEquals(stream.charsUntil('c'), u"a\nbb\n")
self.assertEquals(stream.position(), (3, 0))
self.assertEquals(stream.charsUntil('x'), u"ccc\ndddd")
self.assertEquals(stream.position(), (4, 4))
self.assertEquals(stream.charsUntil('e'), u"x")
self.assertEquals(stream.position(), (4, 5))
def test_newlines2(self):
size = HTMLInputStream._defaultChunkSize
stream = HTMLInputStream("\r" * size + "\n")
self.assertEquals(stream.charsUntil('x'), "\n" * size)
def test_position(self):
stream = HTMLInputStreamShortChunk(codecs.BOM_UTF8 + "a\nbb\nccc\nddde\nf\ngh")
self.assertEquals(stream.position(), (1, 0))
self.assertEquals(stream.charsUntil('c'), u"a\nbb\n")
self.assertEquals(stream.position(), (3, 0))
stream.unget(u"\n")
self.assertEquals(stream.position(), (2, 2))
self.assertEquals(stream.charsUntil('c'), u"\n")
self.assertEquals(stream.position(), (3, 0))
stream.unget(u"\n")
self.assertEquals(stream.position(), (2, 2))
self.assertEquals(stream.char(), u"\n")
self.assertEquals(stream.position(), (3, 0))
self.assertEquals(stream.charsUntil('e'), u"ccc\nddd")
self.assertEquals(stream.position(), (4, 3))
self.assertEquals(stream.charsUntil('h'), u"e\nf\ng")
self.assertEquals(stream.position(), (6, 1))
def test_position2(self):
stream = HTMLInputStreamShortChunk("abc\nd")
self.assertEquals(stream.position(), (1, 0))
self.assertEquals(stream.char(), u"a")
self.assertEquals(stream.position(), (1, 1))
self.assertEquals(stream.char(), u"b")
self.assertEquals(stream.position(), (1, 2))
self.assertEquals(stream.char(), u"c")
self.assertEquals(stream.position(), (1, 3))
self.assertEquals(stream.char(), u"\n")
self.assertEquals(stream.position(), (2, 0))
self.assertEquals(stream.char(), u"d")
self.assertEquals(stream.position(), (2, 1))
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == '__main__':
main()
|