| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
kivio/PerfKitBenchmarker
|
refs/heads/master
|
perfkitbenchmarker/packages/multilib.py
|
8
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing multilib installation and cleanup functions."""
def YumInstall(vm):
"""Installs multilib packages on the VM."""
vm.InstallPackages('glibc-devel.i686 libstdc++-devel.i686')
def AptInstall(vm):
"""Installs multilib packages on the VM."""
vm.InstallPackages('gcc-multilib g++-multilib')
|
was4444/chromium.src
|
refs/heads/nw15
|
third_party/cython/src/Cython/Compiler/ModuleNode.py
|
87
|
#
# Module parse tree node
#
import cython
cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
error=object, warning=object, py_object_type=object, UtilityCode=object,
EncodedString=object)
import os
import operator
from PyrexTypes import CPtrType
import Future
import Annotate
import Code
import Naming
import Nodes
import Options
import TypeSlots
import Version
import PyrexTypes
from Errors import error, warning
from PyrexTypes import py_object_type
from Cython.Utils import open_new_file, replace_suffix, decode_filename
from Code import UtilityCode
from StringEncoding import EncodedString
def check_c_declarations_pxd(module_node):
module_node.scope.check_c_classes_pxd()
return module_node
def check_c_declarations(module_node):
module_node.scope.check_c_classes()
module_node.scope.check_c_functions()
return module_node
class ModuleNode(Nodes.Node, Nodes.BlockNode):
# doc string or None
# body StatListNode
#
# referenced_modules [ModuleScope]
# full_module_name string
#
# scope The module scope.
# compilation_source A CompilationSource (see Main)
# directives Top-level compiler directives
child_attrs = ["body"]
directives = None
def merge_in(self, tree, scope, merge_scope=False):
# Merges in the contents of another tree, and possibly scope. With the
# current implementation below, this must be done right prior
# to code generation.
#
# Note: This way of doing it seems strange -- I believe the
# right concept is to split ModuleNode into a ModuleNode and a
# CodeGenerator, and tell that CodeGenerator to generate code
# from multiple sources.
assert isinstance(self.body, Nodes.StatListNode)
if isinstance(tree, Nodes.StatListNode):
self.body.stats.extend(tree.stats)
else:
self.body.stats.append(tree)
self.scope.utility_code_list.extend(scope.utility_code_list)
def extend_if_not_in(L1, L2):
for x in L2:
if x not in L1:
L1.append(x)
extend_if_not_in(self.scope.include_files, scope.include_files)
extend_if_not_in(self.scope.included_files, scope.included_files)
extend_if_not_in(self.scope.python_include_files,
scope.python_include_files)
if merge_scope:
# Ensure that we don't generate import code for these entries!
for entry in scope.c_class_entries:
entry.type.module_name = self.full_module_name
entry.type.scope.directives["internal"] = True
self.scope.merge_in(scope)
def analyse_declarations(self, env):
if not Options.docstrings:
env.doc = self.doc = None
elif Options.embed_pos_in_docstring:
env.doc = EncodedString(u'File: %s (starting at line %s)' % Nodes.relative_position(self.pos))
if self.doc is not None:
env.doc = EncodedString(env.doc + u'\n' + self.doc)
env.doc.encoding = self.doc.encoding
else:
env.doc = self.doc
env.directives = self.directives
self.body.analyse_declarations(env)
def process_implementation(self, options, result):
env = self.scope
env.return_type = PyrexTypes.c_void_type
self.referenced_modules = []
self.find_referenced_modules(env, self.referenced_modules, {})
self.sort_cdef_classes(env)
self.generate_c_code(env, options, result)
self.generate_h_code(env, options, result)
self.generate_api_code(env, result)
def has_imported_c_functions(self):
for module in self.referenced_modules:
for entry in module.cfunc_entries:
if entry.defined_in_pxd:
return 1
return 0
def generate_h_code(self, env, options, result):
def h_entries(entries, api=0, pxd=0):
return [entry for entry in entries
if ((entry.visibility == 'public') or
(api and entry.api) or
(pxd and entry.defined_in_pxd))]
h_types = h_entries(env.type_entries, api=1)
h_vars = h_entries(env.var_entries)
h_funcs = h_entries(env.cfunc_entries)
h_extension_types = h_entries(env.c_class_entries)
if (h_types or h_vars or h_funcs or h_extension_types):
result.h_file = replace_suffix(result.c_file, ".h")
h_code = Code.CCodeWriter()
Code.GlobalState(h_code, self)
if options.generate_pxi:
result.i_file = replace_suffix(result.c_file, ".pxi")
i_code = Code.PyrexCodeWriter(result.i_file)
else:
i_code = None
h_guard = Naming.h_guard_prefix + self.api_name(env)
h_code.put_h_guard(h_guard)
h_code.putln("")
self.generate_type_header_code(h_types, h_code)
if options.capi_reexport_cincludes:
self.generate_includes(env, [], h_code)
h_code.putln("")
api_guard = Naming.api_guard_prefix + self.api_name(env)
h_code.putln("#ifndef %s" % api_guard)
h_code.putln("")
self.generate_extern_c_macro_definition(h_code)
if h_extension_types:
h_code.putln("")
for entry in h_extension_types:
self.generate_cclass_header_code(entry.type, h_code)
if i_code:
self.generate_cclass_include_code(entry.type, i_code)
if h_funcs:
h_code.putln("")
for entry in h_funcs:
self.generate_public_declaration(entry, h_code, i_code)
if h_vars:
h_code.putln("")
for entry in h_vars:
self.generate_public_declaration(entry, h_code, i_code)
h_code.putln("")
h_code.putln("#endif /* !%s */" % api_guard)
h_code.putln("")
h_code.putln("#if PY_MAJOR_VERSION < 3")
h_code.putln("PyMODINIT_FUNC init%s(void);" % env.module_name)
h_code.putln("#else")
h_code.putln("PyMODINIT_FUNC PyInit_%s(void);" % env.module_name)
h_code.putln("#endif")
h_code.putln("")
h_code.putln("#endif /* !%s */" % h_guard)
f = open_new_file(result.h_file)
try:
h_code.copyto(f)
finally:
f.close()
def generate_public_declaration(self, entry, h_code, i_code):
h_code.putln("%s %s;" % (
Naming.extern_c_macro,
entry.type.declaration_code(
entry.cname, dll_linkage = "DL_IMPORT")))
if i_code:
i_code.putln("cdef extern %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
def api_name(self, env):
return env.qualified_name.replace(".", "__")
def generate_api_code(self, env, result):
def api_entries(entries, pxd=0):
return [entry for entry in entries
if entry.api or (pxd and entry.defined_in_pxd)]
api_vars = api_entries(env.var_entries)
api_funcs = api_entries(env.cfunc_entries)
api_extension_types = api_entries(env.c_class_entries)
if api_vars or api_funcs or api_extension_types:
result.api_file = replace_suffix(result.c_file, "_api.h")
h_code = Code.CCodeWriter()
Code.GlobalState(h_code, self)
api_guard = Naming.api_guard_prefix + self.api_name(env)
h_code.put_h_guard(api_guard)
h_code.putln('#include "Python.h"')
if result.h_file:
h_code.putln('#include "%s"' % os.path.basename(result.h_file))
if api_extension_types:
h_code.putln("")
for entry in api_extension_types:
type = entry.type
h_code.putln("static PyTypeObject *%s = 0;" % type.typeptr_cname)
h_code.putln("#define %s (*%s)" % (
type.typeobj_cname, type.typeptr_cname))
if api_funcs:
h_code.putln("")
for entry in api_funcs:
type = CPtrType(entry.type)
cname = env.mangle(Naming.func_prefix, entry.name)
h_code.putln("static %s = 0;" % type.declaration_code(cname))
h_code.putln("#define %s %s" % (entry.name, cname))
if api_vars:
h_code.putln("")
for entry in api_vars:
type = CPtrType(entry.type)
cname = env.mangle(Naming.varptr_prefix, entry.name)
h_code.putln("static %s = 0;" % type.declaration_code(cname))
h_code.putln("#define %s (*%s)" % (entry.name, cname))
h_code.put(UtilityCode.load_as_string("PyIdentifierFromString", "ImportExport.c")[0])
h_code.put(UtilityCode.load_as_string("ModuleImport", "ImportExport.c")[1])
if api_vars:
h_code.put(UtilityCode.load_as_string("VoidPtrImport", "ImportExport.c")[1])
if api_funcs:
h_code.put(UtilityCode.load_as_string("FunctionImport", "ImportExport.c")[1])
if api_extension_types:
h_code.put(UtilityCode.load_as_string("TypeImport", "ImportExport.c")[1])
h_code.putln("")
h_code.putln("static int import_%s(void) {" % self.api_name(env))
h_code.putln("PyObject *module = 0;")
h_code.putln('module = __Pyx_ImportModule("%s");' % env.qualified_name)
h_code.putln("if (!module) goto bad;")
for entry in api_funcs:
cname = env.mangle(Naming.func_prefix, entry.name)
sig = entry.type.signature_string()
h_code.putln(
'if (__Pyx_ImportFunction(module, "%s", (void (**)(void))&%s, "%s") < 0) goto bad;'
% (entry.name, cname, sig))
for entry in api_vars:
cname = env.mangle(Naming.varptr_prefix, entry.name)
sig = entry.type.declaration_code("")
h_code.putln(
'if (__Pyx_ImportVoidPtr(module, "%s", (void **)&%s, "%s") < 0) goto bad;'
% (entry.name, cname, sig))
h_code.putln("Py_DECREF(module); module = 0;")
for entry in api_extension_types:
self.generate_type_import_call(
entry.type, h_code,
"if (!%s) goto bad;" % entry.type.typeptr_cname)
h_code.putln("return 0;")
h_code.putln("bad:")
h_code.putln("Py_XDECREF(module);")
h_code.putln("return -1;")
h_code.putln("}")
h_code.putln("")
h_code.putln("#endif /* !%s */" % api_guard)
f = open_new_file(result.api_file)
try:
h_code.copyto(f)
finally:
f.close()
def generate_cclass_header_code(self, type, h_code):
h_code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
type.typeobj_cname))
def generate_cclass_include_code(self, type, i_code):
i_code.putln("cdef extern class %s.%s:" % (
type.module_name, type.name))
i_code.indent()
var_entries = type.scope.var_entries
if var_entries:
for entry in var_entries:
i_code.putln("cdef %s" %
entry.type.declaration_code(entry.cname, pyrex = 1))
else:
i_code.putln("pass")
i_code.dedent()
def generate_c_code(self, env, options, result):
modules = self.referenced_modules
if Options.annotate or options.annotate:
emit_linenums = False
rootwriter = Annotate.AnnotationCCodeWriter()
else:
emit_linenums = options.emit_linenums
rootwriter = Code.CCodeWriter(emit_linenums=emit_linenums, c_line_in_traceback=options.c_line_in_traceback)
globalstate = Code.GlobalState(rootwriter, self, emit_linenums, options.common_utility_include_dir)
globalstate.initialize_main_c_code()
h_code = globalstate['h_code']
self.generate_module_preamble(env, modules, h_code)
globalstate.module_pos = self.pos
globalstate.directives = self.directives
globalstate.use_utility_code(refnanny_utility_code)
code = globalstate['before_global_var']
code.putln('#define __Pyx_MODULE_NAME "%s"' % self.full_module_name)
code.putln("int %s%s = 0;" % (Naming.module_is_main, self.full_module_name.replace('.', '__')))
code.putln("")
code.putln("/* Implementation of '%s' */" % env.qualified_name)
code = globalstate['all_the_rest']
self.generate_cached_builtins_decls(env, code)
self.generate_lambda_definitions(env, code)
# generate normal variable and function definitions
self.generate_variable_definitions(env, code)
self.body.generate_function_definitions(env, code)
code.mark_pos(None)
self.generate_typeobj_definitions(env, code)
self.generate_method_table(env, code)
if env.has_import_star:
self.generate_import_star(env, code)
self.generate_pymoduledef_struct(env, code)
# init_globals is inserted before this
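# modules[:-1] drops the last list entry, which is this module itself:
# find_referenced_modules() appends env only after all of its cimports.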
self.generate_module_init_func(modules[:-1], env, globalstate['init_module'])
self.generate_module_cleanup_func(env, globalstate['cleanup_module'])
if Options.embed:
self.generate_main_method(env, globalstate['main_method'])
self.generate_filename_table(globalstate['filename_table'])
self.generate_declarations_for_modules(env, modules, globalstate)
h_code.write('\n')
for utilcode in env.utility_code_list[:]:
globalstate.use_utility_code(utilcode)
globalstate.finalize_main_c_code()
f = open_new_file(result.c_file)
try:
rootwriter.copyto(f)
finally:
f.close()
result.c_file_generated = 1
if options.gdb_debug:
self._serialize_lineno_map(env, rootwriter)
if Options.annotate or options.annotate:
self._generate_annotations(rootwriter, result)
def _generate_annotations(self, rootwriter, result):
self.annotate(rootwriter)
rootwriter.save_annotation(result.main_source_file, result.c_file)
# if we included files, additionally generate one annotation file for each
if not self.scope.included_files:
return
search_include_file = self.scope.context.search_include_directories
target_dir = os.path.abspath(os.path.dirname(result.c_file))
for included_file in self.scope.included_files:
target_file = os.path.abspath(os.path.join(target_dir, included_file))
target_file_dir = os.path.dirname(target_file)
if not target_file_dir.startswith(target_dir):
# any other directories may not be writable => avoid trying
continue
source_file = search_include_file(included_file, "", self.pos, include=True)
if not source_file:
continue
if target_file_dir != target_dir and not os.path.exists(target_file_dir):
try:
os.makedirs(target_file_dir)
except OSError, e:
import errno
if e.errno != errno.EEXIST:
raise
rootwriter.save_annotation(source_file, target_file)
def _serialize_lineno_map(self, env, ccodewriter):
tb = env.context.gdb_debug_outputwriter
markers = ccodewriter.buffer.allmarkers()
d = {}
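# allmarkers() lists, for each generated C line, the Cython source line it
# came from (as used here); d inverts that into cython_lineno -> [c_linenos]
# for the gdb debug XML written below.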
for c_lineno, cython_lineno in enumerate(markers):
if cython_lineno > 0:
d.setdefault(cython_lineno, []).append(c_lineno + 1)
tb.start('LineNumberMapping')
for cython_lineno, c_linenos in sorted(d.iteritems()):
attrs = {
'c_linenos': ' '.join(map(str, c_linenos)),
'cython_lineno': str(cython_lineno),
}
tb.start('LineNumber', attrs)
tb.end('LineNumber')
tb.end('LineNumberMapping')
tb.serialize()
def find_referenced_modules(self, env, module_list, modules_seen):
if env not in modules_seen:
modules_seen[env] = 1
for imported_module in env.cimported_modules:
self.find_referenced_modules(imported_module, module_list, modules_seen)
module_list.append(env)
def sort_types_by_inheritance(self, type_dict, type_order, getkey):
# copy the types into a list moving each parent type before
# its first child
type_list = []
for i, key in enumerate(type_order):
new_entry = type_dict[key]
# collect all base classes to check for children
hierarchy = set()
base = new_entry
while base:
base_type = base.type.base_type
if not base_type:
break
base_key = getkey(base_type)
hierarchy.add(base_key)
base = type_dict.get(base_key)
new_entry.base_keys = hierarchy
# find the first (sub-)subclass and insert before that
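# (an entry already placed whose base_keys contains this key is a
# descendant of the current type, so inserting before it keeps every
# parent ahead of its children)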
for j in range(i):
entry = type_list[j]
if key in entry.base_keys:
type_list.insert(j, new_entry)
break
else:
type_list.append(new_entry)
return type_list
def sort_type_hierarchy(self, module_list, env):
# poor developer's OrderedDict
vtab_dict, vtab_dict_order = {}, []
vtabslot_dict, vtabslot_dict_order = {}, []
for module in module_list:
for entry in module.c_class_entries:
if entry.used and not entry.in_cinclude:
type = entry.type
key = type.vtabstruct_cname
if not key:
continue
if key in vtab_dict:
# FIXME: this should *never* happen, but apparently it does
# for Cython generated utility code
from Cython.Compiler.UtilityCode import NonManglingModuleScope
assert isinstance(entry.scope, NonManglingModuleScope), str(entry.scope)
assert isinstance(vtab_dict[key].scope, NonManglingModuleScope), str(vtab_dict[key].scope)
else:
vtab_dict[key] = entry
vtab_dict_order.append(key)
all_defined_here = module is env
for entry in module.type_entries:
if entry.used and (all_defined_here or entry.defined_in_pxd):
type = entry.type
if type.is_extension_type and not entry.in_cinclude:
type = entry.type
key = type.objstruct_cname
assert key not in vtabslot_dict, key
vtabslot_dict[key] = entry
vtabslot_dict_order.append(key)
def vtabstruct_cname(entry_type):
return entry_type.vtabstruct_cname
vtab_list = self.sort_types_by_inheritance(
vtab_dict, vtab_dict_order, vtabstruct_cname)
def objstruct_cname(entry_type):
return entry_type.objstruct_cname
vtabslot_list = self.sort_types_by_inheritance(
vtabslot_dict, vtabslot_dict_order, objstruct_cname)
return (vtab_list, vtabslot_list)
def sort_cdef_classes(self, env):
key_func = operator.attrgetter('objstruct_cname')
entry_dict, entry_order = {}, []
for entry in env.c_class_entries:
key = key_func(entry.type)
assert key not in entry_dict, key
entry_dict[key] = entry
entry_order.append(key)
env.c_class_entries[:] = self.sort_types_by_inheritance(
entry_dict, entry_order, key_func)
def generate_type_definitions(self, env, modules, vtab_list, vtabslot_list, code):
# TODO: Why are these separated out?
for entry in vtabslot_list:
self.generate_objstruct_predeclaration(entry.type, code)
vtabslot_entries = set(vtabslot_list)
for module in modules:
definition = module is env
if definition:
type_entries = module.type_entries
else:
type_entries = []
for entry in module.type_entries:
if entry.defined_in_pxd:
type_entries.append(entry)
type_entries = [t for t in type_entries if t not in vtabslot_entries]
self.generate_type_header_code(type_entries, code)
for entry in vtabslot_list:
self.generate_objstruct_definition(entry.type, code)
self.generate_typeobj_predeclaration(entry, code)
for entry in vtab_list:
self.generate_typeobj_predeclaration(entry, code)
self.generate_exttype_vtable_struct(entry, code)
self.generate_exttype_vtabptr_declaration(entry, code)
self.generate_exttype_final_methods_declaration(entry, code)
def generate_declarations_for_modules(self, env, modules, globalstate):
typecode = globalstate['type_declarations']
typecode.putln("")
typecode.putln("/*--- Type declarations ---*/")
# This is to work around the fact that array.h isn't part of the C-API,
# but we need to declare it earlier than utility code.
if 'cpython.array' in [m.qualified_name for m in modules]:
typecode.putln('#ifndef _ARRAYARRAY_H')
typecode.putln('struct arrayobject;')
typecode.putln('typedef struct arrayobject arrayobject;')
typecode.putln('#endif')
vtab_list, vtabslot_list = self.sort_type_hierarchy(modules, env)
self.generate_type_definitions(
env, modules, vtab_list, vtabslot_list, typecode)
modulecode = globalstate['module_declarations']
for module in modules:
defined_here = module is env
modulecode.putln("")
modulecode.putln("/* Module declarations from '%s' */" % module.qualified_name)
self.generate_c_class_declarations(module, modulecode, defined_here)
self.generate_cvariable_declarations(module, modulecode, defined_here)
self.generate_cfunction_declarations(module, modulecode, defined_here)
def generate_module_preamble(self, env, cimported_modules, code):
code.putln("/* Generated by Cython %s */" % Version.watermark)
code.putln("")
code.putln("#define PY_SSIZE_T_CLEAN")
# sizeof(PyLongObject.ob_digit[0]) may have been determined dynamically
# at compile time in CPython, in which case we can't know the correct
# storage size for an installed system. We can rely on it only if
# pyconfig.h defines it statically, i.e. if it was set by "configure".
# Once we include "Python.h", it will come up with its own idea about
# a suitable value, which may or may not match the real one.
code.putln("#ifndef CYTHON_USE_PYLONG_INTERNALS")
code.putln("#ifdef PYLONG_BITS_IN_DIGIT")
# assume it's an incorrect left-over
code.putln("#define CYTHON_USE_PYLONG_INTERNALS 0")
code.putln("#else")
code.putln('#include "pyconfig.h"')
code.putln("#ifdef PYLONG_BITS_IN_DIGIT")
code.putln("#define CYTHON_USE_PYLONG_INTERNALS 1")
code.putln("#else")
code.putln("#define CYTHON_USE_PYLONG_INTERNALS 0")
code.putln("#endif")
code.putln("#endif")
code.putln("#endif")
for filename in env.python_include_files:
code.putln('#include "%s"' % filename)
code.putln("#ifndef Py_PYTHON_H")
code.putln(" #error Python headers needed to compile C extensions, please install development version of Python.")
code.putln("#elif PY_VERSION_HEX < 0x02040000")
code.putln(" #error Cython requires Python 2.4+.")
code.putln("#else")
code.globalstate["end"].putln("#endif /* Py_PYTHON_H */")
from Cython import __version__
code.putln('#define CYTHON_ABI "%s"' % __version__.replace('.', '_'))
code.put(UtilityCode.load_as_string("CModulePreamble", "ModuleSetupCode.c")[1])
code.put("""
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
""")
if Future.division in env.context.future_directives:
code.putln(" #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)")
code.putln(" #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)")
else:
code.putln(" #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)")
code.putln(" #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)")
code.putln("#endif")
code.putln("")
self.generate_extern_c_macro_definition(code)
code.putln("")
code.putln("#if defined(WIN32) || defined(MS_WINDOWS)")
code.putln("#define _USE_MATH_DEFINES")
code.putln("#endif")
code.putln("#include <math.h>")
code.putln("#define %s" % Naming.h_guard_prefix + self.api_name(env))
code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
self.generate_includes(env, cimported_modules, code)
code.putln("")
code.putln("#ifdef PYREX_WITHOUT_ASSERTIONS")
code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
code.putln("#endif")
code.putln("")
if env.directives['ccomplex']:
code.putln("")
code.putln("#if !defined(CYTHON_CCOMPLEX)")
code.putln("#define CYTHON_CCOMPLEX 1")
code.putln("#endif")
code.putln("")
code.put(UtilityCode.load_as_string("UtilityFunctionPredeclarations", "ModuleSetupCode.c")[0])
c_string_type = env.directives['c_string_type']
c_string_encoding = env.directives['c_string_encoding']
if c_string_type not in ('bytes', 'bytearray') and not c_string_encoding:
error(self.pos, "a default encoding must be provided if c_string_type is not a byte type")
code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII %s' % int(c_string_encoding == 'ascii'))
if c_string_encoding == 'default':
code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 1')
else:
code.putln('#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0')
code.putln('#define __PYX_DEFAULT_STRING_ENCODING "%s"' % c_string_encoding)
if c_string_type == 'bytearray':
c_string_func_name = 'ByteArray'
else:
c_string_func_name = c_string_type.title()
code.putln('#define __Pyx_PyObject_FromString __Pyx_Py%s_FromString' % c_string_func_name)
code.putln('#define __Pyx_PyObject_FromStringAndSize __Pyx_Py%s_FromStringAndSize' % c_string_func_name)
code.put(UtilityCode.load_as_string("TypeConversions", "TypeConversion.c")[0])
# These utility functions are assumed to exist and used elsewhere.
PyrexTypes.c_long_type.create_to_py_utility_code(env)
PyrexTypes.c_long_type.create_from_py_utility_code(env)
PyrexTypes.c_int_type.create_from_py_utility_code(env)
code.put(Nodes.branch_prediction_macros)
code.putln('')
code.putln('static PyObject *%s;' % env.module_cname)
code.putln('static PyObject *%s;' % env.module_dict_cname)
code.putln('static PyObject *%s;' % Naming.builtins_cname)
code.putln('static PyObject *%s;' % Naming.empty_tuple)
code.putln('static PyObject *%s;' % Naming.empty_bytes)
if Options.pre_import is not None:
code.putln('static PyObject *%s;' % Naming.preimport_cname)
code.putln('static int %s;' % Naming.lineno_cname)
code.putln('static int %s = 0;' % Naming.clineno_cname)
code.putln('static const char * %s= %s;' % (Naming.cfilenm_cname, Naming.file_c_macro))
code.putln('static const char *%s;' % Naming.filename_cname)
def generate_extern_c_macro_definition(self, code):
name = Naming.extern_c_macro
code.putln("#ifndef %s" % name)
code.putln(" #ifdef __cplusplus")
code.putln(' #define %s extern "C"' % name)
code.putln(" #else")
code.putln(" #define %s extern" % name)
code.putln(" #endif")
code.putln("#endif")
def generate_includes(self, env, cimported_modules, code):
includes = []
for filename in env.include_files:
byte_decoded_filename = str(filename)
if byte_decoded_filename[0] == '<' and byte_decoded_filename[-1] == '>':
code.putln('#include %s' % byte_decoded_filename)
else:
code.putln('#include "%s"' % byte_decoded_filename)
code.putln_openmp("#include <omp.h>")
def generate_filename_table(self, code):
code.putln("")
code.putln("static const char *%s[] = {" % Naming.filetable_cname)
if code.globalstate.filename_list:
for source_desc in code.globalstate.filename_list:
filename = os.path.basename(source_desc.get_filenametable_entry())
escaped_filename = filename.replace("\\", "\\\\").replace('"', r'\"')
code.putln('"%s",' % escaped_filename)
else:
# Some C compilers don't like an empty array
code.putln("0")
code.putln("};")
def generate_type_predeclarations(self, env, code):
pass
def generate_type_header_code(self, type_entries, code):
# Generate definitions of structs/unions/enums/typedefs/objstructs.
#self.generate_gcc33_hack(env, code) # Is this still needed?
# Forward declarations
for entry in type_entries:
if not entry.in_cinclude:
#print "generate_type_header_code:", entry.name, repr(entry.type) ###
type = entry.type
if type.is_typedef: # Must test this first!
pass
elif type.is_struct_or_union or type.is_cpp_class:
self.generate_struct_union_predeclaration(entry, code)
elif type.is_extension_type:
self.generate_objstruct_predeclaration(type, code)
# Actual declarations
for entry in type_entries:
if not entry.in_cinclude:
#print "generate_type_header_code:", entry.name, repr(entry.type) ###
type = entry.type
if type.is_typedef: # Must test this first!
self.generate_typedef(entry, code)
elif type.is_enum:
self.generate_enum_definition(entry, code)
elif type.is_struct_or_union:
self.generate_struct_union_definition(entry, code)
elif type.is_cpp_class:
self.generate_cpp_class_definition(entry, code)
elif type.is_extension_type:
self.generate_objstruct_definition(type, code)
def generate_gcc33_hack(self, env, code):
# Workaround for spurious warning generation in gcc 3.3
code.putln("")
for entry in env.c_class_entries:
type = entry.type
if not type.typedef_flag:
name = type.objstruct_cname
if name.startswith("__pyx_"):
tail = name[6:]
else:
tail = name
code.putln("typedef struct %s __pyx_gcc33_%s;" % (
name, tail))
def generate_typedef(self, entry, code):
base_type = entry.type.typedef_base_type
if base_type.is_numeric:
try:
writer = code.globalstate['numeric_typedefs']
except KeyError:
writer = code
else:
writer = code
writer.mark_pos(entry.pos)
writer.putln("typedef %s;" % base_type.declaration_code(entry.cname))
def sue_predeclaration(self, type, kind, name):
if type.typedef_flag:
return "%s %s;\ntypedef %s %s %s;" % (
kind, name,
kind, name, name)
else:
return "%s %s;" % (kind, name)
def generate_struct_union_predeclaration(self, entry, code):
type = entry.type
if type.is_cpp_class and type.templates:
code.putln("template <typename %s>" % ", typename ".join([T.declaration_code("") for T in type.templates]))
code.putln(self.sue_predeclaration(type, type.kind, type.cname))
def sue_header_footer(self, type, kind, name):
header = "%s %s {" % (kind, name)
footer = "};"
return header, footer
def generate_struct_union_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
scope = type.scope
if scope:
kind = type.kind
packed = type.is_struct and type.packed
if packed:
kind = "%s %s" % (type.kind, "__Pyx_PACKED")
code.globalstate.use_utility_code(packed_struct_utility_code)
header, footer = \
self.sue_header_footer(type, kind, type.cname)
if packed:
code.putln("#if defined(__SUNPRO_C)")
code.putln(" #pragma pack(1)")
code.putln("#elif !defined(__GNUC__)")
code.putln(" #pragma pack(push, 1)")
code.putln("#endif")
code.putln(header)
var_entries = scope.var_entries
if not var_entries:
error(entry.pos,
"Empty struct or union definition not allowed outside a"
" 'cdef extern from' block")
for attr in var_entries:
code.putln(
"%s;" %
attr.type.declaration_code(attr.cname))
code.putln(footer)
if packed:
code.putln("#if defined(__SUNPRO_C)")
code.putln(" #pragma pack()")
code.putln("#elif !defined(__GNUC__)")
code.putln(" #pragma pack(pop)")
code.putln("#endif")
def generate_cpp_class_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
scope = type.scope
if scope:
if type.templates:
code.putln("template <class %s>" % ", class ".join([T.declaration_code("") for T in type.templates]))
# Just let everything be public.
code.put("struct %s" % type.cname)
if type.base_classes:
base_class_decl = ", public ".join(
[base_class.declaration_code("") for base_class in type.base_classes])
code.put(" : public %s" % base_class_decl)
code.putln(" {")
has_virtual_methods = False
has_destructor = False
for attr in scope.var_entries:
if attr.type.is_cfunction and attr.name != "<init>":
code.put("virtual ")
has_virtual_methods = True
if attr.cname[0] == '~':
has_destructor = True
code.putln(
"%s;" %
attr.type.declaration_code(attr.cname))
if has_virtual_methods and not has_destructor:
code.put("virtual ~%s() { }" % type.cname)
code.putln("};")
def generate_enum_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
name = entry.cname or entry.name or ""
header, footer = \
self.sue_header_footer(type, "enum", name)
code.putln(header)
enum_values = entry.enum_values
if not enum_values:
error(entry.pos,
"Empty enum definition not allowed outside a"
" 'cdef extern from' block")
else:
last_entry = enum_values[-1]
# this does not really generate code, just builds the result value
for value_entry in enum_values:
if value_entry.value_node is not None:
value_entry.value_node.generate_evaluation_code(code)
for value_entry in enum_values:
if value_entry.value_node is None:
value_code = value_entry.cname
else:
value_code = ("%s = %s" % (
value_entry.cname,
value_entry.value_node.result()))
if value_entry is not last_entry:
value_code += ","
code.putln(value_code)
code.putln(footer)
if entry.type.typedef_flag:
# Not pre-declared.
code.putln("typedef enum %s %s;" % (name, name))
def generate_typeobj_predeclaration(self, entry, code):
code.putln("")
name = entry.type.typeobj_cname
if name:
if entry.visibility == 'extern' and not entry.in_cinclude:
code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_IMPORT"),
name))
elif entry.visibility == 'public':
code.putln("%s %s %s;" % (
Naming.extern_c_macro,
PyrexTypes.public_decl("PyTypeObject", "DL_EXPORT"),
name))
# ??? Do we really need the rest of this? ???
#else:
# code.putln("static PyTypeObject %s;" % name)
def generate_exttype_vtable_struct(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate struct declaration for an extension type's vtable.
type = entry.type
scope = type.scope
self.specialize_fused_types(scope)
if type.vtabstruct_cname:
code.putln("")
code.putln(
"struct %s {" %
type.vtabstruct_cname)
if type.base_type and type.base_type.vtabstruct_cname:
code.putln("struct %s %s;" % (
type.base_type.vtabstruct_cname,
Naming.obj_base_cname))
for method_entry in scope.cfunc_entries:
if not method_entry.is_inherited:
code.putln(
"%s;" % method_entry.type.declaration_code("(*%s)" % method_entry.cname))
code.putln(
"};")
def generate_exttype_vtabptr_declaration(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate declaration of pointer to an extension type's vtable.
type = entry.type
if type.vtabptr_cname:
code.putln("static struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabptr_cname))
def generate_exttype_final_methods_declaration(self, entry, code):
if not entry.used:
return
code.mark_pos(entry.pos)
# Generate final methods prototypes
type = entry.type
for method_entry in entry.type.scope.cfunc_entries:
if not method_entry.is_inherited and method_entry.final_func_cname:
declaration = method_entry.type.declaration_code(
method_entry.final_func_cname)
modifiers = code.build_function_modifiers(method_entry.func_modifiers)
code.putln("static %s%s;" % (modifiers, declaration))
def generate_objstruct_predeclaration(self, type, code):
if not type.scope:
return
code.putln(self.sue_predeclaration(type, "struct", type.objstruct_cname))
def generate_objstruct_definition(self, type, code):
code.mark_pos(type.pos)
# Generate object struct definition for an
# extension type.
if not type.scope:
return # Forward declared but never defined
header, footer = \
self.sue_header_footer(type, "struct", type.objstruct_cname)
code.putln(header)
base_type = type.base_type
if base_type:
basestruct_cname = base_type.objstruct_cname
if basestruct_cname == "PyTypeObject":
# User-defined subclasses of type are heap allocated.
basestruct_cname = "PyHeapTypeObject"
code.putln(
"%s%s %s;" % (
("struct ", "")[base_type.typedef_flag],
basestruct_cname,
Naming.obj_base_cname))
else:
code.putln(
"PyObject_HEAD")
if type.vtabslot_cname and not (type.base_type and type.base_type.vtabslot_cname):
code.putln(
"struct %s *%s;" % (
type.vtabstruct_cname,
type.vtabslot_cname))
for attr in type.scope.var_entries:
if attr.is_declared_generic:
attr_type = py_object_type
else:
attr_type = attr.type
code.putln(
"%s;" %
attr_type.declaration_code(attr.cname))
code.putln(footer)
if type.objtypedef_cname is not None:
# Only for exposing public typedef name.
code.putln("typedef struct %s %s;" % (type.objstruct_cname, type.objtypedef_cname))
def generate_c_class_declarations(self, env, code, definition):
for entry in env.c_class_entries:
if definition or entry.defined_in_pxd:
code.putln("static PyTypeObject *%s = 0;" %
entry.type.typeptr_cname)
def generate_cvariable_declarations(self, env, code, definition):
if env.is_cython_builtin:
return
for entry in env.var_entries:
if (entry.in_cinclude or entry.in_closure or
(entry.visibility == 'private' and
not (entry.defined_in_pxd or entry.used))):
continue
storage_class = None
dll_linkage = None
cname = None
init = None
if entry.visibility == 'extern':
storage_class = Naming.extern_c_macro
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'public':
storage_class = Naming.extern_c_macro
if definition:
dll_linkage = "DL_EXPORT"
else:
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'private':
storage_class = "static"
dll_linkage = None
if entry.init is not None:
init = entry.type.literal_code(entry.init)
type = entry.type
cname = entry.cname
if entry.defined_in_pxd and not definition:
storage_class = "static"
dll_linkage = None
type = CPtrType(type)
cname = env.mangle(Naming.varptr_prefix, entry.name)
init = 0
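# Non-defining modules get a static pointer initialised to 0 plus a
# '#define name (*ptr)' emitted below; the pointer is presumably filled in
# at import time by the void-pointer import helper (cf. generate_api_code).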
if storage_class:
code.put("%s " % storage_class)
code.put(type.declaration_code(
cname, dll_linkage = dll_linkage))
if init is not None:
code.put_safe(" = %s" % init)
code.putln(";")
if entry.cname != cname:
code.putln("#define %s (*%s)" % (entry.cname, cname))
def generate_cfunction_declarations(self, env, code, definition):
for entry in env.cfunc_entries:
if entry.used or (entry.visibility == 'public' or entry.api):
generate_cfunction_declaration(entry, env, code, definition)
def generate_variable_definitions(self, env, code):
for entry in env.var_entries:
if (not entry.in_cinclude and
entry.visibility == "public"):
code.put(entry.type.declaration_code(entry.cname))
if entry.init is not None:
init = entry.type.literal_code(entry.init)
code.put_safe(" = %s" % init)
code.putln(";")
def generate_typeobj_definitions(self, env, code):
full_module_name = env.qualified_name
for entry in env.c_class_entries:
#print "generate_typeobj_definitions:", entry.name
#print "...visibility =", entry.visibility
if entry.visibility != 'extern':
type = entry.type
scope = type.scope
if scope: # could be None if there was an error
self.generate_exttype_vtable(scope, code)
self.generate_new_function(scope, code, entry)
self.generate_dealloc_function(scope, code)
if scope.needs_gc():
self.generate_traverse_function(scope, code, entry)
if scope.needs_tp_clear():
self.generate_clear_function(scope, code, entry)
if scope.defines_any(["__getitem__"]):
self.generate_getitem_int_function(scope, code)
if scope.defines_any(["__setitem__", "__delitem__"]):
self.generate_ass_subscript_function(scope, code)
if scope.defines_any(["__getslice__", "__setslice__", "__delslice__"]):
warning(self.pos, "__getslice__, __setslice__, and __delslice__ are not supported by Python 3, use __getitem__, __setitem__, and __delitem__ instead", 1)
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("#error __getslice__, __setslice__, and __delslice__ not supported in Python 3.")
code.putln("#endif")
if scope.defines_any(["__setslice__", "__delslice__"]):
self.generate_ass_slice_function(scope, code)
if scope.defines_any(["__getattr__","__getattribute__"]):
self.generate_getattro_function(scope, code)
if scope.defines_any(["__setattr__", "__delattr__"]):
self.generate_setattro_function(scope, code)
if scope.defines_any(["__get__"]):
self.generate_descr_get_function(scope, code)
if scope.defines_any(["__set__", "__delete__"]):
self.generate_descr_set_function(scope, code)
self.generate_property_accessors(scope, code)
self.generate_method_table(scope, code)
self.generate_getset_table(scope, code)
self.generate_typeobj_definition(full_module_name, entry, code)
def generate_exttype_vtable(self, scope, code):
# Generate the definition of an extension type's vtable.
type = scope.parent_type
if type.vtable_cname:
code.putln("static struct %s %s;" % (
type.vtabstruct_cname,
type.vtable_cname))
def generate_self_cast(self, scope, code):
type = scope.parent_type
code.putln(
"%s = (%s)o;" % (
type.declaration_code("p"),
type.declaration_code("")))
def generate_new_function(self, scope, code, cclass_entry):
tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
slot_func = scope.mangle_internal("tp_new")
type = scope.parent_type
base_type = type.base_type
have_entries, (py_attrs, py_buffers, memoryview_slices) = \
scope.get_refcounted_entries()
is_final_type = scope.parent_type.is_final_type
if scope.is_internal:
# internal classes (should) never need None inits, normal zeroing will do
py_attrs = []
cpp_class_attrs = [entry for entry in scope.var_entries
if entry.type.is_cpp_class]
new_func_entry = scope.lookup_here("__new__")
if base_type or (new_func_entry and new_func_entry.is_special
and not new_func_entry.trivial_signature):
unused_marker = ''
else:
unused_marker = 'CYTHON_UNUSED '
if base_type:
freelist_size = 0 # not currently supported
else:
freelist_size = scope.directives.get('freelist', 0)
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
decls = code.globalstate['decls']
decls.putln("static PyObject *%s(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/" %
slot_func)
code.putln("")
if freelist_size:
code.putln("static %s[%d];" % (
scope.parent_type.declaration_code(freelist_name),
freelist_size))
code.putln("static int %s = 0;" % freecount_name)
code.putln("")
code.putln(
"static PyObject *%s(PyTypeObject *t, %sPyObject *a, %sPyObject *k) {"
% (slot_func, unused_marker, unused_marker))
need_self_cast = (type.vtabslot_cname or
(py_buffers or memoryview_slices or py_attrs) or
cpp_class_attrs)
if need_self_cast:
code.putln("%s;" % scope.parent_type.declaration_code("p"))
if base_type:
tp_new = TypeSlots.get_base_slot_function(scope, tp_slot)
if tp_new is None:
tp_new = "%s->tp_new" % base_type.typeptr_cname
code.putln("PyObject *o = %s(t, a, k);" % tp_new)
else:
code.putln("PyObject *o;")
if freelist_size:
code.globalstate.use_utility_code(
UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
if is_final_type:
type_safety_check = ''
else:
type_safety_check = ' & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)'
obj_struct = type.declaration_code("", deref=True)
code.putln("if (CYTHON_COMPILING_IN_CPYTHON && likely((%s > 0) & (t->tp_basicsize == sizeof(%s))%s)) {" % (
freecount_name, obj_struct, type_safety_check))
code.putln("o = (PyObject*)%s[--%s];" % (
freelist_name, freecount_name))
code.putln("memset(o, 0, sizeof(%s));" % obj_struct)
code.putln("(void) PyObject_INIT(o, t);")
if scope.needs_gc():
code.putln("PyObject_GC_Track(o);")
code.putln("} else {")
if not is_final_type:
code.putln("if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {")
code.putln("o = (*t->tp_alloc)(t, 0);")
if not is_final_type:
code.putln("} else {")
code.putln("o = (PyObject *) PyBaseObject_Type.tp_new(t, %s, 0);" % Naming.empty_tuple)
code.putln("}")
code.putln("if (unlikely(!o)) return 0;")
if freelist_size and not base_type:
code.putln('}')
if need_self_cast:
code.putln("p = %s;" % type.cast_code("o"))
#if need_self_cast:
# self.generate_self_cast(scope, code)
if type.vtabslot_cname:
vtab_base_type = type
while vtab_base_type.base_type and vtab_base_type.base_type.vtabstruct_cname:
vtab_base_type = vtab_base_type.base_type
if vtab_base_type is not type:
struct_type_cast = "(struct %s*)" % vtab_base_type.vtabstruct_cname
else:
struct_type_cast = ""
code.putln("p->%s = %s%s;" % (
type.vtabslot_cname,
struct_type_cast, type.vtabptr_cname))
for entry in cpp_class_attrs:
code.putln("new((void*)&(p->%s)) %s();" %
(entry.cname, entry.type.declaration_code("")))
for entry in py_attrs:
code.put_init_var_to_py_none(entry, "p->%s", nanny=False)
for entry in memoryview_slices:
code.putln("p->%s.data = NULL;" % entry.cname)
code.putln("p->%s.memview = NULL;" % entry.cname)
for entry in py_buffers:
code.putln("p->%s.obj = NULL;" % entry.cname)
if cclass_entry.cname == '__pyx_memoryviewslice':
code.putln("p->from_slice.memview = NULL;")
if new_func_entry and new_func_entry.is_special:
if new_func_entry.trivial_signature:
cinit_args = "o, %s, NULL" % Naming.empty_tuple
else:
cinit_args = "o, a, k"
code.putln(
"if (unlikely(%s(%s) < 0)) {" %
(new_func_entry.func_cname, cinit_args))
code.put_decref_clear("o", py_object_type, nanny=False)
code.putln(
"}")
code.putln(
"return o;")
code.putln(
"}")
def generate_dealloc_function(self, scope, code):
tp_slot = TypeSlots.ConstructorSlot("tp_dealloc", '__dealloc__')
slot_func = scope.mangle_internal("tp_dealloc")
base_type = scope.parent_type.base_type
if tp_slot.slot_code(scope) != slot_func:
return # never used
slot_func_cname = scope.mangle_internal("tp_dealloc")
code.putln("")
code.putln(
"static void %s(PyObject *o) {" % slot_func_cname)
is_final_type = scope.parent_type.is_final_type
needs_gc = scope.needs_gc()
weakref_slot = scope.lookup_here("__weakref__")
if weakref_slot not in scope.var_entries:
weakref_slot = None
_, (py_attrs, _, memoryview_slices) = scope.get_refcounted_entries()
cpp_class_attrs = [entry for entry in scope.var_entries
if entry.type.is_cpp_class]
if py_attrs or cpp_class_attrs or memoryview_slices or weakref_slot:
self.generate_self_cast(scope, code)
if not is_final_type:
# in Py3.4+, call tp_finalize() as early as possible
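# (0x030400a1 is CPython 3.4.0a1, the first release with PEP 442
# finalizers; PyObject_CallFinalizerFromDealloc() returns nonzero when the
# finalizer resurrected the object, in which case dealloc must return.)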
code.putln("#if PY_VERSION_HEX >= 0x030400a1")
if needs_gc:
finalised_check = '!_PyGC_FINALIZED(o)'
else:
finalised_check = (
'(!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))')
code.putln("if (unlikely(Py_TYPE(o)->tp_finalize) && %s) {" %
finalised_check)
# if instance was resurrected by finaliser, return
code.putln("if (PyObject_CallFinalizerFromDealloc(o)) return;")
code.putln("}")
code.putln("#endif")
if needs_gc:
# We must mark this object as (gc) untracked while tearing
# it down, lest the garbage collection is invoked while
# running this destructor.
code.putln("PyObject_GC_UnTrack(o);")
# call the user's __dealloc__
self.generate_usr_dealloc_call(scope, code)
if weakref_slot:
code.putln("if (p->__weakref__) PyObject_ClearWeakRefs(o);")
for entry in cpp_class_attrs:
code.putln("__Pyx_call_destructor(&p->%s);" % entry.cname)
for entry in py_attrs:
code.put_xdecref_clear("p->%s" % entry.cname, entry.type, nanny=False,
clear_before_decref=True)
for entry in memoryview_slices:
code.put_xdecref_memoryviewslice("p->%s" % entry.cname,
have_gil=True)
if base_type:
if needs_gc:
# The base class deallocator probably expects this to be tracked,
# so undo the untracking above.
if base_type.scope and base_type.scope.needs_gc():
code.putln("PyObject_GC_Track(o);")
else:
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln("if (PyType_IS_GC(Py_TYPE(o)->tp_base))")
code.putln("#endif")
code.putln("PyObject_GC_Track(o);")
tp_dealloc = TypeSlots.get_base_slot_function(scope, tp_slot)
if tp_dealloc is not None:
code.putln("%s(o);" % tp_dealloc)
elif base_type.is_builtin_type:
code.putln("%s->tp_dealloc(o);" % base_type.typeptr_cname)
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
base_cname = base_type.typeptr_cname
code.putln("if (likely(%s)) %s->tp_dealloc(o); "
"else __Pyx_call_next_tp_dealloc(o, %s);" % (
base_cname, base_cname, slot_func_cname))
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpDealloc", "ExtensionTypes.c"))
else:
freelist_size = scope.directives.get('freelist', 0)
if freelist_size:
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
if is_final_type:
type_safety_check = ''
else:
type_safety_check = (
' & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0)')
type = scope.parent_type
code.putln("if (CYTHON_COMPILING_IN_CPYTHON && ((%s < %d) & (Py_TYPE(o)->tp_basicsize == sizeof(%s))%s)) {" % (
freecount_name, freelist_size, type.declaration_code("", deref=True),
type_safety_check))
code.putln("%s[%s++] = %s;" % (
freelist_name, freecount_name, type.cast_code("o")))
code.putln("} else {")
code.putln("(*Py_TYPE(o)->tp_free)(o);")
if freelist_size:
code.putln("}")
code.putln(
"}")
def generate_usr_dealloc_call(self, scope, code):
entry = scope.lookup_here("__dealloc__")
if not entry:
return
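# The generated block temporarily bumps the refcount so the half-destroyed
# object survives any calls made from the user's __dealloc__, and saves and
# restores the pending exception state around the call, since tp_dealloc
# must not leak a live exception.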
code.putln("{")
code.putln("PyObject *etype, *eval, *etb;")
code.putln("PyErr_Fetch(&etype, &eval, &etb);")
code.putln("++Py_REFCNT(o);")
code.putln("%s(o);" % entry.func_cname)
code.putln("--Py_REFCNT(o);")
code.putln("PyErr_Restore(etype, eval, etb);")
code.putln("}")
def generate_traverse_function(self, scope, code, cclass_entry):
tp_slot = TypeSlots.GCDependentSlot("tp_traverse")
slot_func = scope.mangle_internal("tp_traverse")
base_type = scope.parent_type.base_type
if tp_slot.slot_code(scope) != slot_func:
return # never used
code.putln("")
code.putln(
"static int %s(PyObject *o, visitproc v, void *a) {"
% slot_func)
have_entries, (py_attrs, py_buffers, memoryview_slices) = (
scope.get_refcounted_entries(include_gc_simple=False))
if base_type or py_attrs:
code.putln("int e;")
if py_attrs or py_buffers:
self.generate_self_cast(scope, code)
if base_type:
# want to call it explicitly if possible so inlining can be performed
static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
if static_call:
code.putln("e = %s(o, v, a); if (e) return e;" % static_call)
elif base_type.is_builtin_type:
base_cname = base_type.typeptr_cname
code.putln("if (!%s->tp_traverse); else { e = %s->tp_traverse(o,v,a); if (e) return e; }" % (
base_cname, base_cname))
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
base_cname = base_type.typeptr_cname
code.putln("e = ((likely(%s)) ? ((%s->tp_traverse) ? %s->tp_traverse(o, v, a) : 0) : __Pyx_call_next_tp_traverse(o, v, a, %s)); if (e) return e;" % (
base_cname, base_cname, base_cname, slot_func))
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpTraverse", "ExtensionTypes.c"))
for entry in py_attrs:
var_code = "p->%s" % entry.cname
code.putln(
"if (%s) {"
% var_code)
if entry.type.is_extension_type:
var_code = "((PyObject*)%s)" % var_code
code.putln(
"e = (*v)(%s, a); if (e) return e;"
% var_code)
code.putln(
"}")
# Traverse buffer exporting objects.
# Note: not traversing memoryview attributes of memoryview slices!
# When triggered by the GC, that would cause multiple visits (gc_refs
# subtractions that are not matched by the object's reference count!)
for entry in py_buffers:
cname = entry.cname + ".obj"
code.putln("if (p->%s) {" % cname)
code.putln( "e = (*v)(p->%s, a); if (e) return e;" % cname)
code.putln("}")
code.putln(
"return 0;")
code.putln(
"}")
def generate_clear_function(self, scope, code, cclass_entry):
tp_slot = TypeSlots.GCDependentSlot("tp_clear")
slot_func = scope.mangle_internal("tp_clear")
base_type = scope.parent_type.base_type
if tp_slot.slot_code(scope) != slot_func:
return # never used
have_entries, (py_attrs, py_buffers, memoryview_slices) = (
scope.get_refcounted_entries(include_gc_simple=False))
if py_attrs or py_buffers or base_type:
unused = ''
else:
unused = 'CYTHON_UNUSED '
code.putln("")
code.putln("static int %s(%sPyObject *o) {" % (slot_func, unused))
if py_attrs and Options.clear_to_none:
code.putln("PyObject* tmp;")
if py_attrs or py_buffers:
self.generate_self_cast(scope, code)
if base_type:
# want to call it explicitly if possible so inlining can be performed
static_call = TypeSlots.get_base_slot_function(scope, tp_slot)
if static_call:
code.putln("%s(o);" % static_call)
elif base_type.is_builtin_type:
base_cname = base_type.typeptr_cname
code.putln("if (!%s->tp_clear); else %s->tp_clear(o);" % (
base_cname, base_cname))
else:
# This is an externally defined type. Calling through the
# cimported base type pointer directly interacts badly with
# the module cleanup, which may already have cleared it.
# In that case, fall back to traversing the type hierarchy.
base_cname = base_type.typeptr_cname
code.putln("if (likely(%s)) { if (%s->tp_clear) %s->tp_clear(o); } else __Pyx_call_next_tp_clear(o, %s);" % (
base_cname, base_cname, base_cname, slot_func))
code.globalstate.use_utility_code(
UtilityCode.load_cached("CallNextTpClear", "ExtensionTypes.c"))
if Options.clear_to_none:
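# Swap-before-decref: stash the old value, point the slot at None, then
# Py_XDECREF the stashed value, so re-entrant code triggered by the decref
# never observes a dangling pointer in the instance.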
for entry in py_attrs:
name = "p->%s" % entry.cname
code.putln("tmp = ((PyObject*)%s);" % name)
if entry.is_declared_generic:
code.put_init_to_py_none(name, py_object_type, nanny=False)
else:
code.put_init_to_py_none(name, entry.type, nanny=False)
code.putln("Py_XDECREF(tmp);")
else:
for entry in py_attrs:
code.putln("Py_CLEAR(p->%s);" % entry.cname)
for entry in py_buffers:
# Note: shouldn't this call __Pyx_ReleaseBuffer ??
code.putln("Py_CLEAR(p->%s.obj);" % entry.cname)
if cclass_entry.cname == '__pyx_memoryviewslice':
code.putln("__PYX_XDEC_MEMVIEW(&p->from_slice, 1);")
code.putln(
"return 0;")
code.putln(
"}")
def generate_getitem_int_function(self, scope, code):
# This function is put into the sq_item slot when
# a __getitem__ method is present. It converts its
# argument to a Python integer and calls mp_subscript.
code.putln(
"static PyObject *%s(PyObject *o, Py_ssize_t i) {" %
scope.mangle_internal("sq_item"))
code.putln(
"PyObject *r;")
code.putln(
"PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;")
code.putln(
"r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);")
code.putln(
"Py_DECREF(x);")
code.putln(
"return r;")
code.putln(
"}")
def generate_ass_subscript_function(self, scope, code):
# Setting and deleting an item are both done through
# the ass_subscript method, so we dispatch to user's __setitem__
# or __delitem__, or raise an exception.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setitem__")
del_entry = scope.lookup_here("__delitem__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *i, PyObject *v) {" %
scope.mangle_internal("mp_ass_subscript"))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, i, v);" %
set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "Subscript assignment not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, i);" %
del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_mapping", "mp_ass_subscript", "o, i, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_guarded_basetype_call(
self, base_type, substructure, slot, args, code):
if base_type:
base_tpname = base_type.typeptr_cname
if substructure:
code.putln(
"if (%s->%s && %s->%s->%s)" % (
base_tpname, substructure, base_tpname, substructure, slot))
code.putln(
" return %s->%s->%s(%s);" % (
base_tpname, substructure, slot, args))
else:
code.putln(
"if (%s->%s)" % (
base_tpname, slot))
code.putln(
" return %s->%s(%s);" % (
base_tpname, slot, args))
def generate_ass_slice_function(self, scope, code):
# Setting and deleting a slice are both done through
# the ass_slice method, so we dispatch to user's __setslice__
# or __delslice__, or raise an exception.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setslice__")
del_entry = scope.lookup_here("__delslice__")
code.putln("")
code.putln(
"static int %s(PyObject *o, Py_ssize_t i, Py_ssize_t j, PyObject *v) {" %
scope.mangle_internal("sq_ass_slice"))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, i, j, v);" %
set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "2-element slice assignment not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, i, j);" %
del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, "tp_as_sequence", "sq_ass_slice", "o, i, j, v", code)
code.putln(
"PyErr_Format(PyExc_NotImplementedError,")
code.putln(
' "2-element slice deletion not supported by %.200s", Py_TYPE(o)->tp_name);')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_getattro_function(self, scope, code):
# First try to get the attribute using __getattribute__, if defined, or
# PyObject_GenericGetAttr.
#
# If that raises an AttributeError, call the __getattr__ if defined.
#
# In both cases, defined can be in this class, or any base class.
def lookup_here_or_base(n, type=None):
# Recursive lookup
if type is None:
type = scope.parent_type
r = type.scope.lookup_here(n)
if r is None and \
type.base_type is not None:
return lookup_here_or_base(n,type.base_type)
else:
return r
getattr_entry = lookup_here_or_base("__getattr__")
getattribute_entry = lookup_here_or_base("__getattribute__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, PyObject *n) {"
% scope.mangle_internal("tp_getattro"))
if getattribute_entry is not None:
code.putln(
"PyObject *v = %s(o, n);" %
getattribute_entry.func_cname)
else:
code.putln(
"PyObject *v = PyObject_GenericGetAttr(o, n);")
if getattr_entry is not None:
code.putln(
"if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
code.putln(
"PyErr_Clear();")
code.putln(
"v = %s(o, n);" %
getattr_entry.func_cname)
code.putln(
"}")
code.putln(
"return v;")
code.putln(
"}")
def generate_setattro_function(self, scope, code):
# Setting and deleting an attribute are both done through
# the setattro method, so we dispatch to user's __setattr__
# or __delattr__ or fall back on PyObject_GenericSetAttr.
base_type = scope.parent_type.base_type
set_entry = scope.lookup_here("__setattr__")
del_entry = scope.lookup_here("__delattr__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *n, PyObject *v) {" %
scope.mangle_internal("tp_setattro"))
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, n, v);" %
set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_setattro", "o, n, v", code)
code.putln(
"return PyObject_GenericSetAttr(o, n, v);")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o, n);" %
del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_setattro", "o, n, v", code)
code.putln(
"return PyObject_GenericSetAttr(o, n, 0);")
code.putln(
"}")
code.putln(
"}")
def generate_descr_get_function(self, scope, code):
# The __get__ function of a descriptor object can be
# called with NULL for the second or third arguments
# under some circumstances, so we replace them with
# None in that case.
user_get_entry = scope.lookup_here("__get__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, PyObject *i, PyObject *c) {" %
scope.mangle_internal("tp_descr_get"))
code.putln(
"PyObject *r = 0;")
code.putln(
"if (!i) i = Py_None;")
code.putln(
"if (!c) c = Py_None;")
#code.put_incref("i", py_object_type)
#code.put_incref("c", py_object_type)
code.putln(
"r = %s(o, i, c);" %
user_get_entry.func_cname)
#code.put_decref("i", py_object_type)
#code.put_decref("c", py_object_type)
code.putln(
"return r;")
code.putln(
"}")
def generate_descr_set_function(self, scope, code):
# Setting and deleting are both done through the __set__
# method of a descriptor, so we dispatch to user's __set__
# or __delete__ or raise an exception.
base_type = scope.parent_type.base_type
user_set_entry = scope.lookup_here("__set__")
user_del_entry = scope.lookup_here("__delete__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *i, PyObject *v) {" %
scope.mangle_internal("tp_descr_set"))
code.putln(
"if (v) {")
if user_set_entry:
code.putln(
"return %s(o, i, v);" %
user_set_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_descr_set", "o, i, v", code)
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if user_del_entry:
code.putln(
"return %s(o, i);" %
user_del_entry.func_cname)
else:
self.generate_guarded_basetype_call(
base_type, None, "tp_descr_set", "o, i, v", code)
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__delete__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_property_accessors(self, cclass_scope, code):
for entry in cclass_scope.property_entries:
property_scope = entry.scope
if property_scope.defines_any(["__get__"]):
self.generate_property_get_function(entry, code)
if property_scope.defines_any(["__set__", "__del__"]):
self.generate_property_set_function(entry, code)
def generate_property_get_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.getter_cname = property_scope.parent_scope.mangle(
Naming.prop_get_prefix, property_entry.name)
get_entry = property_scope.lookup_here("__get__")
code.putln("")
code.putln(
"static PyObject *%s(PyObject *o, CYTHON_UNUSED void *x) {" %
property_entry.getter_cname)
code.putln(
"return %s(o);" %
get_entry.func_cname)
code.putln(
"}")
def generate_property_set_function(self, property_entry, code):
property_scope = property_entry.scope
property_entry.setter_cname = property_scope.parent_scope.mangle(
Naming.prop_set_prefix, property_entry.name)
set_entry = property_scope.lookup_here("__set__")
del_entry = property_scope.lookup_here("__del__")
code.putln("")
code.putln(
"static int %s(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) {" %
property_entry.setter_cname)
code.putln(
"if (v) {")
if set_entry:
code.putln(
"return %s(o, v);" %
set_entry.func_cname)
else:
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__set__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"else {")
if del_entry:
code.putln(
"return %s(o);" %
del_entry.func_cname)
else:
code.putln(
'PyErr_SetString(PyExc_NotImplementedError, "__del__");')
code.putln(
"return -1;")
code.putln(
"}")
code.putln(
"}")
def generate_typeobj_definition(self, modname, entry, code):
type = entry.type
scope = type.scope
for suite in TypeSlots.substructures:
suite.generate_substructure(scope, code)
code.putln("")
if entry.visibility == 'public':
header = "DL_EXPORT(PyTypeObject) %s = {"
else:
header = "static PyTypeObject %s = {"
#code.putln(header % scope.parent_type.typeobj_cname)
code.putln(header % type.typeobj_cname)
code.putln(
"PyVarObject_HEAD_INIT(0, 0)")
code.putln(
'__Pyx_NAMESTR("%s.%s"), /*tp_name*/' % (
self.full_module_name, scope.class_name))
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
code.putln(
"sizeof(%s), /*tp_basicsize*/" %
objstruct)
code.putln(
"0, /*tp_itemsize*/")
for slot in TypeSlots.slot_table:
slot.generate(scope, code)
code.putln(
"};")
def generate_method_table(self, env, code):
if env.is_c_class_scope and not env.pyfunc_entries:
return
code.putln("")
code.putln(
"static PyMethodDef %s[] = {" %
env.method_table_cname)
for entry in env.pyfunc_entries:
if not entry.fused_cfunction:
code.put_pymethoddef(entry, ",")
code.putln(
"{0, 0, 0, 0}")
code.putln(
"};")
def generate_getset_table(self, env, code):
if env.property_entries:
code.putln("")
code.putln(
"static struct PyGetSetDef %s[] = {" %
env.getset_table_cname)
for entry in env.property_entries:
if entry.doc:
doc_code = "__Pyx_DOCSTR(%s)" % code.get_string_const(entry.doc)
else:
doc_code = "0"
code.putln(
'{(char *)"%s", %s, %s, %s, 0},' % (
entry.name,
entry.getter_cname or "0",
entry.setter_cname or "0",
doc_code))
code.putln(
"{0, 0, 0, 0, 0}")
code.putln(
"};")
def generate_import_star(self, env, code):
env.use_utility_code(streq_utility_code)
code.putln()
code.putln("static char* %s_type_names[] = {" % Naming.import_star)
for name, entry in sorted(env.entries.items()):
if entry.is_type:
code.putln('"%s",' % name)
code.putln("0")
code.putln("};")
code.putln()
code.enter_cfunc_scope() # as we need labels
code.putln("static int %s(PyObject *o, PyObject* py_name, char *name) {" % Naming.import_star_set)
code.putln("char** type_name = %s_type_names;" % Naming.import_star)
code.putln("while (*type_name) {")
code.putln("if (__Pyx_StrEq(name, *type_name)) {")
code.putln('PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);')
code.putln('goto bad;')
code.putln("}")
code.putln("type_name++;")
code.putln("}")
old_error_label = code.new_error_label()
code.putln("if (0);") # so the first one can be "else if"
for name, entry in env.entries.items():
if entry.is_cglobal and entry.used:
code.putln('else if (__Pyx_StrEq(name, "%s")) {' % name)
if entry.type.is_pyobject:
if entry.type.is_extension_type or entry.type.is_builtin_type:
code.putln("if (!(%s)) %s;" % (
entry.type.type_test_code("o"),
code.error_goto(entry.pos)))
code.putln("Py_INCREF(o);")
code.put_decref(entry.cname, entry.type, nanny=False)
code.putln("%s = %s;" % (
entry.cname,
PyrexTypes.typecast(entry.type, py_object_type, "o")))
elif entry.type.from_py_function:
rhs = "%s(o)" % entry.type.from_py_function
if entry.type.is_enum:
rhs = PyrexTypes.typecast(entry.type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; if (%s) %s;" % (
entry.cname,
rhs,
entry.type.error_condition(entry.cname),
code.error_goto(entry.pos)))
else:
code.putln('PyErr_Format(PyExc_TypeError, "Cannot convert Python object %s to %s");' % (name, entry.type))
code.putln(code.error_goto(entry.pos))
code.putln("}")
code.putln("else {")
code.putln("if (PyObject_SetAttr(%s, py_name, o) < 0) goto bad;" % Naming.module_cname)
code.putln("}")
code.putln("return 0;")
if code.label_used(code.error_label):
code.put_label(code.error_label)
# This helps locate the offending name.
code.put_add_traceback(self.full_module_name)
code.error_label = old_error_label
code.putln("bad:")
code.putln("return -1;")
code.putln("}")
code.putln(import_star_utility_code)
code.exit_cfunc_scope() # done with labels
def generate_module_init_func(self, imported_modules, env, code):
code.enter_cfunc_scope()
code.putln("")
header2 = "PyMODINIT_FUNC init%s(void)" % env.module_name
header3 = "PyMODINIT_FUNC PyInit_%s(void)" % env.module_name
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln("%s; /*proto*/" % header2)
code.putln(header2)
code.putln("#else")
code.putln("%s; /*proto*/" % header3)
code.putln(header3)
code.putln("#endif")
code.putln("{")
tempdecl_code = code.insertion_point()
code.put_declare_refcount_context()
code.putln("#if CYTHON_REFNANNY")
code.putln("__Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"refnanny\");")
code.putln("if (!__Pyx_RefNanny) {")
code.putln(" PyErr_Clear();")
code.putln(" __Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"Cython.Runtime.refnanny\");")
code.putln(" if (!__Pyx_RefNanny)")
code.putln(" Py_FatalError(\"failed to import 'refnanny' module\");")
code.putln("}")
code.putln("#endif")
code.put_setup_refcount_context(header3)
env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
code.putln("if ( __Pyx_check_binary_version() < 0) %s" % code.error_goto(self.pos))
code.putln("%s = PyTuple_New(0); %s" % (Naming.empty_tuple, code.error_goto_if_null(Naming.empty_tuple, self.pos)))
code.putln("%s = PyBytes_FromStringAndSize(\"\", 0); %s" % (Naming.empty_bytes, code.error_goto_if_null(Naming.empty_bytes, self.pos)))
code.putln("#ifdef __Pyx_CyFunction_USED")
code.putln("if (__Pyx_CyFunction_init() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
code.putln("#ifdef __Pyx_FusedFunction_USED")
code.putln("if (__pyx_FusedFunction_init() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
code.putln("#ifdef __Pyx_Generator_USED")
code.putln("if (__pyx_Generator_init() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
code.putln("/*--- Library function declarations ---*/")
env.generate_library_function_declarations(code)
code.putln("/*--- Threads initialization code ---*/")
code.putln("#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS")
code.putln("#ifdef WITH_THREAD /* Python build with threading support? */")
code.putln("PyEval_InitThreads();")
code.putln("#endif")
code.putln("#endif")
code.putln("/*--- Module creation code ---*/")
self.generate_module_creation_code(env, code)
code.putln("/*--- Initialize various global constants etc. ---*/")
code.putln(code.error_goto_if_neg("__Pyx_InitGlobals()", self.pos))
code.putln("#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)")
code.putln("if (__Pyx_init_sys_getdefaultencoding_params() < 0) %s" % code.error_goto(self.pos))
code.putln("#endif")
__main__name = code.globalstate.get_py_string_const(
EncodedString("__main__"), identifier=True)
code.putln("if (%s%s) {" % (Naming.module_is_main, self.full_module_name.replace('.', '__')))
code.putln(
'if (__Pyx_SetAttrString(%s, "__name__", %s) < 0) %s;' % (
env.module_cname,
__main__name.cname,
code.error_goto(self.pos)))
code.putln("}")
# set up __file__ and __path__, then add the module to sys.modules
self.generate_module_import_setup(env, code)
if Options.cache_builtins:
code.putln("/*--- Builtin init code ---*/")
code.putln(code.error_goto_if_neg("__Pyx_InitCachedBuiltins()", self.pos))
code.putln("/*--- Constants init code ---*/")
code.putln(code.error_goto_if_neg("__Pyx_InitCachedConstants()", self.pos))
code.putln("/*--- Global init code ---*/")
self.generate_global_init_code(env, code)
code.putln("/*--- Variable export code ---*/")
self.generate_c_variable_export_code(env, code)
code.putln("/*--- Function export code ---*/")
self.generate_c_function_export_code(env, code)
code.putln("/*--- Type init code ---*/")
self.generate_type_init_code(env, code)
code.putln("/*--- Type import code ---*/")
for module in imported_modules:
self.generate_type_import_code_for_module(module, env, code)
code.putln("/*--- Variable import code ---*/")
for module in imported_modules:
self.generate_c_variable_import_code_for_module(module, env, code)
code.putln("/*--- Function import code ---*/")
for module in imported_modules:
self.specialize_fused_types(module)
self.generate_c_function_import_code_for_module(module, env, code)
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
self.body.generate_execution_code(code)
if Options.generate_cleanup_code:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RegisterModuleCleanup", "ModuleSetupCode.c"))
code.putln("if (__Pyx_RegisterCleanup()) %s;" % code.error_goto(self.pos))
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.putln('if (%s) {' % env.module_cname)
code.put_add_traceback("init %s" % env.qualified_name)
env.use_utility_code(Nodes.traceback_utility_code)
code.put_decref_clear(env.module_cname, py_object_type, nanny=False)
code.putln('} else if (!PyErr_Occurred()) {')
code.putln('PyErr_SetString(PyExc_ImportError, "init %s");' % env.qualified_name)
code.putln('}')
code.put_label(code.return_label)
code.put_finish_refcount_context()
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln("return;")
code.putln("#else")
code.putln("return %s;" % env.module_cname)
code.putln("#endif")
code.putln('}')
tempdecl_code.put_temp_declarations(code.funcstate)
code.exit_cfunc_scope()
def generate_module_import_setup(self, env, code):
module_path = env.directives['set_initial_path']
if module_path == 'SOURCEFILE':
module_path = self.pos[0].filename
if module_path:
code.putln('if (__Pyx_SetAttrString(%s, "__file__", %s) < 0) %s;' % (
env.module_cname,
code.globalstate.get_py_string_const(
EncodedString(decode_filename(module_path))).cname,
code.error_goto(self.pos)))
if env.is_package:
# set __path__ to mark the module as package
temp = code.funcstate.allocate_temp(py_object_type, True)
code.putln('%s = Py_BuildValue("[O]", %s); %s' % (
temp,
code.globalstate.get_py_string_const(
EncodedString(decode_filename(
os.path.dirname(module_path)))).cname,
code.error_goto_if_null(temp, self.pos)))
code.put_gotref(temp)
code.putln(
'if (__Pyx_SetAttrString(%s, "__path__", %s) < 0) %s;' % (
env.module_cname, temp, code.error_goto(self.pos)))
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
elif env.is_package:
# packages require __path__, so all we can do is try to figure
# out the module path at runtime by rerunning the import lookup
package_name, _ = self.full_module_name.rsplit('.', 1)
if '.' in package_name:
parent_name = '"%s"' % (package_name.rsplit('.', 1)[0],)
else:
parent_name = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load(
"SetPackagePathFromImportLib", "ImportExport.c"))
code.putln(code.error_goto_if_neg(
'__Pyx_SetPackagePathFromImportLib(%s, %s)' % (
parent_name,
code.globalstate.get_py_string_const(
EncodedString(env.module_name)).cname),
self.pos))
# CPython may not have put us into sys.modules yet, but relative imports and reimports require it
fq_module_name = self.full_module_name
if fq_module_name.endswith('.__init__'):
fq_module_name = fq_module_name[:-len('.__init__')]
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("{")
code.putln("PyObject *modules = PyImport_GetModuleDict(); %s" %
code.error_goto_if_null("modules", self.pos))
code.putln('if (!PyDict_GetItemString(modules, "%s")) {' % fq_module_name)
code.putln(code.error_goto_if_neg('PyDict_SetItemString(modules, "%s", %s)' % (
fq_module_name, env.module_cname), self.pos))
code.putln("}")
code.putln("}")
code.putln("#endif")
def generate_module_cleanup_func(self, env, code):
if not Options.generate_cleanup_code:
return
code.putln('static void %s(CYTHON_UNUSED PyObject *self) {' %
Naming.cleanup_cname)
if Options.generate_cleanup_code >= 2:
code.putln("/*--- Global cleanup code ---*/")
rev_entries = list(env.var_entries)
rev_entries.reverse()
for entry in rev_entries:
if entry.visibility != 'extern':
if entry.type.is_pyobject and entry.used:
code.put_xdecref_clear(
entry.cname, entry.type,
clear_before_decref=True,
nanny=False)
code.putln("__Pyx_CleanupGlobals();")
if Options.generate_cleanup_code >= 3:
code.putln("/*--- Type import cleanup code ---*/")
for ext_type in sorted(env.types_imported, key=operator.attrgetter('typeptr_cname')):
code.put_xdecref_clear(
ext_type.typeptr_cname, ext_type,
clear_before_decref=True,
nanny=False)
if Options.cache_builtins:
code.putln("/*--- Builtin cleanup code ---*/")
for entry in env.cached_builtins:
code.put_xdecref_clear(
entry.cname, PyrexTypes.py_object_type,
clear_before_decref=True,
nanny=False)
code.putln("/*--- Intern cleanup code ---*/")
code.put_decref_clear(Naming.empty_tuple,
PyrexTypes.py_object_type,
clear_before_decref=True,
nanny=False)
for entry in env.c_class_entries:
cclass_type = entry.type
if cclass_type.is_external or cclass_type.base_type:
continue
if cclass_type.scope.directives.get('freelist', 0):
scope = cclass_type.scope
freelist_name = scope.mangle_internal(Naming.freelist_name)
freecount_name = scope.mangle_internal(Naming.freecount_name)
code.putln("while (%s > 0) {" % freecount_name)
code.putln("PyObject* o = (PyObject*)%s[--%s];" % (
freelist_name, freecount_name))
code.putln("(*Py_TYPE(o)->tp_free)(o);")
code.putln("}")
# for entry in env.pynum_entries:
# code.put_decref_clear(entry.cname,
# PyrexTypes.py_object_type,
# nanny=False)
# for entry in env.all_pystring_entries:
# if entry.is_interned:
# code.put_decref_clear(entry.pystring_cname,
# PyrexTypes.py_object_type,
# nanny=False)
# for entry in env.default_entries:
# if entry.type.is_pyobject and entry.used:
# code.putln("Py_DECREF(%s); %s = 0;" % (
# code.entry_as_pyobject(entry), entry.cname))
code.putln('#if CYTHON_COMPILING_IN_PYPY')
code.putln('Py_CLEAR(%s);' % Naming.builtins_cname)
code.putln('#endif')
code.put_decref_clear(env.module_dict_cname, py_object_type,
nanny=False, clear_before_decref=True)
def generate_main_method(self, env, code):
module_is_main = "%s%s" % (Naming.module_is_main, self.full_module_name.replace('.', '__'))
if Options.embed == "main":
wmain = "wmain"
else:
wmain = Options.embed
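# With the default embed target "main", a "wmain" wide-character entry
# point is also emitted for Windows; a custom target name is used for
# both the narrow and wide variants.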
code.globalstate.use_utility_code(
main_method.specialize(
module_name = env.module_name,
module_is_main = module_is_main,
main_method = Options.embed,
wmain_method = wmain))
def generate_pymoduledef_struct(self, env, code):
if env.doc:
doc = "__Pyx_DOCSTR(%s)" % code.get_string_const(env.doc)
else:
doc = "0"
if Options.generate_cleanup_code:
cleanup_func = "(freefunc)%s" % Naming.cleanup_cname
else:
cleanup_func = 'NULL'
code.putln("")
code.putln("#if PY_MAJOR_VERSION >= 3")
code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname)
code.putln("#if PY_VERSION_HEX < 0x03020000")
# fix C compiler warnings due to missing initialisers
code.putln(" { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },")
code.putln("#else")
code.putln(" PyModuleDef_HEAD_INIT,")
code.putln("#endif")
code.putln(' __Pyx_NAMESTR("%s"),' % env.module_name)
code.putln(" %s, /* m_doc */" % doc)
code.putln(" -1, /* m_size */")
code.putln(" %s /* m_methods */," % env.method_table_cname)
code.putln(" NULL, /* m_reload */")
code.putln(" NULL, /* m_traverse */")
code.putln(" NULL, /* m_clear */")
code.putln(" %s /* m_free */" % cleanup_func)
code.putln("};")
code.putln("#endif")
def generate_module_creation_code(self, env, code):
# Generate code to create the module object and
# install the builtins.
if env.doc:
doc = "__Pyx_DOCSTR(%s)" % code.get_string_const(env.doc)
else:
doc = "0"
code.putln("#if PY_MAJOR_VERSION < 3")
code.putln(
'%s = Py_InitModule4(__Pyx_NAMESTR("%s"), %s, %s, 0, PYTHON_API_VERSION); Py_XINCREF(%s);' % (
env.module_cname,
env.module_name,
env.method_table_cname,
doc,
env.module_cname))
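# (Py2's Py_InitModule4 returns a borrowed reference, hence the
# Py_XINCREF in the generated code, matching the new reference that
# PyModule_Create returns on Py3.)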
code.putln("#else")
code.putln(
"%s = PyModule_Create(&%s);" % (
env.module_cname,
Naming.pymoduledef_cname))
code.putln("#endif")
code.putln(code.error_goto_if_null(env.module_cname, self.pos))
code.putln(
"%s = PyModule_GetDict(%s); %s" % (
env.module_dict_cname, env.module_cname,
code.error_goto_if_null(env.module_dict_cname, self.pos)))
code.put_incref(env.module_dict_cname, py_object_type, nanny=False)
code.putln(
'%s = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); %s' % (
Naming.builtins_cname,
code.error_goto_if_null(Naming.builtins_cname, self.pos)))
code.putln('#if CYTHON_COMPILING_IN_PYPY')
code.putln('Py_INCREF(%s);' % Naming.builtins_cname)
code.putln('#endif')
code.putln(
'if (__Pyx_SetAttrString(%s, "__builtins__", %s) < 0) %s;' % (
env.module_cname,
Naming.builtins_cname,
code.error_goto(self.pos)))
if Options.pre_import is not None:
code.putln(
'%s = PyImport_AddModule(__Pyx_NAMESTR("%s")); %s' % (
Naming.preimport_cname,
Options.pre_import,
code.error_goto_if_null(Naming.preimport_cname, self.pos)))
def generate_global_init_code(self, env, code):
# Generate code to initialise global PyObject *
# variables to None.
for entry in env.var_entries:
if entry.visibility != 'extern':
if entry.used:
entry.type.global_init_code(entry, code)
def generate_c_variable_export_code(self, env, code):
# Generate code to export this module's C variables via void pointers.
entries = []
for entry in env.var_entries:
if (entry.api
or entry.defined_in_pxd
or (Options.cimport_from_pyx and not entry.visibility == 'extern')):
entries.append(entry)
if entries:
env.use_utility_code(UtilityCode.load_cached("VoidPtrExport", "ImportExport.c"))
for entry in entries:
signature = entry.type.declaration_code("")
name = code.intern_identifier(entry.name)
code.putln('if (__Pyx_ExportVoidPtr(%s, (void *)&%s, "%s") < 0) %s' % (
name, entry.cname, signature,
code.error_goto(self.pos)))
def generate_c_function_export_code(self, env, code):
# Generate code to create PyCFunction wrappers for exported C functions.
entries = []
for entry in env.cfunc_entries:
if (entry.api
or entry.defined_in_pxd
or (Options.cimport_from_pyx and not entry.visibility == 'extern')):
entries.append(entry)
if entries:
env.use_utility_code(
UtilityCode.load_cached("FunctionExport", "ImportExport.c"))
for entry in entries:
signature = entry.type.signature_string()
code.putln('if (__Pyx_ExportFunction("%s", (void (*)(void))%s, "%s") < 0) %s' % (
entry.name,
entry.cname,
signature,
code.error_goto(self.pos)))
def generate_type_import_code_for_module(self, module, env, code):
# Generate type import code for all exported extension types in
# an imported module.
#if module.c_class_entries:
for entry in module.c_class_entries:
if entry.defined_in_pxd:
self.generate_type_import_code(env, entry.type, entry.pos, code)
def specialize_fused_types(self, pxd_env):
"""
If fused c(p)def functions are defined in an imported pxd, but not
used in this implementation file, we still have fused entries and
not specialized ones. This method replaces any fused entries with their
specialized ones.
"""
for entry in pxd_env.cfunc_entries[:]:
if entry.type.is_fused:
# This call modifies the cfunc_entries in-place
entry.type.get_all_specialized_function_types()
def generate_c_variable_import_code_for_module(self, module, env, code):
# Generate import code for all exported C variables in a cimported module.
entries = []
for entry in module.var_entries:
if entry.defined_in_pxd:
entries.append(entry)
if entries:
env.use_utility_code(
UtilityCode.load_cached("ModuleImport", "ImportExport.c"))
env.use_utility_code(
UtilityCode.load_cached("VoidPtrImport", "ImportExport.c"))
temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
'%s = __Pyx_ImportModule("%s"); if (!%s) %s' % (
temp,
module.qualified_name,
temp,
code.error_goto(self.pos)))
for entry in entries:
if env is module:
cname = entry.cname
else:
cname = module.mangle(Naming.varptr_prefix, entry.name)
signature = entry.type.declaration_code("")
code.putln(
'if (__Pyx_ImportVoidPtr(%s, "%s", (void **)&%s, "%s") < 0) %s' % (
temp, entry.name, cname, signature,
code.error_goto(self.pos)))
code.putln("Py_DECREF(%s); %s = 0;" % (temp, temp))
def generate_c_function_import_code_for_module(self, module, env, code):
# Generate import code for all exported C functions in a cimported module.
entries = []
for entry in module.cfunc_entries:
if entry.defined_in_pxd and entry.used:
entries.append(entry)
if entries:
env.use_utility_code(
UtilityCode.load_cached("ModuleImport", "ImportExport.c"))
env.use_utility_code(
UtilityCode.load_cached("FunctionImport", "ImportExport.c"))
temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
'%s = __Pyx_ImportModule("%s"); if (!%s) %s' % (
temp,
module.qualified_name,
temp,
code.error_goto(self.pos)))
for entry in entries:
code.putln(
'if (__Pyx_ImportFunction(%s, "%s", (void (**)(void))&%s, "%s") < 0) %s' % (
temp,
entry.name,
entry.cname,
entry.type.signature_string(),
code.error_goto(self.pos)))
code.putln("Py_DECREF(%s); %s = 0;" % (temp, temp))
def generate_type_init_code(self, env, code):
# Generate type import code for extern extension types
# and type ready code for non-extern ones.
for entry in env.c_class_entries:
if entry.visibility == 'extern' and not entry.utility_code_definition:
self.generate_type_import_code(env, entry.type, entry.pos, code)
else:
self.generate_base_type_import_code(env, entry, code)
self.generate_exttype_vtable_init_code(entry, code)
self.generate_type_ready_code(env, entry, code)
self.generate_typeptr_assignment_code(entry, code)
def generate_base_type_import_code(self, env, entry, code):
base_type = entry.type.base_type
if (base_type and base_type.module_name != env.qualified_name and not
base_type.is_builtin_type and not entry.utility_code_definition):
self.generate_type_import_code(env, base_type, self.pos, code)
def generate_type_import_code(self, env, type, pos, code):
# If not already done, generate code to import the typeobject of an
# extension type defined in another module, and extract its C method
# table pointer if any.
if type in env.types_imported:
return
env.use_utility_code(UtilityCode.load_cached("TypeImport", "ImportExport.c"))
self.generate_type_import_call(type, code,
code.error_goto_if_null(type.typeptr_cname, pos))
if type.vtabptr_cname:
code.globalstate.use_utility_code(
UtilityCode.load_cached('GetVTable', 'ImportExport.c'))
code.putln("%s = (struct %s*)__Pyx_GetVtable(%s->tp_dict); %s" % (
type.vtabptr_cname,
type.vtabstruct_cname,
type.typeptr_cname,
code.error_goto_if_null(type.vtabptr_cname, pos)))
env.types_imported.add(type)
py3_type_name_map = {'str' : 'bytes', 'unicode' : 'str'}
def generate_type_import_call(self, type, code, error_code):
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
sizeof_objstruct = objstruct
module_name = type.module_name
condition = replacement = None
if module_name not in ('__builtin__', 'builtins'):
module_name = '"%s"' % module_name
else:
module_name = '__Pyx_BUILTIN_MODULE_NAME'
if type.name in Code.non_portable_builtins_map:
condition, replacement = Code.non_portable_builtins_map[type.name]
if objstruct in Code.basicsize_builtins_map:
# Some builtin types have a tp_basicsize which differs from sizeof(...):
sizeof_objstruct = Code.basicsize_builtins_map[objstruct]
code.put('%s = __Pyx_ImportType(%s,' % (
type.typeptr_cname,
module_name))
if condition and replacement:
code.putln("") # start in new line
code.putln("#if %s" % condition)
code.putln('"%s",' % replacement)
code.putln("#else")
code.putln('"%s",' % type.name)
code.putln("#endif")
else:
code.put(' "%s", ' % type.name)
if sizeof_objstruct != objstruct:
if not condition:
code.putln("") # start in new line
code.putln("#if CYTHON_COMPILING_IN_PYPY")
code.putln('sizeof(%s),' % objstruct)
code.putln("#else")
code.putln('sizeof(%s),' % sizeof_objstruct)
code.putln("#endif")
else:
code.put('sizeof(%s), ' % objstruct)
code.putln('%i); %s' % (
not type.is_external or type.is_subclassed,
error_code))
def generate_type_ready_code(self, env, entry, code):
# Generate a call to PyType_Ready for an extension
# type defined in this module.
type = entry.type
typeobj_cname = type.typeobj_cname
scope = type.scope
if scope: # could be None if there was an error
if entry.visibility != 'extern':
for slot in TypeSlots.slot_table:
slot.generate_dynamic_init_code(scope, code)
code.putln(
"if (PyType_Ready(&%s) < 0) %s" % (
typeobj_cname,
code.error_goto(entry.pos)))
# Don't inherit tp_print from builtin types, restoring the
# behavior of using tp_repr or tp_str instead.
code.putln("%s.tp_print = 0;" % typeobj_cname)
# Fix special method docstrings. This is a bit of a hack, but
# unless we let PyType_Ready create the slot wrappers we have
# a significant performance hit. (See trac #561.)
for func in entry.type.scope.pyfunc_entries:
is_buffer = func.name in ('__getbuffer__',
'__releasebuffer__')
if (func.is_special and Options.docstrings and
func.wrapperbase_cname and not is_buffer):
slot = TypeSlots.method_name_to_slot[func.name]
preprocessor_guard = slot.preprocessor_guard_code()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("{")
code.putln(
'PyObject *wrapper = __Pyx_GetAttrString((PyObject *)&%s, "%s"); %s' % (
typeobj_cname,
func.name,
code.error_goto_if_null('wrapper', entry.pos)))
code.putln(
"if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
code.putln(
"%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
func.wrapperbase_cname))
code.putln(
"%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
code.putln(
"((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
func.wrapperbase_cname))
code.putln("}")
code.putln("}")
code.putln('#endif')
if preprocessor_guard:
code.putln('#endif')
if type.vtable_cname:
code.putln(
"if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
typeobj_cname,
type.vtabptr_cname,
code.error_goto(entry.pos)))
code.globalstate.use_utility_code(
UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
if not type.scope.is_internal and not type.scope.directives['internal']:
# scope.is_internal is set for types defined by
# Cython (such as closures), the 'internal'
# directive is set by users
code.putln(
'if (__Pyx_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
Naming.module_cname,
scope.class_name,
typeobj_cname,
code.error_goto(entry.pos)))
weakref_entry = scope.lookup_here("__weakref__")
if weakref_entry:
if weakref_entry.type is py_object_type:
tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
if type.typedef_flag:
objstruct = type.objstruct_cname
else:
objstruct = "struct %s" % type.objstruct_cname
code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
tp_weaklistoffset,
tp_weaklistoffset,
objstruct,
weakref_entry.cname))
else:
error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
# extension type.
type = entry.type
if type.vtable_cname:
code.putln(
"%s = &%s;" % (
type.vtabptr_cname,
type.vtable_cname))
if type.base_type and type.base_type.vtabptr_cname:
code.putln(
"%s.%s = *%s;" % (
type.vtable_cname,
Naming.obj_base_cname,
type.base_type.vtabptr_cname))
c_method_entries = [
entry for entry in type.scope.cfunc_entries
if entry.func_cname ]
if c_method_entries:
for meth_entry in c_method_entries:
cast = meth_entry.type.signature_cast_string()
code.putln(
"%s.%s = %s%s;" % (
type.vtable_cname,
meth_entry.cname,
cast,
meth_entry.func_cname))
def generate_typeptr_assignment_code(self, entry, code):
# Generate code to initialise the typeptr of an extension
# type defined in this module to point to its type object.
type = entry.type
if type.typeobj_cname:
code.putln(
"%s = &%s;" % (
type.typeptr_cname, type.typeobj_cname))
def generate_cfunction_declaration(entry, env, code, definition):
from_cy_utility = entry.used and entry.utility_code_definition
if (entry.used and entry.inline_func_in_pxd) or (not entry.in_cinclude and (definition
or entry.defined_in_pxd or entry.visibility == 'extern' or from_cy_utility)):
if entry.visibility == 'extern':
storage_class = Naming.extern_c_macro
dll_linkage = "DL_IMPORT"
elif entry.visibility == 'public':
storage_class = Naming.extern_c_macro
dll_linkage = "DL_EXPORT"
elif entry.visibility == 'private':
storage_class = "static"
dll_linkage = None
else:
storage_class = "static"
dll_linkage = None
type = entry.type
if entry.defined_in_pxd and not definition:
storage_class = "static"
dll_linkage = None
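# A function declared in a .pxd but not defined in this module is
# declared as a pointer, to be filled in at module init time by the
# function import code (__Pyx_ImportFunction).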
type = CPtrType(type)
header = type.declaration_code(
entry.cname, dll_linkage = dll_linkage)
modifiers = code.build_function_modifiers(entry.func_modifiers)
code.putln("%s %s%s; /*proto*/" % (
storage_class,
modifiers,
header))
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
streq_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/
""",
impl = """
static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) {
while (*s1 != '\\0' && *s1 == *s2) { s1++; s2++; }
return *s1 == *s2;
}
""")
#------------------------------------------------------------------------------------
import_star_utility_code = """
/* import_all_from is an unexposed function from ceval.c */
static int
__Pyx_import_all_from(PyObject *locals, PyObject *v)
{
PyObject *all = __Pyx_GetAttrString(v, "__all__");
PyObject *dict, *name, *value;
int skip_leading_underscores = 0;
int pos, err;
if (all == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1; /* Unexpected error */
PyErr_Clear();
dict = __Pyx_GetAttrString(v, "__dict__");
if (dict == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1;
PyErr_SetString(PyExc_ImportError,
"from-import-* object has no __dict__ and no __all__");
return -1;
}
#if PY_MAJOR_VERSION < 3
all = PyObject_CallMethod(dict, (char *)"keys", NULL);
#else
all = PyMapping_Keys(dict);
#endif
Py_DECREF(dict);
if (all == NULL)
return -1;
skip_leading_underscores = 1;
}
for (pos = 0, err = 0; ; pos++) {
name = PySequence_GetItem(all, pos);
if (name == NULL) {
if (!PyErr_ExceptionMatches(PyExc_IndexError))
err = -1;
else
PyErr_Clear();
break;
}
if (skip_leading_underscores &&
#if PY_MAJOR_VERSION < 3
PyString_Check(name) &&
PyString_AS_STRING(name)[0] == '_')
#else
PyUnicode_Check(name) &&
PyUnicode_AS_UNICODE(name)[0] == '_')
#endif
{
Py_DECREF(name);
continue;
}
value = PyObject_GetAttr(v, name);
if (value == NULL)
err = -1;
else if (PyDict_CheckExact(locals))
err = PyDict_SetItem(locals, name, value);
else
err = PyObject_SetItem(locals, name, value);
Py_DECREF(name);
Py_XDECREF(value);
if (err != 0)
break;
}
Py_DECREF(all);
return err;
}
static int %(IMPORT_STAR)s(PyObject* m) {
int i;
int ret = -1;
char* s;
PyObject *locals = 0;
PyObject *list = 0;
#if PY_MAJOR_VERSION >= 3
PyObject *utf8_name = 0;
#endif
PyObject *name;
PyObject *item;
locals = PyDict_New(); if (!locals) goto bad;
if (__Pyx_import_all_from(locals, m) < 0) goto bad;
list = PyDict_Items(locals); if (!list) goto bad;
for(i=0; i<PyList_GET_SIZE(list); i++) {
name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
#if PY_MAJOR_VERSION >= 3
utf8_name = PyUnicode_AsUTF8String(name);
if (!utf8_name) goto bad;
s = PyBytes_AS_STRING(utf8_name);
if (%(IMPORT_STAR_SET)s(item, name, s) < 0) goto bad;
Py_DECREF(utf8_name); utf8_name = 0;
#else
s = PyString_AsString(name);
if (!s) goto bad;
if (%(IMPORT_STAR_SET)s(item, name, s) < 0) goto bad;
#endif
}
ret = 0;
bad:
Py_XDECREF(locals);
Py_XDECREF(list);
#if PY_MAJOR_VERSION >= 3
Py_XDECREF(utf8_name);
#endif
return ret;
}
""" % {'IMPORT_STAR' : Naming.import_star,
'IMPORT_STAR_SET' : Naming.import_star_set }
refnanny_utility_code = UtilityCode.load_cached("Refnanny", "ModuleSetupCode.c")
main_method = UtilityCode.load("MainFunction", "Embed.c")
packed_struct_utility_code = UtilityCode(proto="""
#if defined(__GNUC__)
#define __Pyx_PACKED __attribute__((__packed__))
#else
#define __Pyx_PACKED
#endif
""", impl="", proto_block='utility_code_proto_before_types')
capsule_utility_code = UtilityCode.load("Capsule")
|
suda/micropython
|
refs/heads/master
|
tests/basics/enumerate.py
|
54
|
print(list(enumerate([])))
print(list(enumerate([1, 2, 3])))
print(list(enumerate([1, 2, 3], 5)))
print(list(enumerate([1, 2, 3], -5)))
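# a negative start simply offsets the indices: [(-5, 1), (-4, 2), (-3, 3)]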
print(list(enumerate(range(1000))))
# specifying args with keywords
print(list(enumerate([1, 2, 3], start=1)))
print(list(enumerate(iterable=[1, 2, 3])))
print(list(enumerate(iterable=[1, 2, 3], start=1)))
|
alexryndin/ambari
|
refs/heads/branch-adh-1.5
|
ambari-server/src/main/resources/stacks/BigInsights/4.2/services/KAFKA/package/scripts/status_params.py
|
14
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
config = Script.get_config()
kafka_pid_dir = config['configurations']['kafka-env']['kafka_pid_dir']
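# Note: Ambari's format() resolves {kafka_pid_dir} from the calling
# scope, unlike plain str.format().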
kafka_pid_file = format("{kafka_pid_dir}/kafka.pid")
|
haguenau/otp
|
refs/heads/maint
|
lib/asn1/test/asn1_SUITE_data/Set.py
|
94
|
Set DEFINITIONS IMPLICIT TAGS ::=
BEGIN
IMPORTS Seq1 FROM SeqSetLib;
Set ::= SET
{
bool BOOLEAN,
boolCon [20] BOOLEAN,
boolPri [PRIVATE 21] BOOLEAN,
boolApp [APPLICATION 22] BOOLEAN,
boolExpCon [30] EXPLICIT BOOLEAN,
boolExpPri [PRIVATE 31] EXPLICIT BOOLEAN,
boolExpApp [APPLICATION 32] EXPLICIT BOOLEAN
}
Set1 ::= SET
{
bool1 BOOLEAN,
int1 INTEGER,
set1 SetIn
}
Set2 ::= SET
{
set2 SetIn,
bool2 BOOLEAN,
int2 INTEGER
}
Set3 ::= SET
{
bool3 BOOLEAN,
set3 SetIn,
int3 INTEGER
}
SetDef1 ::= SET
{
bool1 BOOLEAN DEFAULT TRUE,
int1 INTEGER,
set1 SetIn DEFAULT {}
}
SetDef2 ::= SET
{
set2 SetIn DEFAULT {},
bool2 BOOLEAN,
int2 INTEGER
}
SetDef3 ::= SET
{
bool3 BOOLEAN DEFAULT TRUE,
set3 SetIn DEFAULT {},
int3 INTEGER DEFAULT 17
}
SetOpt1 ::= SET
{
bool1 BOOLEAN OPTIONAL,
int1 INTEGER,
set1 SetIn OPTIONAL
}
SetOpt2 ::= SET
{
set2 SetIn OPTIONAL,
bool2 BOOLEAN,
int2 INTEGER
}
SetOpt3 ::= SET
{
bool3 BOOLEAN OPTIONAL,
set3 SetIn OPTIONAL,
int3 INTEGER OPTIONAL
}
SetIn ::= SET
{
boolIn BOOLEAN OPTIONAL,
intIn INTEGER OPTIONAL
}
SetS1 ::= SET
{
boolS1 BOOLEAN,
intS1 INTEGER,
setS1 SET { boolIn BOOLEAN,
intIn INTEGER }
}
SetS2 ::= SET
{
setS2 SET { boolIn BOOLEAN,
intIn INTEGER },
boolS2 BOOLEAN,
intS2 INTEGER
}
SetS3 ::= SET
{
boolS3 BOOLEAN,
setS3 SET { boolIn BOOLEAN,
intIn INTEGER },
intS3 INTEGER
}
SetImp1 ::= SET
{
seq Seq1,
bool BOOLEAN,
int INTEGER
}
SetImp2 ::= SET
{
bool BOOLEAN,
seq Seq1,
int INTEGER
}
SetImp3 ::= SET
{
bool BOOLEAN,
int INTEGER,
seq Seq1
}
END
|
initNirvana/Easyphotos
|
refs/heads/master
|
env/lib/python3.4/site-packages/PIL/ImagePath.py
|
41
|
#
# The Python Imaging Library
# $Id$
#
# path interface
#
# History:
# 1996-11-04 fl Created
# 2002-04-14 fl Added documentation stub class
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
# the Python class below is overridden by the C implementation.
class Path:
def __init__(self, xy):
pass
##
# Compacts the path by removing points that are close to each
# other. This method modifies the path in place.
def compact(self, distance=2):
pass
##
# Gets the bounding box.
def getbbox(self):
pass
##
# Maps the path through a function.
def map(self, function):
pass
##
# Converts the path to a Python list.
#
# @param flat By default, this function returns a list of 2-tuples
# [(x, y), ...]. If this argument is true, it returns a flat
# list [x, y, ...] instead.
# @return A list of coordinates.
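# Illustrative: Path([(0, 0), (1, 2)]).tolist() -> [(0.0, 0.0), (1.0, 2.0)],
# while tolist(1) gives the flat form [0.0, 0.0, 1.0, 2.0].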
def tolist(self, flat=0):
pass
##
# Transforms the path.
def transform(self, matrix):
pass
# override with C implementation
Path = Image.core.path
|
tailorian/Sick-Beard
|
refs/heads/ThePirateBay
|
sickbeard/properFinder.py
|
5
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import operator
import sickbeard
from sickbeard import db
from sickbeard import exceptions
from sickbeard.exceptions import ex
from sickbeard import helpers, logger, show_name_helpers
from sickbeard import providers
from sickbeard import search
from sickbeard import history
from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from name_parser.parser import NameParser, InvalidNameException
class ProperFinder():
def __init__(self):
self.updateInterval = datetime.timedelta(hours=1)
def run(self):
if not sickbeard.DOWNLOAD_PROPERS:
return
# look for propers every night at 1 AM
updateTime = datetime.time(hour=1)
logger.log(u"Checking proper time", logger.DEBUG)
hourDiff = datetime.datetime.today().time().hour - updateTime.hour
dayDiff = (datetime.date.today() - self._get_lastProperSearch()).days
# if it's less than an interval after the update time then do an update
if (hourDiff >= 0 and hourDiff < self.updateInterval.seconds / 3600) or dayDiff >= 1:
logger.log(u"Beginning the search for new propers")
else:
return
propers = self._getProperList()
self._downloadPropers(propers)
self._set_lastProperSearch(datetime.datetime.today().toordinal())
def _getProperList(self):
propers = {}
# for each provider get a list of the propers
for curProvider in providers.sortedProviderList():
if not curProvider.isActive():
continue
search_date = datetime.datetime.today() - datetime.timedelta(days=2)
logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
try:
curPropers = curProvider.findPropers(search_date)
except exceptions.AuthException, e:
logger.log(u"Authentication error: " + ex(e), logger.ERROR)
continue
# if they haven't been added by a different provider then add the proper to the list
for x in curPropers:
name = self._genericName(x.name)
if name not in propers:
logger.log(u"Found new proper: " + x.name, logger.DEBUG)
x.provider = curProvider
propers[name] = x
# take the list of unique propers and get it sorted by date, newest first
sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
finalPropers = []
for curProper in sortedPropers:
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(curProper.name)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
continue
if not parse_result.episode_numbers:
logger.log(u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
continue
# populate our Proper instance
if parse_result.air_by_date:
curProper.season = -1
curProper.episode = parse_result.air_date
else:
curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
curProper.episode = parse_result.episode_numbers[0]
curProper.quality = Quality.nameQuality(curProper.name)
# for each show in our list
for curShow in sickbeard.showList:
if not parse_result.series_name:
continue
genericName = self._genericName(parse_result.series_name)
# get the scene name masks
sceneNames = set(show_name_helpers.makeSceneShowSearchStrings(curShow))
# for each scene name mask
for curSceneName in sceneNames:
# if it matches
if genericName == self._genericName(curSceneName):
logger.log(u"Successful match! Result " + parse_result.series_name + " matched to show " + curShow.name, logger.DEBUG)
# set the tvdbid in the db to the show's tvdbid
curProper.tvdbid = curShow.tvdbid
# since we found it, break out
break
# if we found something in the inner for loop break out of this one
if curProper.tvdbid != -1:
break
if curProper.tvdbid == -1:
continue
if not show_name_helpers.filterBadReleases(curProper.name):
logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it", logger.DEBUG)
continue
show = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
if not show:
logger.log(u"Unable to find the show with tvdbid " + str(curProper.tvdbid), logger.ERROR)
continue
if show.rls_ignore_words and search.filter_release_name(curProper.name, show.rls_ignore_words):
logger.log(u"Ignoring " + curProper.name + " based on ignored words filter: " + show.rls_ignore_words, logger.MESSAGE)
continue
if show.rls_require_words and not search.filter_release_name(curProper.name, show.rls_require_words):
logger.log(u"Ignoring " + curProper.name + " based on required words filter: " + show.rls_require_words, logger.MESSAGE)
continue
# if we have an air-by-date show then get the real season/episode numbers
if curProper.season == -1 and curProper.tvdbid:
tvdb_lang = show.lang
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if tvdb_lang and tvdb_lang != 'en':
ltvdb_api_parms['language'] = tvdb_lang
try:
t = tvdb_api.Tvdb(**ltvdb_api_parms)
epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
curProper.season = int(epObj["seasonnumber"])
curProper.episodes = [int(epObj["episodenumber"])]
except tvdb_exceptions.tvdb_episodenotfound:
logger.log(u"Unable to find episode with date " + str(curProper.episode) + " for show " + parse_result.series_name + ", skipping", logger.WARNING)
continue
# check if we actually want this proper (if it's the right quality)
sqlResults = db.DBConnection().select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [curProper.tvdbid, curProper.season, curProper.episode])
if not sqlResults:
continue
oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))
# only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != curProper.quality:
continue
# if the show is in our list and there hasn't been a proper already added for that particular episode, then add it to our list of propers
if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season, curProper.episode) not in map(operator.attrgetter('tvdbid', 'season', 'episode'), finalPropers):
logger.log(u"Found a proper that we need: " + str(curProper.name))
finalPropers.append(curProper)
return finalPropers
def _downloadPropers(self, properList):
for curProper in properList:
historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)
# make sure the episode has been downloaded before
myDB = db.DBConnection()
historyResults = myDB.select(
"SELECT resource FROM history "
"WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? "
"AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
[curProper.tvdbid, curProper.season, curProper.episode, curProper.quality, historyLimit.strftime(history.dateFormat)])
# if we didn't download this episode in the first place, we don't know what quality to use for the proper, so we can't do it
if len(historyResults) == 0:
logger.log(u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
continue
else:
# make sure that none of the existing history downloads are the same proper we're trying to download
isSame = False
for curResult in historyResults:
# if the result exists in history already we need to skip it
if self._genericName(curResult["resource"]) == self._genericName(curProper.name):
isSame = True
break
if isSame:
logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
continue
# get the episode object
showObj = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
if showObj is None:
logger.log(u"Unable to find the show with tvdbid " + str(curProper.tvdbid) + " so unable to download the proper", logger.ERROR)
continue
epObj = showObj.getEpisode(curProper.season, curProper.episode)
# make the result object
result = curProper.provider.getResult([epObj])
result.url = curProper.url
result.name = curProper.name
result.quality = curProper.quality
# snatch it
search.snatchEpisode(result, SNATCHED_PROPER)
def _genericName(self, name):
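# e.g. "Show.Name_x264-GRP" -> "show name x264 grp"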
return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
def _set_lastProperSearch(self, when):
logger.log(u"Setting the last Proper search in the DB to " + str(when), logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM info")
if len(sqlResults) == 0:
myDB.action("INSERT INTO info (last_backlog, last_TVDB, last_proper_search) VALUES (?,?,?)", [0, 0, str(when)])
else:
myDB.action("UPDATE info SET last_proper_search=" + str(when))
def _get_lastProperSearch(self):
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM info")
try:
last_proper_search = datetime.date.fromordinal(int(sqlResults[0]["last_proper_search"]))
except:
return datetime.date.fromordinal(1)
return last_proper_search
|
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/distutils/command/clean.py
|
59
|
"""distutils.command.clean
Implements the Distutils 'clean' command."""
# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils import log
class clean(Command):
description = "clean up temporary files from 'build' command"
user_options = [
('build-base=', 'b',
"base build directory (default: 'build.build-base')"),
('build-lib=', None,
"build directory for all modules (default: 'build.build-lib')"),
('build-temp=', 't',
"temporary build directory (default: 'build.build-temp')"),
('build-scripts=', None,
"build directory for scripts (default: 'build.build-scripts')"),
('bdist-base=', None,
"temporary directory for built distributions"),
('all', 'a',
"remove all build output, not just temporary by-products")
]
boolean_options = ['all']
def initialize_options(self):
self.build_base = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.bdist_base = None
self.all = None
def finalize_options(self):
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'),
('build_scripts', 'build_scripts'),
('build_temp', 'build_temp'))
self.set_undefined_options('bdist',
('bdist_base', 'bdist_base'))
def run(self):
# remove the build/temp.<plat> directory (unless it's already
# gone)
if os.path.exists(self.build_temp):
remove_tree(self.build_temp, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
self.build_temp)
if self.all:
# remove build directories
for directory in (self.build_lib,
self.bdist_base,
self.build_scripts):
if os.path.exists(directory):
remove_tree(directory, dry_run=self.dry_run)
else:
log.warn("'%s' does not exist -- can't clean it",
directory)
# just for the heck of it, try to remove the base build directory:
# we might have emptied it right now, but if not we don't care
if not self.dry_run:
try:
os.rmdir(self.build_base)
log.info("removing '%s'", self.build_base)
except OSError:
pass
# class clean
|
was4444/chromium.src
|
refs/heads/nw15
|
tools/memory_inspector/memory_inspector/backends/__init__.py
|
12133432
| |
vaidap/zulip
|
refs/heads/master
|
tools/documentation_crawler/documentation_crawler/__init__.py
|
12133432
| |
Tiger66639/ansible-modules-core
|
refs/heads/devel
|
database/__init__.py
|
12133432
| |
LUKKIEN/wagtailsocialfeed
|
refs/heads/master
|
tests/app/models.py
|
12133432
| |
kmaglione/olympia
|
refs/heads/master
|
apps/addons/management/commands/personas_fix_mojibake.py
|
13
|
from getpass import getpass
from optparse import make_option
from time import time
from django.core.management.base import BaseCommand
from django.db import connection as django_connection, transaction
import MySQLdb as mysql
def debake(s):
for c in s:
try:
yield c.encode('windows-1252')
except UnicodeEncodeError:
yield c.encode('latin-1')
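# Illustrative: text stored as UTF-8 but decoded as windows-1252 turns
# u'\xe9' into the two-char sequence u'\xc3\xa9'; debake() re-encodes each
# char back to its original byte, yielding '\xc3\xa9', which decodes
# correctly as UTF-8 again.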
class Command(BaseCommand):
"""
Consult the personas database to find and fix mangled descriptions.
`host`: the host of the personas database
`database`: the personas database, eg: personas
`commit`: if yes, actually commit the transaction, for any other value, it
aborts the transaction at the end.
`users`: migrate user accounts?
`favorites`: migrate favorites for users?
"""
option_list = BaseCommand.option_list + (
make_option('--host', action='store',
dest='host', help='The host of MySQL'),
make_option('--db', action='store',
dest='db', help='The database in MySQL'),
make_option('--user', action='store',
dest='user', help='The database user'),
make_option('--commit', action='store',
dest='commit', help='If yes, then commits the run'),
make_option('--start', action='store', type="int",
dest='start', help='An optional offset to start at'),
)
def log(self, msg):
print msg
def commit_or_not(self, gogo):
if gogo == 'yes':
self.log('Committing changes.')
transaction.commit()
else:
self.log('Not committing changes, this is a dry run.')
transaction.rollback()
def connect(self, **options):
options = dict([(k, v) for k, v in options.items() if k in
['host', 'db', 'user'] and v])
options['passwd'] = getpass('MySQL Password: ')
options['charset'] = 'latin1'
options['use_unicode'] = False
if options['host'][0] == '/':
options['unix_socket'] = options['host']
del options['host']
self.connection = mysql.connect(**options)
self.cursor = self.connection.cursor()
self.cursor_z = django_connection.cursor()
def do_fix(self, offset, limit, **options):
self.log('Processing themes %s to %s' % (offset, offset + limit))
ids = []
descs = []
for theme in self.get_themes(limit, offset):
if max(theme[1]) > u'\x7f':
try:
descs.append(''.join(debake(theme[1])))
ids.append(theme[0])
except UnicodeEncodeError:
# probably already done?
print "skipped", theme[0]
else:
print "clean", theme[0]
if ids:
targets = self.find_needed_fixes(ids, descs)
self.fix_descs(targets)
def find_needed_fixes(self, ids, descs):
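# Only rows whose debaked text reproduces the raw bytes still stored in
# the personas DB are safe to fix (i.e. the mojibake round-trips).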
original_descs = self.get_original_descs(ids)
for id, d, original_d in zip(ids, descs, original_descs):
if d == original_d:
yield id, d
def get_original_descs(self, ids):
qs = ', '.join(['%s'] * len(ids))
self.cursor.execute(
"SELECT description from personas where id in (%s)" % qs, ids)
return (x[0] for x in self.cursor.fetchall())
def fix_descs(self, targets):
for id, desc in targets:
try:
desc.decode('utf-8')
print "FIX", id
except UnicodeDecodeError:
print "SKIPPED", id
continue
self.cursor_z.execute(
'UPDATE translations AS t, personas AS p '
'SET t.localized_string = %s, '
't.localized_string_clean = NULL '
'WHERE t.id = p.description AND p.persona_id = %s', [desc, id])
def count_themes(self):
self.cursor_z.execute('SELECT count(persona_id) from personas')
return self.cursor_z.fetchone()[0]
def get_themes(self, limit, offset):
self.cursor_z.execute(
'SELECT p.persona_id, t.localized_string from personas as p, '
'translations as t where t.id = p.description and '
't.localized_string != "" LIMIT %s OFFSET %s' % (limit, offset))
return self.cursor_z.fetchall()
@transaction.commit_manually
def handle(self, *args, **options):
t_total_start = time()
self.connect(**options)
self.log(
"Fixing mojibake in theme descriptions. Think these mangled "
"strings are bad? Have a look at "
"https://en.wikipedia.org/wiki/File:Letter_to_Russia"
"_with_krokozyabry.jpg")
try:
count = self.count_themes()
self.log("Found %s themes. Hope you're not in a hurry" % count)
step = 2500
start = options.get('start', 0)
self.log("Starting at offset: %s" % start)
for offset in range(start, count, step):
t_start = time()
self.do_fix(offset, step, **options)
self.commit_or_not(options.get('commit'))
t_average = 1 / ((time() - t_total_start) /
(offset - start + step))
print "> %.2fs for %s themes. Averaging %.2f themes/s" % (
time() - t_start, step, t_average)
except:
self.log('Error, not committing changes.')
transaction.rollback()
raise
finally:
self.commit_or_not(options.get('commit'))
self.log("Done. Total time: %s seconds" % (time() - t_total_start))
|
perryl/morph
|
refs/heads/master
|
morphlib/buildsystem_tests.py
|
1
|
# Copyright (C) 2012-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import morphlib
class BuildSystemTests(unittest.TestCase):
def setUp(self):
self.bs = morphlib.buildsystem.BuildSystem()
def test_has_configure_commands(self):
self.assertEqual(self.bs['configure-commands'], [])
def test_has_build_commands(self):
self.assertEqual(self.bs['build-commands'], [])
def test_has_test_commands(self):
self.assertEqual(self.bs['test-commands'], [])
def test_has_install_commands(self):
self.assertEqual(self.bs['install-commands'], [])
def test_returns_morphology(self):
self.bs.name = 'fake'
morph = self.bs.get_morphology('foobar')
        self.assertEqual(morph.__class__.__name__, 'Morphology')
def test_construct_from_dict(self):
'''Test parsing a dict of information from a DEFAULTS file.'''
commands_dict = {
'configure-commands': 'foo'
}
self.bs.from_dict('test', commands_dict)
self.assertEqual(self.bs.configure_commands, 'foo')
|
petemounce/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/misc/kibana_plugin.py
|
49
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to manage Kibana plugins
# (c) 2016, Thierno IB. BARRY @barryib
# Sponsored by Polyconseil http://polyconseil.fr.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kibana_plugin
short_description: Manage Kibana plugins
description:
- Manages Kibana plugins.
version_added: "2.2"
author: Thierno IB. BARRY (@barryib)
options:
name:
description:
- Name of the plugin to install
required: True
state:
description:
- Desired state of a plugin.
required: False
choices: ["present", "absent"]
default: present
url:
description:
- Set exact URL to download the plugin from.
For local file, prefix its absolute path with file://
required: False
default: None
timeout:
description:
- "Timeout setting: 30s, 1m, 1h..."
required: False
default: 1m
plugin_bin:
description:
- Location of the plugin binary
required: False
default: /opt/kibana/bin/kibana
plugin_dir:
description:
- Your configured plugin directory specified in Kibana
required: False
default: /opt/kibana/installedPlugins/
version:
description:
- Version of the plugin to be installed.
If plugin exists with previous version, it will NOT be updated if C(force) is not set to yes
required: False
default: None
force:
description:
- Delete and re-install the plugin. Can be useful for plugins update
required: False
choices: ["yes", "no"]
default: no
'''
EXAMPLES = '''
- name: Install Elasticsearch head plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
- name: Install specific version of a plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
version: '2.3.3'
- name: Uninstall Elasticsearch head plugin
kibana_plugin:
state: absent
name: elasticsearch/marvel
'''
RETURN = '''
cmd:
    description: the launched command during plugin management (install / remove)
returned: success
type: string
name:
description: the plugin name to install or remove
returned: success
type: string
url:
    description: the URL from which the plugin is installed
returned: success
type: string
timeout:
    description: the timeout for plugin download
returned: success
type: string
stdout:
description: the command stdout
returned: success
type: string
stderr:
description: the command stderr
returned: success
type: string
state:
description: the state for the managed plugin
returned: success
type: string
'''
import os
PACKAGE_STATE_MAP = dict(
present="--install",
absent="--remove"
)
def parse_plugin_repo(string):
elements = string.split("/")
# We first consider the simplest form: pluginname
repo = elements[0]
# We consider the form: username/pluginname
if len(elements) > 1:
repo = elements[1]
    # strip the well-known "elasticsearch-" and "es-" prefixes
    for prefix in ("elasticsearch-", "es-"):
        if repo.startswith(prefix):
            return repo[len(prefix):]
return repo
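# Editor's illustration (not part of the module) of the parsing rules above:
#   parse_plugin_repo("marvel")                      -> "marvel"
#   parse_plugin_repo("elasticsearch/marvel")        -> "marvel"
#   parse_plugin_repo("someuser/elasticsearch-head") -> "head"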
def is_plugin_present(plugin_dir, working_dir):
return os.path.isdir(os.path.join(working_dir, plugin_dir))
def parse_error(string):
reason = "reason: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, url, timeout):
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
if url:
cmd_args.append("--url %s" % url)
if timeout:
cmd_args.append("--timeout %s" % timeout)
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
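# Editor's illustration (not part of the module): with the defaults above,
# install_plugin(module, "/opt/kibana/bin/kibana", "marvel", None, "1m")
# builds and runs the command:
#   /opt/kibana/bin/kibana plugin --install marvel --timeout 1m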
def remove_plugin(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
url=dict(default=None),
timeout=dict(default="1m"),
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
version=dict(default=None),
force=dict(default="no", type="bool")
),
supports_check_mode=True,
)
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
timeout = module.params["timeout"]
plugin_bin = module.params["plugin_bin"]
plugin_dir = module.params["plugin_dir"]
version = module.params["version"]
force = module.params["force"]
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
# skip if the state is correct
if (present and state == "present" and not force) or (state == "absent" and not present and not force):
module.exit_json(changed=False, name=name, state=state)
    if version:
        name = name + '/' + version
if state == "present":
if force:
remove_plugin(module, plugin_bin, name)
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
woobe/h2o
|
refs/heads/master
|
h2o-perf/bench/py/h2oPerf/Scrape.py
|
1
|
from Table import *
import json
import os
import re
import subprocess
import time
import MySQLdb
class Scraper:
"""
Objects of this class scrape the R stdouterr for
relevant information that needs to percolate back
to the database.
Because of the different phases (parse, model, predict),
there is a switch that redirects control to a subclass
scraper of the appropriate type.
Each phase will insert a row into the test_run_phase_result
table in the db, and possibly percolate pieces of the test_run
table back to the PerfRunner object.
Some subclasses of the Scraper will insert data into the math results tables.
"""
def __init__(self, perfdb, phase, test_dir, test_short_dir, output_dir, output_file_name):
self.perfdb = perfdb
self.phase = phase
self.test_dir = test_dir
self.test_short_dir = test_short_dir
self.output_dir = output_dir
self.output_file_name = output_file_name
self.did_time_pass = 0
self.did_correct_pass = 0
self.contaminated = 0
self.contamination_message = ""
def scrape(self):
"""
Switches out to the phase scraper for scraping R output.
The subclass object is then invoked and an object with
table information is percolated back to the caller.
"""
phase_scraper = self.__switch__()
res = phase_scraper.invoke()
self.did_time_pass = phase_scraper.did_time_pass
self.did_correct_pass = phase_scraper.did_correct_pass
self.contaminated = phase_scraper.contaminated
self.contamination_message = phase_scraper.contamination_message
return res
def __switch__(self):
"""
Switch to scraper for the appropriate phase.
"""
return {
'parse': ParseScraper(self),
'model': ModelScraper(self),
'predict': PredictScraper(self),
}[self.phase]
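# Minimal usage sketch (editor's illustration; assumes a PerfRunner-style
# caller that already holds a perfdb handle and the R output paths):
#
#   scraper = Scraper(perfdb, 'parse', test_dir, test_short_dir,
#                     output_dir, output_file_name)
#   test_run_fields = scraper.scrape()  # dispatches to ParseScraper.invoke()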
class ParseScraper(Scraper):
"""
An object that performs the scraping for the Parse phase.
Relevant tables and their fields:
>test_run:
[dataset_name, dataset_source, train_dataset_url, test_dataset_url]
>test_run_phase_result:
[phase_name, start/end_epoch_ms, stdouterr, passed, correctness_passed,
timing_passed, contaminated, contamination_message]
"""
def __init__(self, object):
self.perfdb = object.perfdb
self.phase = object.phase
self.test_dir = object.test_dir
self.test_short_dir = object.test_short_dir
self.output_dir = object.output_dir
self.output_file_name = object.output_file_name
self.contamination = os.path.join(self.output_dir, "contamination_message")
self.contaminated = 1 if os.path.exists(self.contamination) else 0
self.contamination_message = "No contamination."
if self.contaminated:
with open(self.contamination, "r") as f:
self.contamination_message = MySQLdb.escape_string(f.read().replace('\n', ''))
self.did_correct_pass = 0
self.did_time_pass = 0
self.test_run = {
'dataset_source': '',
'train_dataset_url': '',
'test_dataset_url': '',
}
def invoke(self):
"""
Scrapes the stdouterr from the R phase. Inserts into results tables.
The phase result is handled in the __init__ of this object.
        The work to be done here is on the self.test_run dictionary.
@return: test_run dictionary
"""
self.insert_phase_result()
self.test_run.update(self.__scrape_parse_result__())
return self.test_run
def insert_phase_result(self):
trpr = TableRow("test_run_phase_result", self.perfdb)
with open(self.output_file_name, "r") as f:
trpr.row['stdouterr'] = MySQLdb.escape_string(f.read().replace('\n', ''))
trpr.row['contaminated'] = self.contaminated
trpr.row['contamination_message'] = self.contamination_message
trpr.row.update(self.__scrape_phase_result__())
trpr.update()
    def __scrape_phase_result__(self):
        phase_r = ""
        with open(self.output_file_name, "r") as f:
            flag = False
            for line in f:
                if flag:
                    # the line after the marker holds the JSON payload
                    phase_r = json.loads(line)
                    flag = False
                    break
                # match the marker line itself; echoed "print" commands are skipped
                if "PHASE RESULT" in line and "print" not in line:
                    flag = True
self.did_correct_pass = int(phase_r['phase_result']['correctness_passed'])
self.did_time_pass = int(phase_r['phase_result']['timing_passed'])
return phase_r['phase_result']
def __scrape_parse_result__(self):
parse_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
parse_r = json.loads(line)
flag = False
break
if "PARSE RESULT" in line and "print" not in line:
flag = True
return parse_r['parse_result']
class ModelScraper(Scraper):
"""
An object that performs the scraping for the Model phase.
Relevant tables and their fields:
>test_run_clustering_result:
[k, withinss]
>test_run_model_result:
[model_json]
>test_run_phase_result:
[phase_name, start/end_epoch_ms, stdouterr, passed, correctness_passed,
timing_passed, contaminated, contamination_message]
"""
def __init__(self, object):
self.perfdb = object.perfdb
self.phase = object.phase
self.test_dir = object.test_dir
self.test_short_dir = object.test_short_dir
self.output_dir = object.output_dir
self.output_file_name = object.output_file_name
self.contamination = os.path.join(self.output_dir, "contamination_message")
self.contaminated = 1 if os.path.exists(self.contamination) else 0
self.contamination_message = "No contamination."
if self.contaminated:
with open(self.contamination, "r") as f:
self.contamination_message = MySQLdb.escape_string(f.read().replace('\n', ''))
self.did_correct_pass = 0
self.did_time_pass = 0
self.test_run_model_result = TableRow("test_run_model_result", self.perfdb)
def invoke(self):
"""
Scrapes the stdouterr from the R phase. Inserts into results tables.
Additionally handles the KMeans clustering results table.
@return: None
"""
self.insert_phase_result()
kmeans_result = self.__scrape_kmeans_result__()
if kmeans_result:
self.test_run_clustering_result = TableRow("test_run_clustering_result", self.perfdb)
self.test_run_clustering_result.row.update(kmeans_result)
self.test_run_clustering_result.update()
comp_result = self.__scrape_comparison_result__()
if comp_result != "":
self.test_run_binomial_comparison_result = TableRow("test_run_binomial_comparison", self.perfdb)
self.test_run_binomial_comparison_result.row.update(comp_result['comparison_result'])
self.test_run_binomial_comparison_result.update()
else:
self.test_run_model_result.row['model_json'] = \
MySQLdb.escape_string(str(self.__scrape_model_result__()))
self.test_run_model_result.update()
return None
def insert_phase_result(self):
trpr = TableRow("test_run_phase_result", self.perfdb)
with open(self.output_file_name, "r") as f:
trpr.row['stdouterr'] = MySQLdb.escape_string(f.read().replace('\n', ''))
trpr.row['contaminated'] = self.contaminated
trpr.row['contamination_message'] = self.contamination_message
trpr.row.update(self.__scrape_phase_result__())
trpr.update()
def __scrape_comparison_result__(self):
comparison_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
comparison_r = json.loads(line)
flag = False
break
if "COMPARISON" in line and "print" not in line:
flag = True
return comparison_r
def __scrape_phase_result__(self):
phase_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
phase_r = json.loads(line)
flag = False
break
if "PHASE RESULT" in line and "print" not in line:
flag = True
self.did_correct_pass = int(phase_r['phase_result']['correctness_passed'])
self.did_time_pass = int(phase_r['phase_result']['timing_passed'])
return phase_r['phase_result']
def __scrape_kmeans_result__(self):
kmeans_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
kmeans_r = json.loads(line)
flag = False
break
if "KMEANS RESULT" in line and "print" not in line:
flag = True
return None if kmeans_r["kmeans_result"]["k"] == "None" else kmeans_r["kmeans_result"]
def __scrape_model_result__(self):
model_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
model_r = json.loads(line)
flag = False
break
if "MODEL RESULT" in line and "print" not in line:
flag = True
return model_r["model_result"]["model_json"]
class PredictScraper(Scraper):
"""
An object that performs the scraping for the Predict phase.
    This object is not always used, e.g. in the case of KMeans and PCA,
there is no prediction phase, but the results still need to be
verified.
Relevant tables and their fields:
>test_run_binomial_classification_result:
        [auc, precision, recall, error_rate, minority_error_rate]
>test_run_cm_result:
[levels_json, cm_json, representation]
>test_run_multinomial_classification_result:
[level, level_actual_count, level_predicted_correctly_count, level_error_rate]
>test_run_phase_result:
[phase_name, start/end_epoch_ms, stdouterr, passed, correctness_passed,
timing_passed, contaminated, contamination_message]
>test_run_regression_result:
[aic, null_deviance, residual_deviance]
"""
def __init__(self, object):
self.perfdb = object.perfdb
self.phase = object.phase
self.test_dir = object.test_dir
self.test_short_dir = object.test_short_dir
self.output_dir = object.output_dir
self.output_file_name = object.output_file_name
self.contamination = os.path.join(self.output_dir, "contamination_message")
self.contaminated = 1 if os.path.exists(self.contamination) else 0
self.contamination_message = "No contamination."
if self.contaminated:
with open(self.contamination, "r") as f:
self.contamination_message = MySQLdb.escape_string(f.read().replace('\n', ''))
self.did_correct_pass = 0
self.did_time_pass = 0
self.test_run_binomial_classification_result = ""
self.test_run_cm_result = ""
self.test_run_phase_result = ""
self.test_run_regression_result = ""
self.test_run_binomial_comparison_result = ""
self.test_run_multinomial_classification_result = ""
def invoke(self):
"""
Scrapes the stdouterr from the R phase.
This invoke method will pass off control to the appropriate result scraper
using the __switch__ override.
Some preliminary scraping will be done here to obtain the correct result type.
@return: None
"""
self.insert_phase_result()
predict_type = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
print "---------------------------------"
print line
print "---------------------------------"
predict_type = self.__get_predict_type__(line.strip())[0]
flag = False
break
if "PREDICT TYPE" in line and "print" not in line:
flag = True
self.result_type = predict_type
print "GOT RESULT TYPE: " + predict_type
self.__switch__()
return None
def insert_phase_result(self):
trpr = TableRow("test_run_phase_result", self.perfdb)
with open(self.output_file_name, "r") as f:
trpr.row['stdouterr'] = MySQLdb.escape_string(f.read().replace('\n', ''))
trpr.row['contaminated'] = self.contaminated
trpr.row['contamination_message'] = self.contamination_message
trpr.row.update(self.__scrape_phase_result__())
trpr.update()
def __scrape_phase_result__(self):
phase_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
phase_r = json.loads(line)
flag = False
break
if "PHASE RESULT" in line and "print" not in line:
flag = True
self.did_correct_pass = int(phase_r['phase_result']['correctness_passed'])
self.did_time_pass = int(phase_r['phase_result']['timing_passed'])
return phase_r['phase_result']
def __get_predict_type__(self, type_candidate):
"""
        Returns the math result type(s) found in the candidate string:
        'binomial', 'regression', 'multinomial', 'cm'
"""
print "TYPE CANDIDATE: " + type_candidate
types = ['binomial', 'regression', 'multinomial', 'cm']
rf = type_candidate.lower()
print "RETURNING TYPE: " + str( [t for t in types if t in rf])
return [t for t in types if t in rf]
def __switch__(self):
"""
Overrides the __switch__ method of the parent class.
This switch method handles the different types of math
        results: regression, multinomial classification, CM result,
binomial classification
Multinomial classification is the only case where there will
be multiple rows inserted, all other results constitute a single row
in their respective tables.
One important note is that the scrapers in this case handle the
database insertions.
"""
print "SWITCHING TO " + self.result_type
obj = {'regression' : self.__scrape_regression_result__,
'cm' : self.__scrape_cm_result__,
'multinomial': self.__scrape_multinomial_result__,
'binomial' : self.__scrape_binomial_result__,
'comparison' : self.__scrape_comparison_result__,
}.get(self.result_type, "bad key")
if self.result_type in ['multinomial', 'binomial']:
self.__scrape_cm_result__()
return obj()
def __scrape_regression_result__(self):
regression_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
regression_r = json.loads(line)
flag = False
break
if "REGRESSION" in line and "print" not in line:
flag = True
        # do the insert (editor's completion sketch: assumes the JSON payload
        # key 'regression_result', mirroring the other scrapers' conventions)
        self.test_run_regression_result = TableRow(
            "test_run_regression_result", self.perfdb)
        self.test_run_regression_result.row.update(
            regression_r['regression_result'])
        self.test_run_regression_result.update()
def __scrape_cm_result__(self):
cm_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
cm_r = json.loads(line)
flag = False
break
if "CM RESULTS JSON" in line and "print" not in line:
flag = True
self.test_run_cm_result = TableRow("test_run_cm_result", self.perfdb)
self.test_run_cm_result.row.update(cm_r["cm_json"])
self.test_run_cm_result.update()
def __scrape_binomial_result__(self):
binomial_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
binomial_r = json.loads(line)
flag = False
break
if "BINOMIAL" in line and "print" not in line:
flag = True
self.test_run_binomial_classification_result = TableRow("test_run_binomial_classification_result", self.perfdb)
self.test_run_binomial_classification_result.row.update(binomial_r['binomial_result'])
self.test_run_binomial_classification_result.update()
return None
def __scrape_multinomial_result__(self):
multinomial_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
multinomial_r = json.loads(line)
flag = False
break
if "MULTINOMIAL" in line and "print" not in line:
flag = True
for level in multinomial_r["multinomial_result"]:
self.test_run_multinomial_classification_result = TableRow("test_run_multinomial_classification_result", self.perfdb)
self.test_run_multinomial_classification_result.row.update(level)
self.test_run_multinomial_classification_result.update()
def __scrape_comparison_result__(self):
comparison_r = ""
with open(self.output_file_name, "r") as f:
flag = False
for line in f:
if flag:
comparison_r = json.loads(line)
flag = False
break
if "COMPARISON" in line and "print" not in line:
flag = True
self.test_run_binomial_comparison_result = TableRow("test_run_binomial_comparison", self.perfdb)
self.test_run_binomial_comparison_result.row.update(comparison_r['comparison_result'])
self.test_run_binomial_comparison_result.update()
|
ahmed-mahran/hue
|
refs/heads/master
|
desktop/core/ext-py/python-daemon/test/scaffold.py
|
39
|
# -*- coding: utf-8 -*-
# test/scaffold.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2007–2009 Ben Finney <ben+python@benfinney.id.au>
# This is free software; you may copy, modify and/or distribute this work
# under the terms of the GNU General Public License, version 2 or later.
# No warranty expressed or implied. See the file LICENSE.GPL-2 for details.
""" Scaffolding for unit test modules.
"""
import unittest
import doctest
import logging
import os
import sys
import operator
import textwrap
from minimock import (
Mock,
TraceTracker as MockTracker,
mock,
restore as mock_restore,
)
test_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(test_dir)
if test_dir not in sys.path:
    sys.path.insert(1, test_dir)
if parent_dir not in sys.path:
    sys.path.insert(1, parent_dir)
# Disable all but the most critical logging messages
logging.disable(logging.CRITICAL)
def get_python_module_names(file_list, file_suffix='.py'):
""" Return a list of module names from a filename list. """
module_names = [m[:m.rfind(file_suffix)] for m in file_list
if m.endswith(file_suffix)]
return module_names
def get_test_module_names(module_list, module_prefix='test_'):
""" Return the list of module names that qualify as test modules. """
module_names = [m for m in module_list
if m.startswith(module_prefix)]
return module_names
def make_suite(path=test_dir):
""" Create the test suite for the given path. """
loader = unittest.TestLoader()
python_module_names = get_python_module_names(os.listdir(path))
test_module_names = get_test_module_names(python_module_names)
suite = loader.loadTestsFromNames(test_module_names)
return suite
def get_function_signature(func):
""" Get the function signature as a mapping of attributes. """
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_defaults = {}
func_defaults = ()
if func.func_defaults is not None:
func_defaults = func.func_defaults
for (name, value) in zip(arg_names[::-1], func_defaults[::-1]):
arg_defaults[name] = value
signature = {
'name': func.__name__,
'arg_count': arg_count,
'arg_names': arg_names,
'arg_defaults': arg_defaults,
}
non_pos_names = list(func.func_code.co_varnames[arg_count:])
COLLECTS_ARBITRARY_POSITIONAL_ARGS = 0x04
if func.func_code.co_flags & COLLECTS_ARBITRARY_POSITIONAL_ARGS:
signature['var_args'] = non_pos_names.pop(0)
COLLECTS_ARBITRARY_KEYWORD_ARGS = 0x08
if func.func_code.co_flags & COLLECTS_ARBITRARY_KEYWORD_ARGS:
signature['var_kw_args'] = non_pos_names.pop(0)
return signature
def format_function_signature(func):
""" Format the function signature as printable text. """
signature = get_function_signature(func)
args_text = []
for arg_name in signature['arg_names']:
if arg_name in signature['arg_defaults']:
arg_default = signature['arg_defaults'][arg_name]
arg_text_template = "%(arg_name)s=%(arg_default)r"
else:
arg_text_template = "%(arg_name)s"
args_text.append(arg_text_template % vars())
if 'var_args' in signature:
args_text.append("*%(var_args)s" % signature)
if 'var_kw_args' in signature:
args_text.append("**%(var_kw_args)s" % signature)
signature_args_text = ", ".join(args_text)
func_name = signature['name']
signature_text = (
"%(func_name)s(%(signature_args_text)s)" % vars())
return signature_text
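# Editor's illustration (not part of the original module) of the two helpers
# above, for a hypothetical function:
#
#   def example(alpha, beta=2, *args, **kwargs): pass
#
#   get_function_signature(example)
#   # -> {'name': 'example', 'arg_count': 2, 'arg_names': ('alpha', 'beta'),
#   #     'arg_defaults': {'beta': 2}, 'var_args': 'args',
#   #     'var_kw_args': 'kwargs'}
#
#   format_function_signature(example)
#   # -> "example(alpha, beta=2, *args, **kwargs)"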
class TestCase(unittest.TestCase):
""" Test case behaviour. """
def failUnlessRaises(self, exc_class, func, *args, **kwargs):
""" Fail unless the function call raises the expected exception.
Fail the test if an instance of the exception class
``exc_class`` is not raised when calling ``func`` with the
arguments ``*args`` and ``**kwargs``.
"""
try:
super(TestCase, self).failUnlessRaises(
exc_class, func, *args, **kwargs)
except self.failureException:
exc_class_name = exc_class.__name__
msg = (
"Exception %(exc_class_name)s not raised"
" for function call:"
" func=%(func)r args=%(args)r kwargs=%(kwargs)r"
) % vars()
raise self.failureException(msg)
def failIfIs(self, first, second, msg=None):
""" Fail if the two objects are identical.
Fail the test if ``first`` and ``second`` are identical,
as determined by the ``is`` operator.
"""
if first is second:
if msg is None:
msg = "%(first)r is %(second)r" % vars()
raise self.failureException(msg)
def failUnlessIs(self, first, second, msg=None):
""" Fail unless the two objects are identical.
Fail the test unless ``first`` and ``second`` are
identical, as determined by the ``is`` operator.
"""
if first is not second:
if msg is None:
msg = "%(first)r is not %(second)r" % vars()
raise self.failureException(msg)
assertIs = failUnlessIs
assertNotIs = failIfIs
def failIfIn(self, first, second, msg=None):
""" Fail if the second object is in the first.
Fail the test if ``first`` contains ``second``, as
determined by the ``in`` operator.
"""
if second in first:
if msg is None:
msg = "%(second)r is in %(first)r" % vars()
raise self.failureException(msg)
def failUnlessIn(self, first, second, msg=None):
""" Fail unless the second object is in the first.
Fail the test unless ``first`` contains ``second``, as
determined by the ``in`` operator.
"""
if second not in first:
if msg is None:
msg = "%(second)r is not in %(first)r" % vars()
raise self.failureException(msg)
assertIn = failUnlessIn
assertNotIn = failIfIn
def failUnlessOutputCheckerMatch(self, want, got, msg=None):
""" Fail unless the specified string matches the expected.
Fail the test unless ``want`` matches ``got``, as
determined by a ``doctest.OutputChecker`` instance. This
is not an equality check, but a pattern match according to
the ``OutputChecker`` rules.
"""
checker = doctest.OutputChecker()
want = textwrap.dedent(want)
source = ""
example = doctest.Example(source, want)
got = textwrap.dedent(got)
checker_optionflags = reduce(operator.or_, [
doctest.ELLIPSIS,
])
if not checker.check_output(want, got, checker_optionflags):
if msg is None:
diff = checker.output_difference(
example, got, checker_optionflags)
msg = "\n".join([
"Output received did not match expected output",
"%(diff)s",
]) % vars()
raise self.failureException(msg)
assertOutputCheckerMatch = failUnlessOutputCheckerMatch
def failUnlessMockCheckerMatch(self, want, tracker=None, msg=None):
""" Fail unless the mock tracker matches the wanted output.
Fail the test unless `want` matches the output tracked by
        `tracker` (defaults to ``self.mock_tracker``). This is not
an equality check, but a pattern match according to the
``minimock.MinimockOutputChecker`` rules.
"""
if tracker is None:
tracker = self.mock_tracker
if not tracker.check(want):
if msg is None:
diff = tracker.diff(want)
msg = "\n".join([
"Output received did not match expected output",
"%(diff)s",
]) % vars()
raise self.failureException(msg)
def failIfMockCheckerMatch(self, want, tracker=None, msg=None):
""" Fail if the mock tracker matches the specified output.
Fail the test if `want` matches the output tracked by
        `tracker` (defaults to ``self.mock_tracker``). This is not
an equality check, but a pattern match according to the
``minimock.MinimockOutputChecker`` rules.
"""
if tracker is None:
tracker = self.mock_tracker
if tracker.check(want):
if msg is None:
diff = tracker.diff(want)
msg = "\n".join([
"Output received matched specified undesired output",
"%(diff)s",
]) % vars()
raise self.failureException(msg)
assertMockCheckerMatch = failUnlessMockCheckerMatch
assertNotMockCheckerMatch = failIfMockCheckerMatch
def failIfIsInstance(self, obj, classes, msg=None):
""" Fail if the object is an instance of the specified classes.
Fail the test if the object ``obj`` is an instance of any
of ``classes``.
"""
if isinstance(obj, classes):
if msg is None:
msg = (
"%(obj)r is an instance of one of %(classes)r"
) % vars()
raise self.failureException(msg)
def failUnlessIsInstance(self, obj, classes, msg=None):
""" Fail unless the object is an instance of the specified classes.
Fail the test unless the object ``obj`` is an instance of
any of ``classes``.
"""
if not isinstance(obj, classes):
if msg is None:
msg = (
"%(obj)r is not an instance of any of %(classes)r"
) % vars()
raise self.failureException(msg)
assertIsInstance = failUnlessIsInstance
assertNotIsInstance = failIfIsInstance
def failUnlessFunctionInTraceback(self, traceback, function, msg=None):
""" Fail if the function is not in the traceback.
Fail the test if the function ``function`` is not at any
of the levels in the traceback object ``traceback``.
"""
func_in_traceback = False
expect_code = function.func_code
current_traceback = traceback
while current_traceback is not None:
if expect_code is current_traceback.tb_frame.f_code:
func_in_traceback = True
break
current_traceback = current_traceback.tb_next
if not func_in_traceback:
if msg is None:
msg = (
"Traceback did not lead to original function"
" %(function)s"
) % vars()
raise self.failureException(msg)
assertFunctionInTraceback = failUnlessFunctionInTraceback
def failUnlessFunctionSignatureMatch(self, first, second, msg=None):
""" Fail if the function signatures do not match.
Fail the test if the function signature does not match
between the ``first`` function and the ``second``
function.
The function signature includes:
* function name,
* count of named parameters,
* sequence of named parameters,
* default values of named parameters,
* collector for arbitrary positional arguments,
* collector for arbitrary keyword arguments.
"""
first_signature = get_function_signature(first)
second_signature = get_function_signature(second)
if first_signature != second_signature:
if msg is None:
first_signature_text = format_function_signature(first)
second_signature_text = format_function_signature(second)
msg = (textwrap.dedent("""\
Function signatures do not match:
%(first_signature)r != %(second_signature)r
Expected:
%(first_signature_text)s
Got:
%(second_signature_text)s""")
) % vars()
raise self.failureException(msg)
assertFunctionSignatureMatch = failUnlessFunctionSignatureMatch
class Exception_TestCase(TestCase):
""" Test cases for exception classes. """
def __init__(self, *args, **kwargs):
""" Set up a new instance """
self.valid_exceptions = NotImplemented
super(Exception_TestCase, self).__init__(*args, **kwargs)
def setUp(self):
""" Set up test fixtures. """
for exc_type, params in self.valid_exceptions.items():
args = (None, ) * params['min_args']
params['args'] = args
instance = exc_type(*args)
params['instance'] = instance
super(Exception_TestCase, self).setUp()
def test_exception_instance(self):
""" Exception instance should be created. """
for params in self.valid_exceptions.values():
instance = params['instance']
self.failIfIs(None, instance)
def test_exception_types(self):
""" Exception instances should match expected types. """
for params in self.valid_exceptions.values():
instance = params['instance']
for match_type in params['types']:
match_type_name = match_type.__name__
fail_msg = (
"%(instance)r is not an instance of"
" %(match_type_name)s"
) % vars()
self.failUnless(
isinstance(instance, match_type),
msg=fail_msg)
|
johnnadratowski/wkhtmltopdf
|
refs/heads/master
|
scripts/release.py
|
19
|
#!/usr/bin/env python
#
# Copyright 2014 wkhtmltopdf authors
#
# This file is part of wkhtmltopdf.
#
# wkhtmltopdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wkhtmltopdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with wkhtmltopdf. If not, see <http://www.gnu.org/licenses/>.
import os, sys, platform, subprocess, build
def get_build_targets():
map = {}
for k, v in build.BUILDERS.iteritems():
        if v not in map:
map[v] = []
map[v].append(k)
map[v].sort()
return map
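# Editor's illustration (hypothetical BUILDERS contents): with
#   build.BUILDERS == {'linux-amd64': 'linux_schroot',
#                      'linux-i386': 'linux_schroot',
#                      'osx-carbon-i386': 'osx'}
# get_build_targets() returns the inverted, sorted mapping:
#   {'linux_schroot': ['linux-amd64', 'linux-i386'],
#    'osx': ['osx-carbon-i386']}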
def get_targets():
if platform.system() == 'Windows':
return ['msvc2013-win32', 'msvc2013-win64']
elif platform.system() == 'Darwin':
builders = ['osx']
else:
builders = ['source_tarball', 'linux_schroot', 'mingw64_cross']
targets, map = [], get_build_targets()
for builder in builders:
targets.extend(map[builder])
return targets
def build_target(basedir, target):
build.message('*************** building: %s\n\n' % target)
build.mkdir_p(basedir)
log = open(os.path.join(basedir, '%s.log' % target), 'w')
proc = subprocess.Popen([sys.executable, 'scripts/build.py', target],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
bufsize=1, cwd=os.path.join(basedir, '..'))
proc.stdin.close()
for line in iter(proc.stdout.readline, ''):
line = line.rstrip()+'\n'
if '\r' in line:
line = line[1+line.rindex('\r'):]
build.message(line)
log.write(line)
log.flush()
proc.stdout.close()
return proc.wait() == 0
def main():
rootdir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
basedir = os.path.join(rootdir, 'static-build')
os.chdir(os.path.join(rootdir, 'qt'))
build.shell('git clean -fdx')
build.shell('git reset --hard HEAD')
os.chdir(rootdir)
build.shell('git clean -fdx')
build.shell('git reset --hard HEAD')
build.shell('git submodule update')
status = {}
for target in get_targets():
if not build_target(basedir, target):
status[target] = 'failed'
continue
status[target] = 'success'
build.rmdir(os.path.join(basedir, target))
build.message('\n\n\nSTATUS\n======\n')
width = max([len(target) for target in status])
for target in sorted(status.keys()):
build.message('%s: %s\n' % (target.ljust(width), status[target]))
if __name__ == '__main__':
main()
|
dxxb/micropython
|
refs/heads/upstream-tracking
|
tests/wipy/time.py
|
67
|
import time
DAYS_PER_MONTH = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leap(year):
    # the simple divisible-by-4 rule is sufficient for the tested range
    # (2000-2048): 2000 itself is divisible by 400, and no other century
    # year falls inside the range
    return (year % 4) == 0
def test():
seconds = 0
wday = 5 # Jan 1, 2000 was a Saturday
for year in range(2000, 2049):
print("Testing %d" % year)
yday = 1
for month in range(1, 13):
if month == 2 and is_leap(year):
DAYS_PER_MONTH[2] = 29
else:
DAYS_PER_MONTH[2] = 28
for day in range(1, DAYS_PER_MONTH[month] + 1):
secs = time.mktime((year, month, day, 0, 0, 0, 0, 0))
if secs != seconds:
print("mktime failed for %d-%02d-%02d got %d expected %d" % (year, month, day, secs, seconds))
tuple = time.localtime(seconds)
secs = time.mktime(tuple)
if secs != seconds:
print("localtime failed for %d-%02d-%02d got %d expected %d" % (year, month, day, secs, seconds))
return
seconds += 86400
if yday != tuple[7]:
print("locatime for %d-%02d-%02d got yday %d, expecting %d" % (year, month, day, tuple[7], yday))
return
if wday != tuple[6]:
print("locatime for %d-%02d-%02d got wday %d, expecting %d" % (year, month, day, tuple[6], wday))
return
yday += 1
wday = (wday + 1) % 7
def spot_test(seconds, expected_time):
actual_time = time.localtime(seconds)
for i in range(len(actual_time)):
if actual_time[i] != expected_time[i]:
print("time.localtime(", seconds, ") returned", actual_time, "expecting", expected_time)
return
print("time.localtime(", seconds, ") returned", actual_time, "(pass)")
test()
spot_test( 0, (2000, 1, 1, 0, 0, 0, 5, 1))
spot_test( 1, (2000, 1, 1, 0, 0, 1, 5, 1))
spot_test( 59, (2000, 1, 1, 0, 0, 59, 5, 1))
spot_test( 60, (2000, 1, 1, 0, 1, 0, 5, 1))
spot_test( 3599, (2000, 1, 1, 0, 59, 59, 5, 1))
spot_test( 3600, (2000, 1, 1, 1, 0, 0, 5, 1))
spot_test( -1, (1999, 12, 31, 23, 59, 59, 4, 365))
spot_test( 447549467, (2014, 3, 7, 23, 17, 47, 4, 66))
spot_test( -940984933, (1970, 3, 7, 23, 17, 47, 5, 66))
spot_test(-1072915199, (1966, 1, 1, 0, 0, 1, 5, 1))
spot_test(-1072915200, (1966, 1, 1, 0, 0, 0, 5, 1))
spot_test(-1072915201, (1965, 12, 31, 23, 59, 59, 4, 365))
t1 = time.time()
time.sleep(2)
t2 = time.time()
print(abs(time.ticks_diff(t1, t2) - 2) <= 1)
t1 = time.ticks_ms()
time.sleep_ms(50)
t2 = time.ticks_ms()
print(abs(time.ticks_diff(t1, t2) - 50) <= 1)
t1 = time.ticks_us()
time.sleep_us(1000)
t2 = time.ticks_us()
print(time.ticks_diff(t1, t2) < 1500)
print(time.ticks_diff(time.ticks_cpu(), time.ticks_cpu()) < 16384)
|
craigslist/python-climage
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/python
# Copyright 2013 craigslist
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''craigslist image package setuptools script.'''
import setuptools
import climage
setuptools.setup(
name='climage',
version=climage.__version__,
description='craigslist image package',
long_description=open('README.rst').read(),
author='craigslist',
author_email='opensource@craigslist.org',
url='http://craigslist.org/about/opensource',
packages=setuptools.find_packages(exclude=['test*']),
scripts=[
'bin/climageprocessor',
'bin/climageserver'],
test_suite='nose.collector',
install_requires=[
'clblob',
'clcommon',
'pgmagick',
'PIL'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: Software Development :: Libraries :: Python Modules'])
|
kikocorreoso/brython
|
refs/heads/master
|
www/src/Lib/unittest/test/support.py
|
37
|
import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
                if hash(obj_1) != hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class _BaseLoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super().__init__()
def startTest(self, test):
self._events.append('startTest')
super().startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super().startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super().stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super().stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super().addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super().addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super().addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super().addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super().addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super().addUnexpectedSuccess(*args)
class LegacyLoggingResult(_BaseLoggingResult):
"""
A legacy TestResult implementation, without an addSubTest method,
which records its method calls.
"""
@property
def addSubTest(self):
raise AttributeError
class LoggingResult(_BaseLoggingResult):
"""
A TestResult implementation which records its method calls.
"""
def addSubTest(self, test, subtest, err):
if err is None:
self._events.append('addSubTestSuccess')
else:
self._events.append('addSubTestFailure')
super().addSubTest(test, subtest, err)
class ResultWithNoStartTestRunStopTestRun(object):
"""An object honouring TestResult before startTestRun/stopTestRun."""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test):
pass
def addFailure(self, test):
pass
def addSuccess(self, test):
pass
def wasSuccessful(self):
return True
|
SilverWingedSeraph/sws-dotfiles
|
refs/heads/master
|
stow/vim/.vim/bundle/editorconfig-vim/plugin/editorconfig-core-py/main.py
|
20
|
#!/usr/bin/env python
from editorconfig.main import main
if __name__ == "__main__":
main()
|
peterfpeterson/mantid
|
refs/heads/master
|
qt/python/mantidqt/widgets/workspacedisplay/table/table_model.py
|
3
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package.
from qtpy.QtCore import Qt, QAbstractTableModel, QModelIndex
from mantid.kernel import V3D
BATCH_SIZE = 3000
MINIMUM_BATCH_SIZE_ROWS = 100
class TableModel(QAbstractTableModel):
"""
A QAbstractTableModel for use with a QTableView
This implementation loads rows of the table in batches
More batches are loaded when the user scrolls down in the table
"""
ITEM_CHANGED_INVALID_DATA_MESSAGE = "Error: Trying to set invalid data for the column."
ITEM_CHANGED_UNKNOWN_ERROR_MESSAGE = "Unknown error occurred: {}"
def __init__(self, data_model, parent=None):
super().__init__(parent=parent)
self._data_model = data_model
self._row_count = 0
self._headers = []
self._row_batch_size = MINIMUM_BATCH_SIZE_ROWS
def setHorizontalHeaderLabels(self, labels):
self._headers = labels
def canFetchMore(self, index):
if index.isValid():
return False
return self._row_count < self._data_model.get_number_of_rows()
def fetchMore(self, index):
if index.isValid():
return
remainder = self._data_model.get_number_of_rows() - self._row_count
items_to_fetch = min(self._row_batch_size, remainder)
if items_to_fetch < 0:
return
self.beginInsertRows(QModelIndex(), self._row_count, self._row_count + items_to_fetch - 1)
self._row_count += items_to_fetch
self.endInsertRows()
def rowCount(self, parent=QModelIndex()):
if parent.isValid():
return 0
else:
return self._row_count
def columnCount(self, parent=QModelIndex()):
if parent.isValid():
return 0
return self._data_model.get_number_of_columns()
def headerData(self, section, orientation, role):
if role in (Qt.DisplayRole, Qt.EditRole) and orientation == Qt.Horizontal:
if section < len(self._headers):
return self._headers[section]
else:
return super().headerData(section, orientation, role)
def setHeaderData(self, section, orientation, value, role=Qt.EditRole):
if role == Qt.EditRole and orientation == Qt.Horizontal:
            # the header labels live in self._headers; there is no
            # _defaultHeaders attribute on this model
            self._headers.insert(section, value)
self.headerDataChanged.emit(Qt.Horizontal, section, section + 1)
return True
else:
return False
def setData(self, index, value, role):
if index.isValid() and role == Qt.EditRole:
col = index.column()
row = index.row()
try:
self._data_model.set_cell_data(row, col, value, self.is_v3d(index))
except ValueError:
print(self.ITEM_CHANGED_INVALID_DATA_MESSAGE)
return False
except Exception as x:
print(self.ITEM_CHANGED_UNKNOWN_ERROR_MESSAGE.format(x))
return False
self.dataChanged.emit(index, index)
return True
else:
return False
def data(self, index, role=Qt.DisplayRole):
if not index.isValid():
return None
if index.row() >= self.max_rows() or index.row() < 0:
return None
if role in (Qt.DisplayRole, Qt.EditRole):
data = self._data_model.get_cell(index.row(), index.column())
return str(data) if isinstance(data, V3D) else data
return None
def load_data(self, data_model):
self.beginResetModel()
self._data_model = data_model
self._headers = self._data_model.get_column_headers()
self._row_count = 0
self._update_row_batch_size()
self.endResetModel()
def _update_row_batch_size(self):
num_data_columns = self._data_model.get_number_of_columns()
if num_data_columns > 0:
self._row_batch_size = max(int(BATCH_SIZE/num_data_columns),
MINIMUM_BATCH_SIZE_ROWS)
else:
self._row_batch_size = MINIMUM_BATCH_SIZE_ROWS
def flags(self, index):
col = index.column()
editable = self._data_model.is_editable_column(col)
if editable:
return super().flags(index) | Qt.ItemIsEditable | Qt.ItemIsSelectable
else:
return super().flags(index) | Qt.ItemIsSelectable
def max_rows(self):
return self._data_model.get_number_of_rows()
def is_v3d(self, index):
col = index.column()
row = index.row()
return isinstance(self._data_model.get_cell(row, col), V3D)
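# Minimal usage sketch (editor's illustration; assumes a data_model object
# implementing get_number_of_rows/get_number_of_columns/get_cell and friends,
# plus a qtpy.QtWidgets.QTableView):
#
#   model = TableModel(data_model)
#   view = QTableView()
#   view.setModel(model)  # rows are then fetched in batches while scrolling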
|
WeichenXu123/spark
|
refs/heads/master
|
resource-managers/kubernetes/integration-tests/tests/pyfiles.py
|
26
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
if __name__ == "__main__":
"""
Usage: pyfiles [major_python_version]
"""
spark = SparkSession \
.builder \
.appName("PyFilesTest") \
.getOrCreate()
from py_container_checks import version_check
# Begin of Python container checks
version_check(sys.argv[1], 2 if sys.argv[1] == "python" else 3)
spark.stop()
|
nevil-brownlee/python-libtrace
|
refs/heads/master
|
doc/examples/icmp.py
|
1
|
#!/usr/bin/env python
# Thu, 13 Mar 14 (PDT)
# icmp.py: Demonstrate ICMP (v4) header decodes
# Copyright (C) 2017, Nevil Brownlee, U Auckland | WAND
from plt_testing import *
t = get_example_trace('icmp-sample.pcap')
n = 0; nicmp = 0
offset = 12
for pkt in t:
n += 1
icmp = pkt.icmp
if not icmp:
continue
print "%5d: " % (n),
print_icmp(icmp, offset)
print
nicmp += 1
#if nicmp == 10:
# break
t.close()
print "%d packets examined\n" % (n)
|
nemonik/Intellect
|
refs/heads/master
|
intellect/examples/testing/ClassA.py
|
2
|
"""
Copyright (c) 2011, The MITRE Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
ClassA
Description: Intellect test fact
Initial Version: Feb 2, 2011
@author: Michael Joseph Walsh
"""
class ClassA(object):
'''
An example fact
'''
globalInClassA_1 = "a global"
globalInClassA_2 = "another global"
def __init__(self, property0 = None, property1 = None):
'''
ClassA initializer
'''
self.attribute1 = "attribute1's value"
self.__hiddenAttribute1 = "super secret hidden attribute. nah!"
self.property0 = property0
self.property1 = property1
print "created an instance of ClassA"
@property
def property0(self):
return self._property0
@property0.setter
def property0(self, value):
self._property0 = value
@property
def property1(self):
return self._property1
@property1.setter
def property1(self, value):
self._property1 = value
def someMethod(self):
print("someMethod called")
@staticmethod
def classAStaticMethod(self):
print("classAStaticMethod called")
def classASomeOtherMethod(self):
print("classASomeOtherMethd called")
def __classAHiddenMethod(self):
print("classAHiddenMethod called")
|
issackelly/django-security
|
refs/heads/master
|
security/password_expiry.py
|
3
|
# Copyright (c) 2011, SD Elements. See LICENSE.txt for details.
from .models import PasswordExpiry
from django.conf import settings
def password_is_expired(user):
password_expiry, _ = PasswordExpiry.objects.get_or_create(user=user)
password_settings = getattr(settings, 'MANDATORY_PASSWORD_CHANGE', {})
include_superusers = password_settings.get('INCLUDE_SUPERUSERS', False)
if include_superusers:
return password_expiry.is_expired()
else:
return not user.is_superuser and password_expiry.is_expired()
def never_expire_password(user):
password_expiry, _ = PasswordExpiry.objects.get_or_create(user=user)
password_expiry.never_expire()
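# Illustrative usage (editor's sketch; assumes a Django view and a
# hypothetical 'change_password' URL name):
#
#   if password_is_expired(request.user):
#       return redirect('change_password')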
|
EricMuller/mywebmarks-backend
|
refs/heads/master
|
requirements/twisted/Twisted-17.1.0/src/twisted/cred/error.py
|
27
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Cred errors.
"""
from __future__ import division, absolute_import
class Unauthorized(Exception):
"""Standard unauthorized error."""
class LoginFailed(Exception):
"""
The user's request to log in failed for some reason.
"""
class UnauthorizedLogin(LoginFailed, Unauthorized):
"""The user was not authorized to log in.
"""
class UnhandledCredentials(LoginFailed):
"""A type of credentials were passed in with no knowledge of how to check
them. This is a server configuration error - it means that a protocol was
connected to a Portal without a CredentialChecker that can check all of its
potential authentication strategies.
"""
class LoginDenied(LoginFailed):
"""
The realm rejected this login for some reason.
Examples of reasons this might be raised include an avatar logging in
too frequently, a quota having been fully used, or the overall server
load being too high.
"""
|
borgcoin/Borgcoin1
|
refs/heads/Borgcoin
|
contrib/bitrpc/bitrpc.py
|
13
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9334")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9334")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional): ")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
tzulberti/entrenamiento-arqueria
|
refs/heads/master
|
alembic/versions/029_tipo_entrenamiento.py
|
1
|
# -*- coding: utf-8 -*-
"""tipo_entrenamiento
Revision ID: 029
Revises: 028
Create Date: 2014-08-05 22:48:29.805732
"""
# revision identifiers, used by Alembic.
revision = '029'
down_revision = '028'
import inspect
import imp
import os
from alembic import op
def upgrade():
utils_path = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))),
'..',
'utils.py')
utils = imp.load_source('', utils_path)
utils.create_categoric_table('tipo_entrenamiento',
[u'Acumulación',
u'Transmisión',
u'Realización',
])
def downgrade():
op.drop_table('tipo_entrenamiento')
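# The categoric-table helper lives one directory up and is loaded via imp
# so the migration stays self-contained. Applying or reverting this
# revision follows the usual alembic flow, e.g.:
#
# alembic upgrade 029
# alembic downgrade 028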
|
ltcmelo/qt-creator
|
refs/heads/master
|
share/qtcreator/debugger/boosttypes.py
|
4
|
############################################################################
#
# Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
# Contact: http://www.qt-project.org/legal
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and Digia. For licensing terms and
# conditions see http://www.qt.io/licensing. For further information
# use the contact form at http://www.qt.io/contact-us.
#
# GNU Lesser General Public License Usage
# Alternatively, this file may be used under the terms of the GNU Lesser
# General Public License version 2.1 or version 3 as published by the Free
# Software Foundation and appearing in the file LICENSE.LGPLv21 and
# LICENSE.LGPLv3 included in the packaging of this file. Please review the
# following information to ensure the GNU Lesser General Public License
# requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
#
# In addition, as a special exception, Digia gives you certain additional
# rights. These rights are described in the Digia Qt LGPL Exception
# version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
#
#############################################################################
from dumper import *
def qdump__boost__bimaps__bimap(d, value):
#leftType = d.templateArgument(value.type, 0)
#rightType = d.templateArgument(value.type, 1)
size = int(value["core"]["node_count"])
d.putItemCount(size)
if d.isExpanded():
d.putPlainChildren(value)
def qdump__boost__optional(d, value):
if int(value["m_initialized"]) == 0:
d.putValue("<uninitialized>")
d.putNumChild(0)
else:
type = d.templateArgument(value.type, 0)
storage = value["m_storage"]
if d.isReferenceType(type):
d.putItem(storage.cast(type.target().pointer()).dereference())
else:
d.putItem(storage.cast(type))
d.putBetterType(value.type)
def qdump__boost__shared_ptr(d, value):
# s boost::shared_ptr<int>
# pn boost::detail::shared_count
# pi_ 0x0 boost::detail::sp_counted_base *
# px 0x0 int *
if d.isNull(value["pn"]["pi_"]):
d.putValue("(null)")
d.putNumChild(0)
return
if d.isNull(value["px"]):
d.putValue("(null)")
d.putNumChild(0)
return
countedbase = value["pn"]["pi_"].dereference()
weakcount = int(countedbase["weak_count_"])
usecount = int(countedbase["use_count_"])
d.check(weakcount >= 0)
d.check(usecount <= 10*1000*1000)
val = value["px"].dereference()
type = val.type
# handle boost::shared_ptr<int>::element_type as int
if str(type).endswith(">::element_type"):
type = type.strip_typedefs()
if d.isSimpleType(type):
d.putNumChild(3)
d.putItem(val)
d.putBetterType(value.type)
else:
d.putEmptyValue()
d.putNumChild(3)
if d.isExpanded():
with Children(d, 3):
d.putSubItem("data", val)
d.putIntItem("weakcount", weakcount)
d.putIntItem("usecount", usecount)
def qdump__boost__container__list(d, value):
r = value["members_"]["m_icont"]["data_"]["root_plus_size_"]
n = toInteger(r["size_"])
d.putItemCount(n)
if d.isExpanded():
innerType = d.templateArgument(value.type, 0)
offset = 2 * d.ptrSize()
with Children(d, n):
p = r["root_"]["next_"]
for i in xrange(n):
d.putSubItem("%s" % i, d.createValue(d.pointerValue(p) + offset, innerType))
p = p["next_"]
def qdump__boost__gregorian__date(d, value):
d.putValue(int(value["days_"]), JulianDate)
d.putNumChild(0)
def qdump__boost__posix_time__ptime(d, value):
ms = int(int(value["time_"]["time_count_"]["value_"]) / 1000)
d.putValue("%s/%s" % divmod(ms, 86400000), JulianDateAndMillisecondsSinceMidnight)
d.putNumChild(0)
def qdump__boost__posix_time__time_duration(d, value):
d.putValue(int(int(value["ticks_"]["value_"]) / 1000), MillisecondsSinceMidnight)
d.putNumChild(0)
def qdump__boost__unordered__unordered_set(d, value):
base = d.addressOf(value)
ptrSize = d.ptrSize()
size = d.extractInt(base + 2 * ptrSize)
d.putItemCount(size)
if d.isExpanded():
innerType = d.templateArgument(value.type, 0)
bucketCount = d.extractInt(base + ptrSize)
offset = int((innerType.sizeof + ptrSize - 1) / ptrSize) * ptrSize
with Children(d, size, maxNumChild=10000):
afterBuckets = d.extractPointer(base + 5 * ptrSize)
afterBuckets += bucketCount * ptrSize
item = d.extractPointer(afterBuckets)
for j in d.childRange():
d.putSubItem(j, d.createValue(item - offset, innerType))
item = d.extractPointer(item)
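# Qt Creator looks these dumpers up by name: "qdump__" plus the fully
# qualified type with "::" spelled "__", so qdump__boost__optional matches
# boost::optional. A dumper for another type follows the same pattern; a
# minimal sketch (boost::circular_buffer and its member name are chosen
# for illustration only):
#
# def qdump__boost__circular_buffer(d, value):
#     d.putItemCount(int(value["m_size"]))  # assumed member name
#     if d.isExpanded():
#         d.putPlainChildren(value)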
|
richpolis/siveinpy
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/vi/__init__.py
|
12133432
| |
NullSoldier/django
|
refs/heads/master
|
tests/m2m_through_regress/__init__.py
|
12133432
| |
oshtaier/robottelo
|
refs/heads/master
|
tests/robottelo/test_decorators.py
|
3
|
"""Unit tests for :mod:`robottelo.common.decorators`."""
from ddt import DATA_ATTR
from fauxfactory import gen_integer
from robottelo.common import conf, decorators
from unittest import TestCase
# (Too many public methods) pylint: disable=R0904
class DataTestCase(TestCase):
"""Tests for :func:`robottelo.common.decorators.data`."""
def setUp(self): # noqa pylint:disable=C0103
self.test_data = ('one', 'two', 'three')
def function():
"""An empty function."""
self.function = function
def test_smoke(self):
conf.properties['main.smoke'] = '1'
decorated = decorators.data(*self.test_data)(self.function)
data_attr = getattr(decorated, DATA_ATTR)
self.assertEqual(len(data_attr), 1)
self.assertIn(data_attr[0], self.test_data)
def test_not_smoke(self):
conf.properties['main.smoke'] = '0'
decorated = decorators.data(*self.test_data)(self.function)
data_attr = getattr(decorated, DATA_ATTR)
self.assertEqual(len(data_attr), len(self.test_data))
self.assertEqual(getattr(decorated, DATA_ATTR), self.test_data)
class BzBugIsOpenTestCase(TestCase):
"""Tests for :func:`robottelo.common.decorators.bz_bug_is_open`."""
# (protected-access) pylint:disable=W0212
def setUp(self): # noqa pylint:disable=C0103
"""Back up objects and generate common values."""
self.backup = decorators._get_bugzilla_bug
self.bug_id = gen_integer()
def tearDown(self): # noqa pylint:disable=C0103
"""Restore backed-up objects."""
decorators._get_bugzilla_bug = self.backup
def test_bug_is_open(self):
"""Assert ``True`` is returned if the bug is 'NEW' or 'ASSIGNED'."""
class MockBug(object): # pylint:disable=R0903
"""A mock bug with an open status."""
status = 'NEW'
decorators._get_bugzilla_bug = lambda bug_id: MockBug()
self.assertTrue(decorators.bz_bug_is_open(self.bug_id))
MockBug.status = 'ASSIGNED'
self.assertTrue(decorators.bz_bug_is_open(self.bug_id))
def test_bug_is_closed(self):
"""Assert ``False`` is returned if the bug is not open."""
class MockBug(object): # pylint:disable=R0903
"""A mock bug with a closed status."""
status = 'CLOSED'
decorators._get_bugzilla_bug = lambda bug_id: MockBug()
self.assertFalse(decorators.bz_bug_is_open(self.bug_id))
MockBug.status = 'ON_QA'
self.assertFalse(decorators.bz_bug_is_open(self.bug_id))
MockBug.status = 'SLOWLY DRIVING A DEV INSANE'
self.assertFalse(decorators.bz_bug_is_open(self.bug_id))
def test_bug_lookup_fails(self):
"""Assert ``False`` is returned if the bug cannot be found."""
def bomb(_):
"""A function that mocks a failure to fetch a bug."""
raise decorators.BugFetchError
decorators._get_bugzilla_bug = bomb
self.assertFalse(decorators.bz_bug_is_open(self.bug_id))
class RmBugIsOpenTestCase(TestCase):
"""Tests for :func:`robottelo.common.decorators.rm_bug_is_open`."""
# (protected-access) pylint:disable=W0212
def setUp(self): # noqa pylint:disable=C0103
"""Back up objects and generate common values."""
self.rm_backup = decorators._get_redmine_bug_status_id
self.stat_backup = decorators._redmine_closed_issue_statuses
decorators._redmine_closed_issue_statuses = lambda: [1, 2]
self.bug_id = gen_integer()
def tearDown(self): # noqa pylint:disable=C0103
"""Restore backed-up objects."""
decorators._get_redmine_bug_status_id = self.rm_backup
decorators._redmine_closed_issue_statuses = self.stat_backup
def test_bug_is_open(self):
"""Assert ``True`` is returned if the bug is open."""
decorators._get_redmine_bug_status_id = lambda bug_id: 0
self.assertTrue(decorators.rm_bug_is_open(self.bug_id))
decorators._get_redmine_bug_status_id = lambda bug_id: 3
self.assertTrue(decorators.rm_bug_is_open(self.bug_id))
def test_bug_is_closed(self):
"""Assert ``False`` is returned if the bug is closed."""
decorators._get_redmine_bug_status_id = lambda bug_id: 1
self.assertFalse(decorators.rm_bug_is_open(self.bug_id))
decorators._get_redmine_bug_status_id = lambda bug_id: 2
self.assertFalse(decorators.rm_bug_is_open(self.bug_id))
def test_bug_lookup_fails(self):
"""Assert ``False`` is returned if the bug cannot be found."""
def bomb(_):
"""A function that mocks a failure to fetch a bug."""
raise decorators.BugFetchError
decorators._get_redmine_bug_status_id = bomb
self.assertFalse(decorators.rm_bug_is_open(self.bug_id))
class RunOnlyOnTestCase(TestCase):
"""Tests for :func:`robottelo.common.decorators.run_only_on`."""
def setUp(self): # noqa
"""Backup object."""
self.project_backup = conf.properties.get('main.project')
def tearDown(self): # noqa
"""Restore backed-up object."""
conf.properties['main.project'] = self.project_backup
def test_project_mode_different_cases(self):
"""Assert ``True`` for different cases of accepted input values
for project / robottelo modes."""
accepted_values = ('SAT', 'SAt', 'SaT', 'Sat', 'sat', 'sAt',
'SAM', 'SAm', 'SaM', 'Sam', 'sam', 'sAm')
# Test different project values
conf.properties['main.project'] = 'sam'
for project in accepted_values:
self.assertTrue(decorators.run_only_on(project))
# Test different mode values
for mode in accepted_values:
conf.properties['main.project'] = mode
self.assertTrue(decorators.run_only_on('SAT'))
def test_invalid_project(self):
"""Assert error is thrown when project has invalid value."""
conf.properties['main.project'] = 'sam'
with self.assertRaises(decorators.ProjectModeError):
decorators.run_only_on('satddfddffdf')
def test_invalid_mode(self):
"""Assert error is thrown when mode has invalid value."""
# Invalid value for robottelo mode
conf.properties['main.project'] = 'samtdd'
with self.assertRaises(decorators.ProjectModeError):
decorators.run_only_on('sat')
|
neerajvashistha/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/flatpages/templatetags/flatpages.py
|
472
|
from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
if 'request' in context:
site_pk = get_current_site(context['request']).pk
else:
site_pk = settings.SITE_ID
flatpages = FlatPage.objects.filter(sites__id=site_pk)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context))
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated():
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ''
@register.tag
def get_flatpages(parser, token):
"""
Retrieves all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause can be used to control the user whose
permissions are to be used in determining which flatpages are visible.
An optional argument, ``starts_with``, can be applied to limit the
returned flatpages to those beginning with a particular base URL.
This argument can be passed as a variable or a string, as it resolves
from the template context.
Syntax::
{% get_flatpages ['url_starts_with'] [for user] as context_name %}
Example usage::
{% get_flatpages as flatpages %}
{% get_flatpages for someuser as flatpages %}
{% get_flatpages '/about/' as about_pages %}
{% get_flatpages prefix as about_pages %}
{% get_flatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" %
dict(tag_name=bits[0]))
# Must have 3-6 bits in the tag
if len(bits) >= 3 and len(bits) <= 6:
# If there's an even number of bits, there's a prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, a user is specified
if len(bits) >= 5:
if bits[-4] != 'for':
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FlatpageNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
|
rmelo19/rmelo19-arduino
|
refs/heads/master
|
fritzing/fritzing.0.9.2b.64.pc/parts/part-gen-scripts/misc_scripts/listpropnames.py
|
4
|
# usage:
# listpropnames.py -d <directory>
#
# <directory> is a folder containing .fzp files. In each fzp file in the directory containing a <property> element:
# like <property name="whatever" ...>
# list the name, if it's not already listed
import getopt, sys, os, re
def usage():
print """
usage:
listpropnames.py -d [directory]
directory is a folder containing .fzp files.
In each fzp file in the directory containing a <property> element:
list each different name attribute.
"""
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:", ["help", "directory"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
outputDir = None
for o, a in opts:
#print o
#print a
if o in ("-d", "--directory"):
outputDir = a
elif o in ("-h", "--help"):
usage()
sys.exit(2)
else:
assert False, "unhandled option"
if(not(outputDir)):
usage()
sys.exit(2)
names = []
for filename in os.listdir(outputDir):
if (filename.endswith(".fzp")):
infile = open(os.path.join(outputDir, filename), "r")
fzp = infile.read()
infile.close()
match = re.search('<property.+name=\"(.+)\".*>.+</property>', fzp)
if (match != None):
if not (match.group(1) in names):
names.append(match.group(1))
print "{0}".format(match.group(1))
if __name__ == "__main__":
main()
|
htygithub/bokeh
|
refs/heads/master
|
examples/plotting/file/vector.py
|
45
|
from __future__ import division
import numpy as np
from bokeh.plotting import figure, show, output_file, vplot
def streamlines(x, y, u, v, density=1):
'''Returns streamlines of a vector flow.
* x and y are 1d arrays defining an *evenly spaced* grid.
* u and v are 2d arrays (shape [y,x]) giving velocities.
* density controls the closeness of the streamlines. For different
densities in each direction, use a tuple or list [densityx, densityy].
'''
## Set up some constants - size of the grid used.
NGX = len(x)
NGY = len(y)
## Constants used to convert between grid index coords and user coords.
DX = x[1]-x[0]
DY = y[1]-y[0]
XOFF = x[0]
YOFF = y[0]
## Now rescale velocity onto axes-coordinates
u = u / (x[-1]-x[0])
v = v / (y[-1]-y[0])
speed = np.sqrt(u*u+v*v)
## s (path length) will now be in axes-coordinates, but we must
## rescale u for integrations.
u *= NGX
v *= NGY
## Now u and v in grid-coordinates.
NBX = int(30*density)
NBY = int(30*density)
blank = np.zeros((NBY,NBX))
bx_spacing = NGX/float(NBX-1)
by_spacing = NGY/float(NBY-1)
def blank_pos(xi, yi):
return int((xi / bx_spacing) + 0.5), \
int((yi / by_spacing) + 0.5)
def value_at(a, xi, yi):
if type(xi) == np.ndarray:
x = xi.astype(np.int)
y = yi.astype(np.int)
else:
x = np.int(xi)
y = np.int(yi)
a00 = a[y,x]
a01 = a[y,x+1]
a10 = a[y+1,x]
a11 = a[y+1,x+1]
xt = xi - x
yt = yi - y
a0 = a00*(1-xt) + a01*xt
a1 = a10*(1-xt) + a11*xt
return a0*(1-yt) + a1*yt
def rk4_integrate(x0, y0):
## This function does RK4 forward and back trajectories from
## the initial conditions, with the odd 'blank array'
## termination conditions. TODO tidy the integration loops.
def f(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return ui*dt_ds, vi*dt_ds
def g(xi, yi):
dt_ds = 1./value_at(speed, xi, yi)
ui = value_at(u, xi, yi)
vi = value_at(v, xi, yi)
return -ui*dt_ds, -vi*dt_ds
check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1
bx_changes = []
by_changes = []
## Integrator function
def rk4(x0, y0, f):
ds = 0.01 #min(1./NGX, 1./NGY, 0.01)
stotal = 0
xi = x0
yi = y0
xb, yb = blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
# Time step. First save the point.
xf_traj.append(xi)
yf_traj.append(yi)
# Next, advance one using RK4
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y)
k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y)
k4x, k4y = f(xi + ds*k3x, yi + ds*k3y)
except IndexError:
# Out of the domain on one of the intermediate steps
break
xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6.
yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6.
# Final position might be out of the domain
if not check(xi, yi): break
stotal += ds
# Next, if s gets to thres, check blank.
new_xb, new_yb = blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
# New square, so check and colour. Quit if required.
if blank[new_yb,new_xb] == 0:
blank[new_yb,new_xb] = 1
bx_changes.append(new_xb)
by_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
return stotal, xf_traj, yf_traj
integrator = rk4
sf, xf_traj, yf_traj = integrator(x0, y0, f)
sb, xb_traj, yb_traj = integrator(x0, y0, g)
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
## Tests to check length of traj. Remember, s in units of axes.
if len(x_traj) < 1: return None
if stotal > .2:
initxb, inityb = blank_pos(x0, y0)
blank[inityb, initxb] = 1
return x_traj, y_traj
else:
for xb, yb in zip(bx_changes, by_changes):
blank[yb, xb] = 0
return None
## A quick function for integrating trajectories if blank==0.
trajectories = []
def traj(xb, yb):
if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY:
return
if blank[yb, xb] == 0:
t = rk4_integrate(xb*bx_spacing, yb*by_spacing)
if t != None:
trajectories.append(t)
## Now we build up the trajectory set. I've found it best to look
## for blank==0 along the edges first, and work inwards.
for indent in range((max(NBX,NBY))//2):
for xi in range(max(NBX,NBY)-2*indent):
traj(xi+indent, indent)
traj(xi+indent, NBY-1-indent)
traj(indent, xi+indent)
traj(NBX-1-indent, xi+indent)
xs = [np.array(t[0])*DX+XOFF for t in trajectories]
ys = [np.array(t[1])*DY+YOFF for t in trajectories]
return xs, ys
xx = np.linspace(-3, 3, 100)
yy = np.linspace(-3, 3, 100)
Y, X = np.meshgrid(xx, yy)
U = -1 - X**2 + Y
V = 1 + X - Y**2
speed = np.sqrt(U*U + V*V)
theta = np.arctan(V/U)
x0 = X[::2, ::2].flatten()
y0 = Y[::2, ::2].flatten()
length = speed[::2, ::2].flatten()/40
angle = theta[::2, ::2].flatten()
x1 = x0 + length * np.cos(angle)
y1 = y0 + length * np.sin(angle)
xs, ys = streamlines(xx, yy, U.T, V.T, density=2)
cm = np.array(["#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#0C2C84"])
ix = ((length-length.min())/(length.max()-length.min())*5).astype('int')
colors = cm[ix]
output_file("vector.html", title="vector.py example")
p1 = figure()
p1.segment(x0, y0, x1, y1, color=colors, line_width=2)
p2 = figure()
p2.multi_line(xs, ys, color="#ee6666", line_width=2, line_alpha=0.8)
show(vplot(p1,p2)) # open a browser
|
fkorotkov/pants
|
refs/heads/master
|
tests/python/pants_test/backend/jvm/tasks/test_consolidate_classpath.py
|
6
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.consolidate_classpath import ConsolidateClasspath
from pants.build_graph.resources import Resources
from pants.java.jar.jar_dependency import JarDependency
from pants.util.dirutil import safe_file_dump
from pants_test.backend.jvm.tasks.jvm_binary_task_test_base import JvmBinaryTaskTestBase
class TestConsolidateClasspath(JvmBinaryTaskTestBase):
@classmethod
def task_type(cls):
return ConsolidateClasspath
def setUp(self):
"""Prepare targets, context, runtime classpath. """
super(TestConsolidateClasspath, self).setUp()
self.task = self.prepare_execute(self.context())
self.jar_artifact = self.create_artifact(org='org.example', name='foo', rev='1.0.0')
self.zip_artifact = self.create_artifact(org='org.pantsbuild', name='bar', rev='2.0.0',
ext='zip')
self.bundle_artifact = self.create_artifact(org='org.apache', name='baz', rev='3.0.0',
classifier='tests')
self.tar_gz_artifact = self.create_artifact(org='org.gnu', name='gary', rev='4.0.0',
ext='tar.gz')
self.jar_lib = self.make_target(spec='3rdparty/jvm/org/example:foo',
target_type=JarLibrary,
jars=[JarDependency(org='org.example', name='foo', rev='1.0.0'),
JarDependency(org='org.pantsbuild',
name='bar',
rev='2.0.0',
ext='zip'),
JarDependency(org='org.apache', name='baz', rev='3.0.0',
classifier='tests'),
JarDependency(org='org.gnu', name='gary', rev='4.0.0',
ext='tar.gz')])
safe_file_dump(os.path.join(self.build_root, 'resources/foo/file'), '// dummy content')
self.resources_target = self.make_target('//resources:foo-resources', Resources,
sources=['foo/file'])
# This is so that payload fingerprint can be computed.
safe_file_dump(os.path.join(self.build_root, 'foo/Foo.java'), '// dummy content')
self.java_lib_target = self.make_target('//foo:foo-library', JavaLibrary, sources=['Foo.java'])
self.binary_target = self.make_target(spec='//foo:foo-binary',
target_type=JvmBinary,
dependencies=[self.java_lib_target, self.jar_lib],
resources=[self.resources_target.address.spec])
self.dist_root = os.path.join(self.build_root, 'dist')
def _setup_classpath(self, task_context):
"""As a separate prep step because to test different option settings, this needs to rerun
after context is re-created.
"""
self.ensure_classpath_products(task_context)
self.add_to_runtime_classpath(task_context, self.binary_target,
{'Foo.class': '', 'foo.txt': '', 'foo/file': ''})
def test_remove_raw_deps(self):
"""Test default setting outputs bundle products using `target.id`."""
self.app_target = self.make_target(spec='//foo:foo-app',
target_type=JvmApp,
basename='FooApp',
dependencies=[self.binary_target])
self.task_context = self.context(target_roots=[self.app_target])
self._setup_classpath(self.task_context)
self.execute(self.task_context)
task_dir = os.path.join(
self.pants_workdir,
'pants_backend_jvm_tasks_consolidate_classpath_ConsolidateClasspath'
)
found_files = [os.path.basename(f) for f in self.iter_files(task_dir)]
self.assertEquals(
sorted(['output-0.jar', 'Foo.class', 'foo.txt', 'file']),
sorted(found_files)
)
# Confirm that we haven't destroyed deps.
expected_non_deps = set(['output-0.jar', 'Foo.class', 'foo.txt', 'file'])
found = set(os.listdir(self.pants_workdir))
print(expected_non_deps - found)
self.assertTrue(expected_non_deps - found == expected_non_deps)
def test_consolidate_classpath(self):
"""Test default setting outputs bundle products using `target.id`."""
self.app_target = self.make_target(spec='//foo:foo-app',
target_type=JvmApp,
basename='FooApp',
dependencies=[self.binary_target])
self.task_context = self.context(target_roots=[self.app_target])
self._setup_classpath(self.task_context)
self.execute(self.task_context)
task_dir = os.path.join(
self.pants_workdir,
'pants_backend_jvm_tasks_consolidate_classpath_ConsolidateClasspath'
)
found_files = [os.path.basename(f) for f in self.iter_files(task_dir)]
self.assertEquals(
sorted(['output-0.jar', 'Foo.class', 'foo.txt', 'file']),
sorted(found_files)
)
# Confirm that we haven't destroyed deps.
expected_deps = set(['org.apache-baz-3.0.0-tests.jar',
'org.example-foo-1.0.0.jar',
'org.gnu-gary-4.0.0.tar.gz',
'org.pantsbuild-bar-2.0.0.zip'])
found = set(os.listdir(self.pants_workdir))
self.assertTrue(expected_deps - found == set())
|
SciTools/iris
|
refs/heads/main
|
tools/update_lockfiles.py
|
2
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A command line utility for generating conda-lock files for the environments
that nox uses for testing each different supported version of python.
Typical usage:
python tools/update_lockfiles.py -o requirements/ci/nox.lock requirements/ci/py*.yml
"""
import argparse
from pathlib import Path
import subprocess
import sys
try:
import conda_lock
except ImportError:
print("conda-lock must be installed.")
exit(1)
parser = argparse.ArgumentParser(
"Iris Lockfile Generator",
)
parser.add_argument('files', nargs='+',
help="List of environment.yml files to lock")
parser.add_argument('--output-dir', '-o', default='.',
help="Directory to save output lock files")
args = parser.parse_args()
for infile in args.files:
print(f"generating lockfile for {infile}", file=sys.stderr)
fname = Path(infile).name
ftype = fname.split('.')[-1]
if ftype.lower() in ('yaml', 'yml'):
fname = '.'.join(fname.split('.')[:-1])
# conda-lock --filename-template expects a string with a "...{platform}..."
# placeholder in it, so we have to build the .lock filename without
# using .format
ofile_template = Path(args.output_dir) / (fname+'-{platform}.lock')
subprocess.call([
'conda-lock',
'lock',
'--filename-template', ofile_template,
'--file', infile,
'--platform', 'linux-64'
])
print(f"lockfile saved to {ofile_template}".format(platform='linux-64'),
file=sys.stderr)
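# For example, an input file "py310.yml" with --output-dir requirements/ci
# yields the template "requirements/ci/py310-{platform}.lock", which
# conda-lock expands per platform:
#
# >>> str(Path('requirements/ci') / 'py310-{platform}.lock').format(platform='linux-64')
# 'requirements/ci/py310-linux-64.lock'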
|
tethysplatform/tethys
|
refs/heads/master
|
tethys_portal/middleware.py
|
2
|
"""
********************************************************************************
* Name: middleware.py
* Author: Nathan Swain
* Created On: August 1, 2015
* Copyright: (c) Brigham Young University 2015
* License: BSD 2-Clause
********************************************************************************
"""
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.shortcuts import redirect
from rest_framework.authentication import TokenAuthentication
from rest_framework.exceptions import AuthenticationFailed
from mfa.helpers import has_mfa
from social_django.middleware import SocialAuthExceptionMiddleware
from social_core import exceptions as social_exceptions
from tethys_cli.cli_colors import pretty_output, FG_WHITE
from tethys_apps.utilities import get_active_app, user_can_access_app
from tethys_portal.views.error import handler_404
class TethysSocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
def process_exception(self, request, exception):
if hasattr(social_exceptions, exception.__class__.__name__):
if isinstance(exception, social_exceptions.AuthCanceled):
if request.user.is_anonymous:
return redirect('accounts:login')
else:
return redirect('user:settings')
elif isinstance(exception, social_exceptions.AuthAlreadyAssociated):
blurb = 'The {0} account you tried to connect to has already been associated with another account.'
with pretty_output(FG_WHITE) as p:
p.write(exception.backend.name)
if 'google' in exception.backend.name:
blurb = blurb.format('Google')
elif 'linkedin' in exception.backend.name:
blurb = blurb.format('LinkedIn')
elif 'hydroshare' in exception.backend.name:
blurb = blurb.format('HydroShare')
elif 'facebook' in exception.backend.name:
blurb = blurb.format('Facebook')
else:
blurb = blurb.format('social')
messages.success(request, blurb)
if request.user.is_anonymous:
return redirect('accounts:login')
else:
return redirect('user:settings')
elif isinstance(exception, social_exceptions.NotAllowedToDisconnect):
blurb = 'Unable to disconnect from this social account.'
messages.success(request, blurb)
if request.user.is_anonymous:
return redirect('accounts:login')
else:
return redirect('user:settings')
class TethysAppAccessMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
app = get_active_app(request)
if app is None:
return response
else:
if not app.enabled:
if request.user.is_staff:
return handler_404(request, PermissionDenied, "This app is disabled. A user with admin permissions "
"can enable this app from the app settings page.")
else:
return handler_404(request, PermissionDenied)
elif user_can_access_app(request.user, app):
return response
else:
return handler_404(request, PermissionDenied)
class TethysMfaRequiredMiddleware():
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
mfa_required = getattr(settings, 'MFA_REQUIRED', False)
sso_mfa_required = getattr(settings, 'SSO_MFA_REQUIRED', False)
admin_mfa_required = getattr(settings, 'ADMIN_MFA_REQUIRED', True)
# Override MFA_REQUIRED setting for API Token authentication
if mfa_required and 'Authorization' in request.headers \
and TokenAuthentication.keyword in request.headers['Authorization']:
# Verify Token
try:
ta = TokenAuthentication()
ta.authenticate(request)
mfa_required = False
except AuthenticationFailed:
pass
# Override MFA_REQUIRED setting for users logged in with SSO
has_social_auth_attr = getattr(request.user, 'social_auth', None) is not None
if mfa_required and not sso_mfa_required and has_social_auth_attr and request.user.social_auth.count() > 0:
mfa_required = False
# Override MFA_REQUIRED setting for staff users
if mfa_required and not admin_mfa_required and request.user.is_staff:
mfa_required = False
if mfa_required and not has_mfa(request, request.user.username):
if '/mfa' not in request.path \
and '/devices' not in request.path \
and '/oauth2' not in request.path \
and '/accounts' not in request.path \
and '/user' not in request.path \
and '/captcha' not in request.path \
and request.path != '/':
messages.error(request, 'You must configure Multi Factor Authentication to continue.')
return redirect('mfa_home')
response = self.get_response(request)
return response
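# A settings sketch exercising the overrides above; the names are the ones
# read via getattr() in __call__, the values are illustrative only:
#
# MFA_REQUIRED = True        # require MFA portal-wide
# SSO_MFA_REQUIRED = False   # ...but exempt users authenticated through SSO
# ADMIN_MFA_REQUIRED = True  # staff accounts must still configure MFA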
|
lyft/incubator-airflow
|
refs/heads/master
|
airflow/contrib/operators/bigquery_to_mysql_operator.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_mysql`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.cloud.operators.bigquery_to_mysql import BigQueryToMySqlOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.google.cloud.operators.bigquery_to_mysql`.",
DeprecationWarning, stacklevel=2
)
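# Migration sketch: new code should import the operator from the provider
# package named in the warning instead of this shim, e.g.
#
# from airflow.providers.google.cloud.operators.bigquery_to_mysql import (
#     BigQueryToMySqlOperator,
# )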
|
bashalex/datapot
|
refs/heads/master
|
examples/tink_eval.py
|
1
|
#!/usr/bin/env python
'''
Tinkoff Boosters contest
Common case of a scoring dataset
5 categorical and 7 numerical variables
Use OneHot-encoding representation for categorical variables
Apply SVD compression for features with number of categories >= 10
datapot.remove_transformer to exclude redundant transformations
'''
from __future__ import print_function
import sys
import bz2
import time
import xgboost as xgb
from sklearn.model_selection import cross_val_score
import datapot as dp
from datapot.datasets import load_tinkoff
data = load_tinkoff()
datapot = dp.DataPot()
t0 = time.time()
datapot.detect(data)
print('detect time:', time.time()-t0)
datapot.remove_transformer('open_account_flg', 0)
t0 = time.time()
datapot.fit(data, verbose=True)
print('fit time:', time.time()-t0)
t0 = time.time()
df = datapot.transform(data)
print('transform time:', time.time()-t0)
X = df.drop([
'open_account_flg',
], axis=1)
y = df['open_account_flg']
model = xgb.XGBClassifier()
cv_score = cross_val_score(model, X, y, cv=5)
assert all(i > 0.5 for i in cv_score), 'Low score!'
print('Cross-val score', cv_score)
model.fit(X, y)
fi = model.feature_importances_
print('Feature importance:')
print(*(list(zip(X.columns, fi))), sep='\n')
|
mationic/pyload
|
refs/heads/stable
|
module/ConfigParser.py
|
35
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from time import sleep
from os.path import exists, join
from shutil import copy
from traceback import print_exc
from utils import chmod
# ignore these plugin configs, mainly because plugins were wiped out
IGNORE = (
"FreakshareNet", "SpeedManager", "ArchiveTo", "ShareCx", ('hooks', 'UnRar'),
'EasyShareCom', 'FlyshareCz'
)
CONF_VERSION = 1
class ConfigParser:
"""
holds and manages the configuration
current dict layout:
{
section : {
option : {
value:
type:
desc:
}
desc:
}
}
"""
def __init__(self):
"""Constructor"""
self.config = {} # the config values
self.plugin = {} # the config for plugins
self.oldRemoteData = {}
self.pluginCB = None # callback when plugin config value is changed
self.checkVersion()
self.readConfig()
self.deleteOldPlugins()
def checkVersion(self, n=0):
"""determines if config need to be copied"""
try:
if not exists("pyload.conf"):
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
if not exists("plugin.conf"):
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
f = open("pyload.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
copy(join(pypath, "module", "config", "default.conf"), "pyload.conf")
print "Old version of config was replaced"
f = open("plugin.conf", "rb")
v = f.readline()
f.close()
v = v[v.find(":") + 1:].strip()
if not v or int(v) < CONF_VERSION:
f = open("plugin.conf", "wb")
f.write("version: " + str(CONF_VERSION))
f.close()
print "Old version of plugin-config replaced"
except:
if n < 3:
sleep(0.3)
self.checkVersion(n + 1)
else:
raise
def readConfig(self):
"""reads the config file"""
self.config = self.parseConfig(join(pypath, "module", "config", "default.conf"))
self.plugin = self.parseConfig("plugin.conf")
try:
homeconf = self.parseConfig("pyload.conf")
if "username" in homeconf["remote"]:
if "password" in homeconf["remote"]:
self.oldRemoteData = {"username": homeconf["remote"]["username"]["value"],
"password": homeconf["remote"]["username"]["value"]}
del homeconf["remote"]["password"]
del homeconf["remote"]["username"]
self.updateValues(homeconf, self.config)
except Exception, e:
print "Config Warning"
print_exc()
def parseConfig(self, config):
"""parses a given configfile"""
f = open(config)
config = f.read()
config = config.splitlines()[1:]
conf = {}
section, option, value, typ, desc = "", "", "", "", ""
listmode = False
for line in config:
comment = line.rfind("#")
if line.find(":", comment) < 0 > line.find("=", comment) and comment > 0 and line[comment - 1].isspace():
line = line.rpartition("#") # removes comments
if line[1]:
line = line[0]
else:
line = line[2]
line = line.strip()
try:
if line == "":
continue
elif line.endswith(":"):
section, none, desc = line[:-1].partition('-')
section = section.strip()
desc = desc.replace('"', "").strip()
conf[section] = {"desc": desc}
else:
if listmode:
if line.endswith("]"):
listmode = False
line = line.replace("]", "")
value += [self.cast(typ, x.strip()) for x in line.split(",") if x]
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
else:
content, none, value = line.partition("=")
content, none, desc = content.partition(":")
desc = desc.replace('"', "").strip()
typ, none, option = content.strip().rpartition(" ")
value = value.strip()
if value.startswith("["):
if value.endswith("]"):
listmode = False
value = value[:-1]
else:
listmode = True
value = [self.cast(typ, x.strip()) for x in value[1:].split(",") if x]
else:
value = self.cast(typ, value)
if not listmode:
conf[section][option] = {"desc": desc,
"type": typ,
"value": value}
except Exception, e:
print "Config Warning"
print_exc()
f.close()
return conf
def updateValues(self, config, dest):
"""sets the config values from a parsed config file to values in destination"""
for section in config.iterkeys():
if section in dest:
for option in config[section].iterkeys():
if option in ("desc", "outline"): continue
if option in dest[section]:
dest[section][option]["value"] = config[section][option]["value"]
#else:
# dest[section][option] = config[section][option]
#else:
# dest[section] = config[section]
def saveConfig(self, config, filename):
"""saves config to filename"""
with open(filename, "wb") as f:
chmod(filename, 0600)
f.write("version: %i \n" % CONF_VERSION)
for section in config.iterkeys():
f.write('\n%s - "%s":\n' % (section, config[section]["desc"]))
for option, data in config[section].iteritems():
if option in ("desc", "outline"): continue
if isinstance(data["value"], list):
value = "[ \n"
for x in data["value"]:
value += "\t\t" + str(x) + ",\n"
value += "\t\t]\n"
else:
if type(data["value"]) in (str, unicode):
value = data["value"] + "\n"
else:
value = str(data["value"]) + "\n"
try:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value))
except UnicodeEncodeError:
f.write('\t%s %s : "%s" = %s' % (data["type"], option, data["desc"], value.encode("utf8")))
def cast(self, typ, value):
"""cast value to given format"""
if type(value) not in (str, unicode):
return value
elif typ == "int":
return int(value)
elif typ == "bool":
return True if value.lower() in ("1", "true", "on", "an", "yes") else False
elif typ == "time":
if not value: value = "0:00"
if not ":" in value: value += ":00"
return value
elif typ in ("str", "file", "folder"):
try:
return value.encode("utf8")
except:
return value
else:
return value
def save(self):
"""saves the configs to disk"""
self.saveConfig(self.config, "pyload.conf")
self.saveConfig(self.plugin, "plugin.conf")
def __getitem__(self, section):
"""provides dictonary like access: c['section']['option']"""
return Section(self, section)
def get(self, section, option):
"""get value"""
val = self.config[section][option]["value"]
try:
if type(val) in (str, unicode):
return val.decode("utf8")
else:
return val
except:
return val
def set(self, section, option, value):
"""set value"""
value = self.cast(self.config[section][option]["type"], value)
self.config[section][option]["value"] = value
self.save()
def getPlugin(self, plugin, option):
"""gets a value for a plugin"""
val = self.plugin[plugin][option]["value"]
try:
if type(val) in (str, unicode):
return val.decode("utf8")
else:
return val
except:
return val
def setPlugin(self, plugin, option, value):
"""sets a value for a plugin"""
value = self.cast(self.plugin[plugin][option]["type"], value)
if self.pluginCB: self.pluginCB(plugin, option, value)
self.plugin[plugin][option]["value"] = value
self.save()
def getMetaData(self, section, option):
""" get all config data for an option """
return self.config[section][option]
def addPluginConfig(self, name, config, outline=""):
"""adds config options with tuples (name, type, desc, default)"""
if name not in self.plugin:
conf = {"desc": name,
"outline": outline}
self.plugin[name] = conf
else:
conf = self.plugin[name]
conf["outline"] = outline
for item in config:
if item[0] in conf:
conf[item[0]]["type"] = item[1]
conf[item[0]]["desc"] = item[2]
else:
conf[item[0]] = {
"desc": item[2],
"type": item[1],
"value": self.cast(item[1], item[3])
}
values = [x[0] for x in config] + ["desc", "outline"]
#delete old values
for item in conf.keys():
if item not in values:
del conf[item]
def deleteConfig(self, name):
"""Removes a plugin config"""
if name in self.plugin:
del self.plugin[name]
def deleteOldPlugins(self):
""" remove old plugins from config """
for name in IGNORE:
if name in self.plugin:
del self.plugin[name]
class Section:
"""provides dictionary like access for configparser"""
def __init__(self, parser, section):
"""Constructor"""
self.parser = parser
self.section = section
def __getitem__(self, item):
"""getitem"""
return self.parser.get(self.section, item)
def __setitem__(self, item, value):
"""setitem"""
self.parser.set(self.section, item, value)
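# Dict-like access sketch (section and option names are illustrative only):
#
# c = ConfigParser()
# lang = c["general"]["language"]   # delegates to c.get("general", "language")
# c["general"]["language"] = "en"   # delegates to c.set(...), which saves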
if __name__ == "__main__":
pypath = ""
from time import time
a = time()
c = ConfigParser()
b = time()
print "sec", b - a
print c.config
c.saveConfig(c.config, "user.conf")
|
s0undt3ch/Deluge
|
refs/heads/master
|
deluge/ui/gtkui/queuedtorrents.py
|
1
|
#
# queuedtorrents.py
#
# Copyright (C) 2007 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import base64
import os.path
import gtk
import gtk.glade
import logging
import gobject
import deluge.component as component
from deluge.ui.client import client
import deluge.common
from deluge.configmanager import ConfigManager
import common
log = logging.getLogger(__name__)
class QueuedTorrents(component.Component):
def __init__(self):
component.Component.__init__(self, "QueuedTorrents", depend=["StatusBar", "AddTorrentDialog"])
self.queue = []
self.status_item = None
self.config = ConfigManager("gtkui.conf")
self.glade = gtk.glade.XML(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "queuedtorrents.glade"))
)
self.glade.get_widget("chk_autoadd").set_active(
self.config["autoadd_queued"])
self.dialog = self.glade.get_widget("queued_torrents_dialog")
self.dialog.set_icon(common.get_logo(32))
self.glade.signal_autoconnect({
"on_button_remove_clicked": self.on_button_remove_clicked,
"on_button_clear_clicked": self.on_button_clear_clicked,
"on_button_close_clicked": self.on_button_close_clicked,
"on_button_add_clicked": self.on_button_add_clicked,
"on_chk_autoadd_toggled": self.on_chk_autoadd_toggled
})
self.treeview = self.glade.get_widget("treeview")
self.treeview.append_column(
gtk.TreeViewColumn(_("Torrent"), gtk.CellRendererText(), text=0))
self.liststore = gtk.ListStore(str, str)
self.treeview.set_model(self.liststore)
def run(self):
self.dialog.set_transient_for(component.get("MainWindow").window)
self.dialog.show()
def start(self):
if len(self.queue) == 0:
return
if self.config["autoadd_queued"] or self.config["classic_mode"]:
self.on_button_add_clicked(None)
return
# Make sure status bar info is showing
self.update_status_bar()
# We only want the add button sensitive if we're connected to a host
self.glade.get_widget("button_add").set_sensitive(True)
self.run()
def stop(self):
# We only want the add button sensitive if we're connected to a host
self.glade.get_widget("button_add").set_sensitive(False)
self.update_status_bar()
def add_to_queue(self, torrents):
"""Adds the list of torrents to the queue"""
# Add to the queue while removing duplicates
self.queue = list(set(self.queue + torrents))
# Update the liststore
self.liststore.clear()
for torrent in self.queue:
self.liststore.append([os.path.split(torrent)[1], torrent])
# Update the status bar
self.update_status_bar()
def update_status_bar(self):
"""Attempts to update status bar"""
# If there are no queued torrents, remove statusbar widgets and return
if len(self.queue) == 0:
if self.status_item != None:
component.get("StatusBar").remove_item(self.status_item)
self.status_item = None
return False
try:
statusbar = component.get("StatusBar")
except Exception, e:
# The statusbar hasn't been loaded yet, so we'll add a timer to
# update it later.
gobject.timeout_add(100, self.update_status_bar)
return False
# Set the label text for statusbar
if len(self.queue) > 1:
label = str(len(self.queue)) + _(" Torrents Queued")
else:
label = str(len(self.queue)) + _(" Torrent Queued")
# Add the statusbar items if needed, or just modify the label if they
# have already been added.
if self.status_item == None:
self.status_item = component.get("StatusBar").add_item(
stock=gtk.STOCK_SORT_DESCENDING,
text=label,
callback=self.on_statusbar_click)
else:
self.status_item.set_text(label)
# We return False so the timer stops
return False
def on_statusbar_click(self, widget, event):
log.debug("on_statusbar_click")
self.run()
def on_button_remove_clicked(self, widget):
selected = self.treeview.get_selection().get_selected()[1]
if selected != None:
path = self.liststore.get_value(selected, 1)
self.liststore.remove(selected)
self.queue.remove(path)
self.update_status_bar()
def on_button_clear_clicked(self, widget):
self.liststore.clear()
self.update_status_bar()
def on_button_close_clicked(self, widget):
self.dialog.hide()
def on_button_add_clicked(self, widget):
# Add all the torrents in the liststore
def add_torrent(model, path, iter, data):
torrent_path = model.get_value(iter, 1)
if deluge.common.is_url(torrent_path):
if self.config["interactive_add"]:
def on_show(result):
component.get("AddTorrentDialog").add_from_url(torrent_path)
d = component.get("AddTorrentDialog").show(self.config["focus_add_dialog"])
d.addCallback(on_show)
else:
client.core.add_torrent_url(torrent_path, None)
elif deluge.common.is_magnet(torrent_path):
if self.config["interactive_add"]:
def on_show(result):
component.get("AddTorrentDialog").add_from_magnets([torrent_path])
d = component.get("AddTorrentDialog").show(self.config["focus_add_dialog"])
d.addCallback(on_show)
else:
client.core.add_magnet_uris([torrent_path], [])
else:
if self.config["interactive_add"]:
def on_show(result):
component.get("AddTorrentDialog").add_from_files([torrent_path])
d = component.get("AddTorrentDialog").show(self.config["focus_add_dialog"])
d.addCallback(on_show)
else:
client.core.add_torrent_file(
os.path.split(torrent_path)[-1],
base64.encodestring(open(torrent_path, "rb").read()),
None)
self.liststore.foreach(add_torrent, None)
del self.queue[:]
self.dialog.hide()
self.update_status_bar()
def on_chk_autoadd_toggled(self, widget):
self.config["autoadd_queued"] = widget.get_active()
|
santod/google_kernel_m7_3.4.10-g1a25406
|
refs/heads/master
|
tools/perf/scripts/python/net_dropmonitor.py
|
4235
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except KeyError:
drop_log[slocation] = 1
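# Usage sketch (command lines are an assumption, not part of this file):
# record drop events with something like "perf record -a -e skb:kfree_skb",
# then replay them through this script with
# "perf script -s net_dropmonitor.py" to get the drop table above.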
|
shahbazn/neutron
|
refs/heads/master
|
neutron/db/quota/__init__.py
|
12133432
| |
rpmcpp/Audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/perl.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Options,Utils
from waflib.Configure import conf
from waflib.TaskGen import extension,feature,before_method
@before_method('apply_incpaths','apply_link','propagate_uselib_vars')
@feature('perlext')
def init_perlext(self):
self.uselib=self.to_list(getattr(self,'uselib',[]))
if not'PERLEXT'in self.uselib:self.uselib.append('PERLEXT')
self.env['cshlib_PATTERN']=self.env['cxxshlib_PATTERN']=self.env['perlext_PATTERN']
@extension('.xs')
def xsubpp_file(self,node):
outnode=node.change_ext('.c')
self.create_task('xsubpp',node,outnode)
self.source.append(outnode)
class xsubpp(Task.Task):
run_str='${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
color='BLUE'
ext_out=['.h']
@conf
def check_perl_version(self,minver=None):
res=True
if minver:
cver='.'.join(map(str,minver))
else:
cver=''
self.start_msg('Checking for minimum perl version %s'%cver)
perl=getattr(Options.options,'perlbinary',None)
if not perl:
perl=self.find_program('perl',var='PERL')
if not perl:
self.end_msg("Perl not found",color="YELLOW")
return False
self.env['PERL']=perl
version=self.cmd_and_log([perl,"-e",'printf \"%vd\", $^V'])
if not version:
res=False
version="Unknown"
elif not minver is None:
ver=tuple(map(int,version.split(".")))
if ver<minver:
res=False
self.end_msg(version,color=res and"GREEN"or"YELLOW")
return res
@conf
def check_perl_module(self,module):
cmd=[self.env['PERL'],'-e','use %s'%module]
self.start_msg('perl module %s'%module)
try:
r=self.cmd_and_log(cmd)
except Exception:
self.end_msg(False)
return None
self.end_msg(r or True)
return r
@conf
def check_perl_ext_devel(self):
env=self.env
perl=env.PERL
if not perl:
self.fatal('find perl first')
def read_out(cmd):
return Utils.to_list(self.cmd_and_log(perl+cmd))
env['LINKFLAGS_PERLEXT']=read_out(" -MConfig -e'print $Config{lddlflags}'")
env['INCLUDES_PERLEXT']=read_out(" -MConfig -e'print \"$Config{archlib}/CORE\"'")
env['CFLAGS_PERLEXT']=read_out(" -MConfig -e'print \"$Config{ccflags} $Config{cccdlflags}\"'")
env['XSUBPP']=read_out(" -MConfig -e'print \"$Config{privlib}/ExtUtils/xsubpp$Config{exe_ext}\"'")
env['EXTUTILS_TYPEMAP']=read_out(" -MConfig -e'print \"$Config{privlib}/ExtUtils/typemap\"'")
if not getattr(Options.options,'perlarchdir',None):
env['ARCHDIR_PERL']=self.cmd_and_log(perl+" -MConfig -e'print $Config{sitearch}'")
else:
env['ARCHDIR_PERL']=getattr(Options.options,'perlarchdir')
env['perlext_PATTERN']='%s.'+self.cmd_and_log(perl+" -MConfig -e'print $Config{dlext}'")
def options(opt):
opt.add_option('--with-perl-binary',type='string',dest='perlbinary',help='Specify alternate perl binary',default=None)
opt.add_option('--with-perl-archdir',type='string',dest='perlarchdir',help='Specify directory where to install arch specific files',default=None)
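# Hypothetical wscript sketch (names and the version are illustrative, not part
# of this tool) showing how the @conf helpers above are typically driven:
#
#   def options(opt):
#       opt.load('perl')
#   def configure(conf):
#       conf.load('perl')
#       if not conf.check_perl_version((5, 8, 0)):
#           conf.fatal('perl >= 5.8 is required')
#       conf.check_perl_ext_devel()
#       conf.check_perl_module('ExtUtils::ParseXS')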
|
shaufi/odoo
|
refs/heads/8.0
|
addons/payment_transfer/__openerp__.py
|
391
|
# -*- coding: utf-8 -*-
{
'name': 'Transfer Payment Acquirer',
'category': 'Hidden',
'summary': 'Payment Acquirer: Transfer Implementation',
'version': '1.0',
'description': """Transfer Payment Acquirer""",
'author': 'OpenERP SA',
'depends': ['payment'],
'data': [
'views/transfer.xml',
'data/transfer.xml',
],
'installable': True,
'auto_install': True,
}
|
damdam-s/stock-logistics-workflow
|
refs/heads/8.0
|
__unported__/stock_picking_show_returns/__init__.py
|
23
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock_picking
|
WillStewart1994/NoSleep
|
refs/heads/master
|
requests/packages/chardet/escprober.py
|
2935
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
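# Minimal usage sketch (illustration; the sample bytes are an assumed
# ISO-2022-JP escape sequence): feed data to the prober and read the verdict.
#
#   prober = EscCharSetProber()
#   prober.feed(b'\x1b$B8l\x1b(B')
#   print(prober.get_charset_name(), prober.get_confidence())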
|
wbc2010/django1.2.5
|
refs/heads/master
|
tests/regressiontests/forms/__init__.py
|
12133432
| |
jaesivsm/JARR
|
refs/heads/master
|
tests/crawler/__init__.py
|
12133432
| |
NCI-Cloud/horizon
|
refs/heads/nci/kilo
|
openstack_dashboard/dashboards/project/loadbalancers/__init__.py
|
12133432
| |
g82411/s_square
|
refs/heads/master
|
s_square/__init__.py
|
12133432
| |
anksp21/Community-Zenpacks
|
refs/heads/master
|
ZenPacks.community.Gentoo/ZenPacks/community/Gentoo/parsers/__init__.py
|
12133432
| |
jasonbot/django
|
refs/heads/master
|
tests/admin_scripts/complex_app/management/commands/__init__.py
|
12133432
| |
DREAM-ODA-OS/tools
|
refs/heads/master
|
imgproc/smooth_mask.py
|
1
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
#
# This tool tries to smooth mask edges by blurring and thresholding.
#  While the blurring smooths the edges, the thresholding produces a new
#  binary mask.
#
#  The blurring is performed by a (separable) Gaussian convolution filter.
#  The value of the threshold controls the position of the new mask border.
#
# Author: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import sys
from os.path import basename
from img import (
FormatOptions, ImageFileReader, create_geotiff, DEF_GEOTIFF_FOPT,
Progress, Block, execute,
)
from img.cli import error
from img.algs import threshold_values, normalize_values
from img.filters import coeff1d_gauss, filter_conv_separable, mirror_borders
def usage():
""" Print simple usage help. """
exename = basename(sys.argv[0])
def _generate_():
yield (
"USAGE: %s <input mask> <output mask/TIF> <threshold> <radius>"
"" % exename
)
yield "EXAMPLE: %s mask_in.tif mask_out.tif 0.5 20" % exename
yield "DESCRIPTION:"
yield " This tools tries to smooth edges of a binary mask by "
yield " blurring and consecutive threshing of the pixel values. "
yield " The <radius> defines size of the convolution window "
yield " (number of pixels surrounding the central pixel.) "
yield " so the that window size is <window_size> = 2 * <radius> + 1"
yield " The <threshold> defines the trimming threshold and it should"
yield " be from 0.0 to 1.0 range."
for line in _generate_():
print >>sys.stderr, line
def process(tile, img_in, img_out, threshold, whs, false, true):
""" Process one tile. """
# pylint: disable=too-many-arguments
tile = tile & img_in # clip tile to the input image extent
b_in = img_in.read(Block(img_in.dtype, tile.extend((whs, whs))))
b_in = normalize_values(b_in, false, true)
b_in = mirror_borders(b_in, img_in)
kernel1d = coeff1d_gauss(whs)
b_out = filter_conv_separable(b_in, kernel1d, kernel1d)
img_out.write(threshold_values(b_out, threshold, false, true))
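# Sizing note (illustration, not part of the original source): with
# <radius> = 20 the separable Gaussian uses a 2 * 20 + 1 = 41 pixel window,
# and each tile is read with a 20 pixel margin (the tile.extend((whs, whs))
# call above), so the blur never sees a tile seam.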
if __name__ == "__main__":
MASKBG = 0x00
MASKFG = 0xFF
FOPTS = FormatOptions(DEF_GEOTIFF_FOPT) # default format options
try:
INPUT = sys.argv[1]
OUTPUT = sys.argv[2]
# trimming threshold from 0.0 to 1.0
THRESHOLD = max(0.0, min(1.0, float(sys.argv[3])))
# window half size >= 1
WHS = max(1, int(sys.argv[4]))
for opt in sys.argv[5:]:
#anything else is treated as a format option
FOPTS.set_option(opt)
except IndexError:
error("Not enough input arguments!")
usage()
sys.exit(1)
# open input image
IMG_IN = ImageFileReader(INPUT)
# check mask properties
if IMG_IN.size.z > 1:
error("Multi-band masks are not supported!")
sys.exit(1)
if IMG_IN.dtype != 'uint8':
error("Unsupported mask data type '%s'!" % IMG_IN.dtype)
sys.exit(1)
# creation parameters
PARAM = {
'path' : OUTPUT,
'nrow' : IMG_IN.size.y,
'ncol' : IMG_IN.size.x,
'nband' : 1,
'dtype' : 'uint8',
'options' : FOPTS.options,
}
PARAM.update(IMG_IN.geocoding) # add geo-coding
# open output image
IMG_OUT = create_geotiff(**PARAM)
# block size
#TILE_SIZE = (int(FOPTS["BLOCKXSIZE"]), int(FOPTS["BLOCKYSIZE"]))
TILE_SIZE = (512, 512) # using larger tiles
print "Smoothing mask ..."
execute(
IMG_OUT.tiles(TILE_SIZE), process, (
IMG_IN, IMG_OUT, THRESHOLD, WHS, MASKBG, MASKFG,
),
progress=Progress(sys.stdout, IMG_OUT.tile_count(TILE_SIZE)),
)
|
40323102/2014_cpa_final_project-40323102
|
refs/heads/master
|
helloworld.py
|
73
|
# coding=utf-8
# The encoding declaration above only takes effect on the first or second line of the file
################# (1) Module import section
# Import the cherrypy module; to use cherrypy on the OpenShift platform, it must be installed via setup.py
import cherrypy
# Import Python's built-in os module; since os is built in, there is no need to install it via setup.py
import os
################# (2) Global variable section
# Determine the directory containing this program file; on Windows it ends with a trailing backslash
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Set the data storage directories for cloud and local execution
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # the program is running in the cloud
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # the program is running locally
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''The following code uses input() and a for loop locally; to run it on OpenShift, besides adopting the CherryPy web framework, the output must also be printed as HTML
# Data obtained via input() is of string type
toprint = input("What content should be printed?")
# To use the string obtained from input() as an integer, it must be converted with int()
repeat_no = int(input("How many times should it be printed?"))
for i in range(repeat_no):
    print(toprint)
'''
################# (3) 程式類別定義區
# From here on, the CherryPy web framework structure is used
# The Hello class is defined below; inheriting from object means Hello gets all of object's features, including its methods and attributes
class Hello(object):
    # Startup configuration for the Hello class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
'tools.sessions.locking' : 'explicit',
        # sessions are stored as files, in the tmp directory under data_dir
'tools.sessions.storage_path' : data_dir+'/tmp',
        # session lifetime is set to 60 minutes
'tools.sessions.timeout' : 60
}
    # cherrypy.expose (the @-prefixed line) is a decorator marking the following method as directly reachable by users via a URL
@cherrypy.expose
    # index is the built-in (default) method of a CherryPy class; when the user does not specify a method, index is executed first
    # methods taking self are instance methods; Python uses this self to pass the object's state between member methods
def index(self, toprint="Hello World!"):
return toprint
################# (4) Application startup section
# Configure static directories or static files relative to the program's directory
application_conf = {'/static':{
'tools.staticdir.on': True,
    # a static directory must be created manually under the program's working directory
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # running on OpenShift
application = cherrypy.Application(Hello(), config=application_conf)
else:
    # running locally
cherrypy.quickstart(Hello(), config=application_conf)
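# Usage note (illustration): because index() exposes a "toprint" keyword
# argument, CherryPy maps query-string parameters onto it, so visiting
# /?toprint=Hi returns "Hi" instead of the default "Hello World!".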
|
canh/icanfly
|
refs/heads/master
|
xplane/parser.py
|
1
|
from control import state
import struct
DATA_FORMAT = "<I8f"
DATA_HEADER_FORMAT = "<4cB"
DATA_LEN = 36
DATA_OFFSET = 5
XP_LOCATION_INDEX = 20
XP_ATTITUDE_INDEX = 17
XP_CONTROL_INDEX = 11
def parse_state(data):
parsed = parse_raw(data)
return _get_location(parsed), _get_attitude(parsed)
def parse_raw(data):
sliced = [data[x:x + DATA_LEN] for x in range(DATA_OFFSET, len(data), DATA_LEN)]
parsed = [struct.unpack(DATA_FORMAT, x) for x in sliced]
# Create a dictionary, keys -> first elements of each tuple; values -> remaining elements
return dict(map(lambda t: (t[0], t[1:]), parsed))
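# Worked example (an illustration of the wire format assumed above): build one
# 36-byte DATA record the way X-Plane would send it, then parse it back.
#
#   >>> header = struct.pack(DATA_HEADER_FORMAT, b"D", b"A", b"T", b"A", 0)
#   >>> record = struct.pack(DATA_FORMAT, XP_ATTITUDE_INDEX,
#   ...                      1.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0)
#   >>> parse_raw(header + record)[XP_ATTITUDE_INDEX][:3]
#   (1.0, 2.0, 3.0)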
def from_input(control):
header = struct.pack(DATA_HEADER_FORMAT, b"D", b"A", b"T", b"A", 0)
    # X-Plane rudder input is between 0 and 0.2, so we scale down the rudder input
raw_data = _to_raw_data(XP_CONTROL_INDEX, control.elevator, control.aileron, (control.rudder / 5))
data = struct.pack(DATA_FORMAT, *raw_data)
return header + data
def _to_raw_data(index, *data_points):
data_len = len(data_points)
if data_len > 8:
raise ValueError("tuple should have at most 8 values")
return [index] + [data_points[x] if x < data_len else -999 for x in range(0, 8)]
def _get_location(parsed_map):
raw_location = parsed_map.get(XP_LOCATION_INDEX, None)
if raw_location is None:
return None
return state.Location(raw_location[0], raw_location[1],
_convert_meter(raw_location[2]),_convert_meter(raw_location[3]))
def _get_attitude(parsed_map):
raw_attitude = parsed_map.get(XP_ATTITUDE_INDEX, None)
if raw_attitude is None:
return None
return state.Attitude(raw_attitude[0], raw_attitude[2], raw_attitude[1])
'''
TODO: Find & use a module for unit conversions
'''
def _convert_meter(foot):
return foot * .305
|
michalliu/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/idlelib/CodeContext.py
|
13
|
"""CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import tkinter
from tkinter.constants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxsize as INFINITY
from idlelib.configHandler import idleConf
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"}
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
getspacesfirstword =\
lambda s, c=re.compile(r"^(\s*)(\w*)"): c.match(s).groups()
class CodeContext:
menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
context_depth = idleConf.GetOption("extensions", "CodeContext",
"numlines", type="int", default=3)
bgcolor = idleConf.GetOption("extensions", "CodeContext",
"bgcolor", type="str", default="LightGray")
fgcolor = idleConf.GetOption("extensions", "CodeContext",
"fgcolor", type="str", default="Black")
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.textfont = self.text["font"]
self.label = None
# self.info is a list of (line number, indent level, line text, block
# keyword) tuples providing the block structure associated with
# self.topvisible (the linenumber of the line displayed at the top of
# the edit window). self.info[0] is initialized as a 'dummy' line which
# starts the toplevel 'block' of the module.
self.info = [(0, -1, "", False)]
self.topvisible = 1
visible = idleConf.GetOption("extensions", "CodeContext",
"visible", type="bool", default=False)
if visible:
self.toggle_code_context_event()
self.editwin.setvar('<<toggle-code-context>>', True)
# Start two update cycles, one for context lines, one for font changes.
self.text.after(UPDATEINTERVAL, self.timer_event)
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
def toggle_code_context_event(self, event=None):
if not self.label:
# Calculate the border width and horizontal padding required to
# align the context with the text in the main Text widget.
#
# All values are passed through int(str(<value>)), since some
# values may be pixel objects, which can't simply be added to ints.
widgets = self.editwin.text, self.editwin.text_frame
            # Calculate the required horizontal padding
padx = 0
for widget in widgets:
padx += int(str( widget.pack_info()['padx'] ))
padx += int(str( widget.cget('padx') ))
# Calculate the required border width
border = 0
for widget in widgets:
border += int(str( widget.cget('border') ))
self.label = tkinter.Label(self.editwin.top,
text="\n" * (self.context_depth - 1),
anchor=W, justify=LEFT,
font=self.textfont,
bg=self.bgcolor, fg=self.fgcolor,
width=1, #don't request more than we get
padx=padx, border=border,
relief=SUNKEN)
# Pack the label widget before and above the text_frame widget,
# thus ensuring that it will appear directly above text_frame
self.label.pack(side=TOP, fill=X, expand=False,
before=self.editwin.text_frame)
else:
self.label.destroy()
self.label = None
idleConf.SetOption("extensions", "CodeContext", "visible",
str(self.label is not None))
idleConf.SaveUserCfgFiles()
def get_line_info(self, linenum):
"""Get the line indent value, text, and any block start keyword
If the line does not start a block, the keyword value is False.
The indentation of empty lines (or comment lines) is INFINITY.
"""
text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
spaces, firstword = getspacesfirstword(text)
opener = firstword in BLOCKOPENERS and firstword
if len(text) == len(spaces) or text[len(spaces)] == '#':
indent = INFINITY
else:
indent = len(spaces)
return indent, text, opener
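    # Example (illustration): for the line "    elif x:" the regex helper
    # yields spaces="    " and firstword="elif", so this method returns
    # (4, "    elif x:", "elif"); a pure comment line returns indent INFINITY
    # and opener False.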
def get_context(self, new_topvisible, stopline=1, stopindent=0):
"""Get context lines, starting at new_topvisible and working backwards.
Stop when stopline or stopindent is reached. Return a tuple of context
data and the indent level at the top of the region inspected.
"""
assert stopline > 0
lines = []
# The indentation level we are currently in:
lastindent = INFINITY
# For a line to be interesting, it must begin with a block opening
# keyword, and have less indentation than lastindent.
for linenum in range(new_topvisible, stopline-1, -1):
indent, text, opener = self.get_line_info(linenum)
if indent < lastindent:
lastindent = indent
if opener in ("else", "elif"):
# We also show the if statement
lastindent += 1
if opener and linenum < new_topvisible and indent >= stopindent:
lines.append((linenum, indent, text, opener))
if lastindent <= stopindent:
break
lines.reverse()
return lines, lastindent
def update_code_context(self):
"""Update context information and lines visible in the context pane.
"""
new_topvisible = int(self.text.index("@0,0").split('.')[0])
if self.topvisible == new_topvisible: # haven't scrolled
return
if self.topvisible < new_topvisible: # scroll down
lines, lastindent = self.get_context(new_topvisible,
self.topvisible)
# retain only context info applicable to the region
# between topvisible and new_topvisible:
while self.info[-1][1] >= lastindent:
del self.info[-1]
elif self.topvisible > new_topvisible: # scroll up
stopindent = self.info[-1][1] + 1
# retain only context info associated
# with lines above new_topvisible:
while self.info[-1][0] >= new_topvisible:
stopindent = self.info[-1][1]
del self.info[-1]
lines, lastindent = self.get_context(new_topvisible,
self.info[-1][0]+1,
stopindent)
self.info.extend(lines)
self.topvisible = new_topvisible
# empty lines in context pane:
context_strings = [""] * max(0, self.context_depth - len(self.info))
# followed by the context hint lines:
context_strings += [x[2] for x in self.info[-self.context_depth:]]
self.label["text"] = '\n'.join(context_strings)
def timer_event(self):
if self.label:
self.update_code_context()
self.text.after(UPDATEINTERVAL, self.timer_event)
def font_timer_event(self):
newtextfont = self.text["font"]
if self.label and newtextfont != self.textfont:
self.textfont = newtextfont
self.label["font"] = self.textfont
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
|
RomainBrault/scikit-learn
|
refs/heads/master
|
sklearn/semi_supervised/tests/test_label_propagation.py
|
44
|
""" test the label propagation module """
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.semi_supervised import label_propagation
from sklearn.metrics.pairwise import rbf_kernel
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelPropagation, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {
'kernel': lambda x, y: rbf_kernel(x, y, gamma=20)
}),
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            continue   # unstable test; changes in k-NN ordering break it
            assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
                                      np.array([[1., 0.]]), 2)
        else:
            assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                      np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
|
da1z/intellij-community
|
refs/heads/master
|
python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py
|
320
|
"""
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+?)
(?:\.(?P<version>[^._]+))?
(?:_(?P<suffix>[^._]+))?
\.dylib$
)
""")
def dylib_info(filename):
"""
A dylib name can take one of the following four forms:
Location/Name.SomeVersion_Suffix.dylib
Location/Name.SomeVersion.dylib
Location/Name_Suffix.dylib
Location/Name.dylib
returns None if not found or a mapping equivalent to:
dict(
location='Location',
name='Name.SomeVersion_Suffix.dylib',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present.
"""
is_dylib = DYLIB_RE.match(filename)
if not is_dylib:
return None
return is_dylib.groupdict()
def test_dylib_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert dylib_info('completely/invalid') is None
assert dylib_info('completely/invalide_debug') is None
assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
if __name__ == '__main__':
test_dylib_info()
|
spinellic/Mission-Planner
|
refs/heads/master
|
Lib/site-packages/scipy/ndimage/filters.py
|
55
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
from scipy.misc import doccer
_input_doc = \
"""input : array-like
input array to filter"""
_axis_doc = \
"""axis : integer, optional
axis of ``input`` along which to calculate. Default is -1"""
_output_doc = \
"""output : array, optional
The ``output`` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either ``size`` or ``footprint`` must be defined. ``size`` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
``footprint`` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and ``size`` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The ``origin`` parameter controls the placement of the filter. Default 0"""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input':_input_doc,
'axis':_axis_doc,
'output':_output_doc,
'size_foot':_size_foot_doc,
'mode':_mode_doc,
'cval':_cval_doc,
'origin':_origin_doc,
'extra_arguments':_extra_arguments_doc,
'extra_keywords':_extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis = -1, output = None, mode = "reflect",
cval = 0.0, origin = 0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
one-dimensional sequence of numbers
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if ((len(weights) // 2 + origin < 0) or
(len(weights) // 2 + origin > len(weights))):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
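# Quick sanity check (illustration; values verified by hand with the default
# 'reflect' mode and origin 0):
#
#   >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
#   array([ 8, 26,  8, 12,  7, 28, 36,  9])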
@docfiller
def convolve1d(input, weights, axis = -1, output = None, mode = "reflect",
cval = 0.0, origin = 0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
one-dimensional sequence of numbers
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
@docfiller
def gaussian_filter1d(input, sigma, axis = -1, order = 0, output = None,
mode = "reflect", cval = 0.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the length of the filter equal to 4 times the standard
# deviations:
lw = int(4.0 * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1 : # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
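# Worked size example (illustration): with sigma = 2.0 the code above uses
# lw = int(4.0 * 2.0 + 0.5) = 8, i.e. a kernel of 2 * 8 + 1 = 17 taps,
# normalized to sum to 1 before any derivative weighting is applied.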
@docfiller
def gaussian_filter(input, sigma, order = 0, output = None,
mode = "reflect", cval = 0.0):
"""Multi-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
        raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = range(input.ndim)
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def prewitt(input, axis = -1, output = None, mode = "reflect", cval = 0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,)
return return_value
@docfiller
def sobel(input, axis = -1, output = None, mode = "reflect", cval = 0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
@docfiller
def generic_laplace(input, derivative2, output = None, mode = "reflect",
cval = 0.0,
extra_arguments = (),
extra_keywords = None):
"""Calculate a multidimensional laplace filter using the provided
second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See ``extra_arguments``, ``extra_keywords`` below
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = range(input.ndim)
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output = None, mode = "reflect", cval = 0.0):
"""Calculate a multidimensional laplace filter using an estimation
for the second derivative based on differences.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output = None, mode = "reflect",
cval = 0.0):
"""Calculate a multidimensional laplace filter using gaussian
second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments = (sigma,))
@docfiller
def generic_gradient_magnitude(input, derivative, output = None,
mode = "reflect", cval = 0.0,
extra_arguments = (), extra_keywords = None):
"""Calculate a gradient magnitude using the provided function for
the gradient.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See ``extra_arguments``, ``extra_keywords`` below
``derivative`` can assume that ``input`` and ``output`` are
ndarrays.
Note that the output from ``derivative`` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = range(input.ndim)
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
numpy.sqrt(output, output)
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output = None,
mode = "reflect", cval = 0.0):
"""Calculate a multidimensional gradient magnitude using gaussian
derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode, cval)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments = (sigma,))
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output = None, mode = 'reflect', cval = 0.0,
origin = 0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0,
origin = 0):
"""
Multi-dimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e. where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0],[0,1,0],[0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single nearest value to an edge of
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
    >>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis = -1, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : integer
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin > size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size = 3, output = None, mode = "reflect",
cval = 0.0, origin = 0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = range(input.ndim)
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis = -1, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin > size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis = -1, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D maximum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin > size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
            separable = True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
if numpy.alltrue(numpy.ravel(footprint),axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = range(input.ndim)
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0, operation = 'rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origin)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origin)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : integer
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size = None, footprint = None, output = None,
mode = "reflect", cval = 0.0, origin = 0):
"""
Calculates a multi-dimensional median filter.
Parameters
----------
input : array-like
input array to filter
size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either ``size`` or ``footprint`` must be defined. ``size`` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
``footprint`` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and ``size`` is 2, then the actual size used is
(2,2,2).
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size = None, footprint = None,
output = None, mode = "reflect", cval = 0.0, origin = 0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
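# Worked rank example (illustration): for a 3x3 footprint filter_size is 9;
# percentile = -20 is first mapped to 80, giving
# rank = int(9 * 80 / 100.0) = 7, i.e. the 8th smallest value in each
# neighbourhood.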
@docfiller
def generic_filter1d(input, function, filter_size, axis = -1,
output = None, mode = "reflect", cval = 0.0, origin = 0,
extra_arguments = (), extra_keywords = None):
"""Calculate a one-dimensional filter along the given axis.
generic_filter1d iterates over the lines of the array, calling the
given function at each line. The arguments of the line are the
input line, and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
function to apply along given axis
filter_size : scalar
length of the filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if ((filter_size // 2 + origin < 0) or
(filter_size // 2 + origin > filter_size)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
@docfiller
def generic_filter(input, function, size = None, footprint = None,
output = None, mode = "reflect", cval = 0.0, origin = 0,
extra_arguments = (), extra_keywords = None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
function to apply at each element
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
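# Hedged usage sketch (editor's illustration, not part of this module): the
# callable receives the values under the footprint as a flat 1D double array
# and returns a scalar. A local range (max - min) over a 3x3 neighbourhood:
#
#     import numpy
#     from scipy import ndimage
#     a = numpy.arange(25, dtype=float).reshape(5, 5)
#     rng = ndimage.generic_filter(a, lambda v: v.max() - v.min(), size=3)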
|
jaja14/lab4
|
refs/heads/master
|
lib/flask/globals.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask.globals
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import partial
from werkzeug.local import LocalStack, LocalProxy
def _lookup_req_object(name):
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('working outside of request context')
return getattr(top, name)
def _lookup_app_object(name):
top = _app_ctx_stack.top
if top is None:
raise RuntimeError('working outside of application context')
return getattr(top, name)
def _find_app():
top = _app_ctx_stack.top
if top is None:
raise RuntimeError('working outside of application context')
return top.app
# context locals
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app = LocalProxy(_find_app)
request = LocalProxy(partial(_lookup_req_object, 'request'))
session = LocalProxy(partial(_lookup_req_object, 'session'))
g = LocalProxy(partial(_lookup_app_object, 'g'))
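# Hedged usage sketch (editor's illustration, not part of Flask): the proxies
# above resolve only while the corresponding context is pushed; outside of it
# they raise the RuntimeErrors defined above. Assuming an application `app`:
#
#     from flask import Flask, current_app, g
#     app = Flask(__name__)
#     with app.app_context():
#         g.answer = 42                  # stored on the application context
#         assert current_app.name == app.name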
|
edx/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/demographics/tests/test_status.py
|
3
|
"""
Test status utilities
"""
from unittest import TestCase
from unittest import mock
from django.conf import settings
from opaque_keys.edx.keys import CourseKey
from pytest import mark
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.tests.factories import UserFactory
from openedx.core.djangoapps.catalog.tests.factories import (
ProgramFactory,
)
from openedx.core.djangolib.testing.utils import skip_unless_lms
from openedx.features.enterprise_support.tests.factories import EnterpriseCustomerUserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory
if settings.ROOT_URLCONF == 'lms.urls':
from openedx.core.djangoapps.demographics.api.status import show_user_demographics, show_call_to_action_for_user
from openedx.core.djangoapps.demographics.tests.factories import UserDemographicsFactory
MICROBACHELORS = 'microbachelors'
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.programs.utils.get_programs_by_type')
class TestShowDemographics(SharedModuleStoreTestCase):
"""
Tests for whether the demographics collection fields should be shown
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.store = modulestore()
cls.user = UserFactory()
cls.program = ProgramFactory(type=MICROBACHELORS)
cls.catalog_course_run = cls.program['courses'][0]['course_runs'][0]
cls.course_key = CourseKey.from_string(cls.catalog_course_run['key'])
cls.course_run = CourseFactory.create(
org=cls.course_key.org,
number=cls.course_key.course,
run=cls.course_key.run,
modulestore=cls.store,
)
CourseModeFactory.create(course_id=cls.course_run.id, mode_slug=CourseMode.VERIFIED)
def test_user_enterprise(self, mock_get_programs_by_type):
mock_get_programs_by_type.return_value = [self.program]
EnterpriseCustomerUserFactory.create(user_id=self.user.id)
assert not show_user_demographics(user=self.user)
@skip_unless_lms
@mark.django_db
class TestShowCallToAction(TestCase):
"""
Tests for whether the demographics call to action should be shown
"""
def setUp(self):
super().setUp()
self.user = UserFactory()
def test_new_user(self):
assert show_call_to_action_for_user(self.user)
def test_existing_user_no_dismiss(self):
user_demographics = UserDemographicsFactory.create(user=self.user)
assert user_demographics.show_call_to_action
assert show_call_to_action_for_user(self.user)
def test_existing_user_dismissed(self):
user_demographics = UserDemographicsFactory.create(user=self.user)
user_demographics.show_call_to_action = False
user_demographics.save()
assert not user_demographics.show_call_to_action
assert not show_call_to_action_for_user(self.user)
|
natcoin/natcoin
|
refs/heads/master
|
share/qt/clean_mac_info_plist.py
|
1
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Natcoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./"
inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "Natcoin-Qt.app/Contents/Info.plist"
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
    lineArr = line.replace(" ", "").split("=")
    if lineArr[0].startswith("VERSION"):
        version = lineArr[1].replace("\n", "")
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)
fOut = open(outFile, "w")
fOut.write(newFileContent)
print "Info.plist freshly created"
|
Jackysonglanlan/devops
|
refs/heads/master
|
IDEs/sublime/shared-pkgs/Packages/pygments/all/pygments/lexers/parsers.py
|
47
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.parsers
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for parser generators.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, \
include, bygroups, using
from pygments.token import Punctuation, Other, Text, Comment, Operator, \
Keyword, Name, String, Number, Whitespace
from pygments.lexers.jvm import JavaLexer
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers.objective import ObjectiveCLexer
from pygments.lexers.d import DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.ruby import RubyLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.perl import PerlLexer
__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
# 'AntlrCLexer',
'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
'AntlrJavaLexer', 'AntlrActionScriptLexer',
'TreetopLexer', 'EbnfLexer']
class RagelLexer(RegexLexer):
"""
A pure `Ragel <http://www.complang.org/ragel/>`_ lexer. Use this for
fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
(or one of the language-specific subclasses).
.. versionadded:: 1.1
"""
name = 'Ragel'
aliases = ['ragel']
filenames = []
tokens = {
'whitespace': [
(r'\s+', Whitespace)
],
'comments': [
(r'\#.*$', Comment),
],
'keywords': [
(r'(access|action|alphtype)\b', Keyword),
(r'(getkey|write|machine|include)\b', Keyword),
(r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
(r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
],
'numbers': [
(r'0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
'literals': [
(r'"(\\\\|\\"|[^"])*"', String), # double quote string
(r"'(\\\\|\\'|[^'])*'", String), # single quote string
(r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals
(r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions
],
'identifiers': [
(r'[a-zA-Z_]\w*', Name.Variable),
],
'operators': [
(r',', Operator), # Join
(r'\||&|--?', Operator), # Union, Intersection and Subtraction
            (r'\.|<:|:>>?', Operator),                  # Concatenation
(r':', Operator), # Label
(r'->', Operator), # Epsilon Transition
(r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
(r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
(r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
(r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
(r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
(r'>|@|\$|%', Operator), # Transition Actions and Priorities
(r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition
(r'!|\^', Operator), # Negation
(r'\(|\)', Operator), # Grouping
],
'root': [
include('literals'),
include('whitespace'),
include('comments'),
include('keywords'),
include('numbers'),
include('identifiers'),
include('operators'),
(r'\{', Punctuation, 'host'),
(r'=', Operator),
(r';', Punctuation),
],
'host': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^{}\'"/#]+', # exclude unsafe characters
r'[^\\]\\[{}]', # allow escaped { or }
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'\#.*$\n?', # ruby comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
}
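# Hedged usage sketch (editor's illustration): highlighting a bare Ragel
# fragment with the lexer above through the standard pygments API:
#
#     from pygments import highlight
#     from pygments.formatters import NullFormatter
#     print(highlight('main := lower+ digit*;', RagelLexer(), NullFormatter()))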
class RagelEmbeddedLexer(RegexLexer):
"""
A lexer for `Ragel`_ embedded in a host language file.
This will only highlight Ragel statements. If you want host language
highlighting then call the language-specific Ragel lexer.
.. versionadded:: 1.1
"""
name = 'Embedded Ragel'
aliases = ['ragel-em']
filenames = ['*.rl']
tokens = {
'root': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^%\'"/#]+', # exclude unsafe characters
r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression
# / is safe now that we've handled regex and javadoc comments
r'/',
)) + r')+', Other),
# Single Line FSM.
# Please don't put a quoted newline in a single line FSM.
# That's just mean. It will break this.
(r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
using(RagelLexer),
Punctuation, Text)),
# Multi Line FSM.
(r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'),
],
'multi-line-fsm': [
(r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
r'(' + r'|'.join((
r'[^}\'"\[/#]', # exclude unsafe characters
r'\}(?=[^%]|$)', # } is okay as long as it's not followed by %
r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
r'[^\\]\\[{}]', # ...and } is okay if it's escaped
# allow / if it's preceded with one of these symbols
# (ragel EOF actions)
r'(>|\$|%|<|@|<>)/',
# specifically allow regex followed immediately by *
# so it doesn't get mistaken for a comment
r'/(?!\*)(\\\\|\\/|[^/])*/\*',
# allow / as long as it's not followed by another / or by a *
r'/(?=[^/*]|$)',
# We want to match as many of these as we can in one block.
# Not sure if we need the + sign here,
# does it help performance?
)) + r')+',
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
r'//.*$\n?', # single line comment
r'\#.*$\n?', # ruby/ragel comment
)) + r')+', using(RagelLexer)),
(r'\}%%', Punctuation, '#pop'),
]
}
def analyse_text(text):
return '@LANG: indep' in text
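# Hedged note (editor's illustration): the host-language lexers below all
# follow the same DelegatingLexer pattern -- RagelEmbeddedLexer marks host
# code as Other, and the root lexer (Ruby, C, D, ...) re-lexes those spans:
#
#     from pygments.lexers import get_lexer_by_name
#     lexer = get_lexer_by_name('ragel-ruby')
#     tokens = list(lexer.get_tokens("%%{ machine m; main := 'x'; }%%\n"))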
class RagelRubyLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Ruby Host'
aliases = ['ragel-ruby', 'ragel-rb']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: ruby' in text
class RagelCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a C host file.
.. versionadded:: 1.1
"""
name = 'Ragel in C Host'
aliases = ['ragel-c']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: c' in text
class RagelDLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a D host file.
.. versionadded:: 1.1
"""
name = 'Ragel in D Host'
aliases = ['ragel-d']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: d' in text
class RagelCppLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a CPP host file.
.. versionadded:: 1.1
"""
name = 'Ragel in CPP Host'
aliases = ['ragel-cpp']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options)
def analyse_text(text):
return '@LANG: c++' in text
class RagelObjectiveCLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in an Objective C host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Objective C Host'
aliases = ['ragel-objc']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer,
RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: objc' in text
class RagelJavaLexer(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Java host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Java Host'
aliases = ['ragel-java']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: java' in text
class AntlrLexer(RegexLexer):
"""
Generic `ANTLR`_ Lexer.
Should not be called directly, instead
use DelegatingLexer for your target language.
.. versionadded:: 1.1
.. _ANTLR: http://www.antlr.org/
"""
name = 'ANTLR'
aliases = ['antlr']
filenames = []
_id = r'[A-Za-z]\w*'
_TOKEN_REF = r'[A-Z]\w*'
_RULE_REF = r'[a-z]\w*'
_STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
_INT = r'[0-9]+'
tokens = {
'whitespace': [
(r'\s+', Whitespace),
],
'comments': [
(r'//.*$', Comment),
(r'/\*(.|\n)*?\*/', Comment),
],
'root': [
include('whitespace'),
include('comments'),
(r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
Punctuation)),
# optionsSpec
(r'options\b', Keyword, 'options'),
# tokensSpec
(r'tokens\b', Keyword, 'tokens'),
# attrScope
(r'(scope)(\s*)(' + _id + ')(\s*)(\{)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation), 'action'),
# exception
(r'(catch|finally)\b', Keyword, 'exception'),
# action
(r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)(\{)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
Name.Label, Whitespace, Punctuation), 'action'),
# rule
(r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?',
bygroups(Keyword, Whitespace, Name.Label, Punctuation),
('rule-alts', 'rule-prelims')),
],
'exception': [
(r'\n', Whitespace, '#pop'),
(r'\s', Whitespace),
include('comments'),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
],
'rule-prelims': [
include('whitespace'),
include('comments'),
(r'returns\b', Keyword),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
# throwsSpec
(r'(throws)(\s+)(' + _id + ')',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(,)(\s*)(' + _id + ')',
bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
# optionsSpec
(r'options\b', Keyword, 'options'),
# ruleScopeSpec - scope followed by target language code or name of action
# TODO finish implementing other possibilities for scope
# L173 ANTLRv3.g from ANTLR book
(r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation),
'action'),
(r'(scope)(\s+)(' + _id + ')(\s*)(;)',
bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
# ruleAction
(r'(@' + _id + ')(\s*)(\{)',
bygroups(Name.Label, Whitespace, Punctuation), 'action'),
# finished prelims, go to rule alts!
(r':', Punctuation, '#pop')
],
'rule-alts': [
include('whitespace'),
include('comments'),
# These might need to go in a separate 'block' state triggered by (
(r'options\b', Keyword, 'options'),
(r':', Punctuation),
# literals
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'<<([^>]|>[^>])>>', String),
# identifiers
# Tokens start with capital letter.
(r'\$?[A-Z_]\w*', Name.Constant),
# Rules start with small letter.
(r'\$?[a-z_]\w*', Name.Variable),
# operators
(r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
(r',', Punctuation),
(r'\[', Punctuation, 'nested-arg-action'),
(r'\{', Punctuation, 'action'),
(r';', Punctuation, '#pop')
],
'tokens': [
include('whitespace'),
include('comments'),
(r'\{', Punctuation),
(r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
+ ')?(\s*)(;)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
String, Whitespace, Punctuation)),
(r'\}', Punctuation, '#pop'),
],
'options': [
include('whitespace'),
include('comments'),
(r'\{', Punctuation),
(r'(' + _id + r')(\s*)(=)(\s*)(' +
'|'.join((_id, _STRING_LITERAL, _INT, '\*')) + ')(\s*)(;)',
bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
Text, Whitespace, Punctuation)),
(r'\}', Punctuation, '#pop'),
],
'action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks
r'[^${}\'"/\\]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# backslashes are okay, as long as we are not backslashing a %
r'\\(?!%)',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'(\\)(%)', bygroups(Punctuation, Other)),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'nested-arg-action': [
(r'(' + r'|'.join(( # keep host code in largest possible chunks.
r'[^$\[\]\'"/]+', # exclude unsafe characters
# strings and comments may safely contain unsafe characters
r'"(\\\\|\\"|[^"])*"', # double quote string
r"'(\\\\|\\'|[^'])*'", # single quote string
r'//.*$\n?', # single line comment
r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
# regular expression: There's no reason for it to start
# with a * and this stops confusion with comments.
r'/(?!\*)(\\\\|\\/|[^/])*/',
# Now that we've handled regex and javadoc comments
# it's safe to let / through.
r'/',
)) + r')+', Other),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r'(\$[a-zA-Z]+)(\.?)(text|value)?',
bygroups(Name.Variable, Punctuation, Name.Property)),
(r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
]
}
def analyse_text(text):
return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
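# Hedged note (editor's illustration): because ANTLR targets Java by default,
# guess_lexer() on a grammar without a language option will typically resolve
# to AntlrJavaLexer below, whose analyse_text returns 0.9:
#
#     from pygments.lexers import guess_lexer
#     lx = guess_lexer('grammar T;\nr : ID ;\n')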
# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets
# TH: I'm not aware of any language features of C++ that will cause
# incorrect lexing of C files. Antlr doesn't appear to make a distinction,
# so just assume they're C++. No idea how to make Objective C work in the
# future.
# class AntlrCLexer(DelegatingLexer):
# """
# ANTLR with C Target
#
# .. versionadded:: 1.1
# """
#
# name = 'ANTLR With C Target'
# aliases = ['antlr-c']
# filenames = ['*.G', '*.g']
#
# def __init__(self, **options):
# super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options)
#
# def analyse_text(text):
# return re.match(r'^\s*language\s*=\s*C\s*;', text)
class AntlrCppLexer(DelegatingLexer):
"""
`ANTLR`_ with CPP Target
.. versionadded:: 1.1
"""
name = 'ANTLR With CPP Target'
aliases = ['antlr-cpp']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
class AntlrObjectiveCLexer(DelegatingLexer):
"""
`ANTLR`_ with Objective-C Target
.. versionadded:: 1.1
"""
name = 'ANTLR With ObjectiveC Target'
aliases = ['antlr-objc']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer,
AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
            re.search(r'^\s*language\s*=\s*ObjC\s*;', text, re.M)
class AntlrCSharpLexer(DelegatingLexer):
"""
`ANTLR`_ with C# Target
.. versionadded:: 1.1
"""
name = 'ANTLR With C# Target'
aliases = ['antlr-csharp', 'antlr-c#']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
class AntlrPythonLexer(DelegatingLexer):
"""
`ANTLR`_ with Python Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Python Target'
aliases = ['antlr-python']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
class AntlrJavaLexer(DelegatingLexer):
"""
`ANTLR`_ with Java Target
    .. versionadded:: 1.1
"""
name = 'ANTLR With Java Target'
aliases = ['antlr-java']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer,
**options)
def analyse_text(text):
# Antlr language is Java by default
return AntlrLexer.analyse_text(text) and 0.9
class AntlrRubyLexer(DelegatingLexer):
"""
`ANTLR`_ with Ruby Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Ruby Target'
aliases = ['antlr-ruby', 'antlr-rb']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
class AntlrPerlLexer(DelegatingLexer):
"""
`ANTLR`_ with Perl Target
.. versionadded:: 1.1
"""
name = 'ANTLR With Perl Target'
aliases = ['antlr-perl']
filenames = ['*.G', '*.g']
def __init__(self, **options):
super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer,
**options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
class AntlrActionScriptLexer(DelegatingLexer):
"""
`ANTLR`_ with ActionScript Target
.. versionadded:: 1.1
"""
name = 'ANTLR With ActionScript Target'
aliases = ['antlr-as', 'antlr-actionscript']
filenames = ['*.G', '*.g']
def __init__(self, **options):
from pygments.lexers.actionscript import ActionScriptLexer
super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
AntlrLexer, **options)
def analyse_text(text):
return AntlrLexer.analyse_text(text) and \
re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
class TreetopBaseLexer(RegexLexer):
"""
A base lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
Not for direct use; use TreetopLexer instead.
.. versionadded:: 1.6
"""
tokens = {
'root': [
include('space'),
(r'require[ \t]+[^\n\r]+[\n\r]', Other),
(r'module\b', Keyword.Namespace, 'module'),
(r'grammar\b', Keyword, 'grammar'),
],
'module': [
include('space'),
include('end'),
(r'module\b', Keyword, '#push'),
(r'grammar\b', Keyword, 'grammar'),
(r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace),
],
'grammar': [
include('space'),
include('end'),
(r'rule\b', Keyword, 'rule'),
(r'include\b', Keyword, 'include'),
(r'[A-Z]\w*', Name),
],
'include': [
include('space'),
(r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'),
],
'rule': [
include('space'),
include('end'),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)),
(r'[A-Za-z_]\w*', Name),
(r'[()]', Punctuation),
(r'[?+*/&!~]', Operator),
(r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex),
(r'([0-9]*)(\.\.)([0-9]*)',
bygroups(Number.Integer, Operator, Number.Integer)),
(r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)),
(r'\{', Punctuation, 'inline_module'),
(r'\.', String.Regex),
],
'inline_module': [
(r'\{', Other, 'ruby'),
(r'\}', Punctuation, '#pop'),
(r'[^{}]+', Other),
],
'ruby': [
(r'\{', Other, '#push'),
(r'\}', Other, '#pop'),
(r'[^{}]+', Other),
],
'space': [
(r'[ \t\n\r]+', Whitespace),
(r'#[^\n]*', Comment.Single),
],
'end': [
(r'end\b', Keyword, '#pop'),
],
}
class TreetopLexer(DelegatingLexer):
"""
A lexer for `Treetop <http://treetop.rubyforge.org/>`_ grammars.
.. versionadded:: 1.6
"""
name = 'Treetop'
aliases = ['treetop']
filenames = ['*.treetop', '*.tt']
def __init__(self, **options):
super(TreetopLexer, self).__init__(RubyLexer, TreetopBaseLexer, **options)
class EbnfLexer(RegexLexer):
"""
Lexer for `ISO/IEC 14977 EBNF
<http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
grammars.
.. versionadded:: 2.0
"""
name = 'EBNF'
aliases = ['ebnf']
filenames = ['*.ebnf']
mimetypes = ['text/x-ebnf']
tokens = {
'root': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'=', Operator, 'production'),
],
'production': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'"[^"]*"', String.Double),
(r"'[^']*'", String.Single),
(r'(\?[^?]*\?)', Name.Entity),
(r'[\[\]{}(),|]', Punctuation),
(r'-', Operator),
(r';', Punctuation, '#pop'),
(r'\.', Punctuation, '#pop'),
],
'whitespace': [
(r'\s+', Text),
],
'comment_start': [
(r'\(\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^*)]', Comment.Multiline),
include('comment_start'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[*)]', Comment.Multiline),
],
'identifier': [
(r'([a-zA-Z][\w \-]*)', Keyword),
],
}
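# Hedged usage sketch (editor's illustration): tokenising a one-rule ISO EBNF
# grammar with the lexer above:
#
#     from pygments import highlight
#     from pygments.formatters import NullFormatter
#     print(highlight('digit = "0" | "1" ;', EbnfLexer(), NullFormatter()))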
|
alrusdi/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/forms/localflavor/ie.py
|
89
|
from django.contrib.localflavor.ie.forms import IECountySelect
from utils import LocalFlavorTestCase
class IELocalFlavorTests(LocalFlavorTestCase):
def test_IECountySelect(self):
f = IECountySelect()
out = u'''<select name="counties">
<option value="antrim">Antrim</option>
<option value="armagh">Armagh</option>
<option value="carlow">Carlow</option>
<option value="cavan">Cavan</option>
<option value="clare">Clare</option>
<option value="cork">Cork</option>
<option value="derry">Derry</option>
<option value="donegal">Donegal</option>
<option value="down">Down</option>
<option value="dublin" selected="selected">Dublin</option>
<option value="fermanagh">Fermanagh</option>
<option value="galway">Galway</option>
<option value="kerry">Kerry</option>
<option value="kildare">Kildare</option>
<option value="kilkenny">Kilkenny</option>
<option value="laois">Laois</option>
<option value="leitrim">Leitrim</option>
<option value="limerick">Limerick</option>
<option value="longford">Longford</option>
<option value="louth">Louth</option>
<option value="mayo">Mayo</option>
<option value="meath">Meath</option>
<option value="monaghan">Monaghan</option>
<option value="offaly">Offaly</option>
<option value="roscommon">Roscommon</option>
<option value="sligo">Sligo</option>
<option value="tipperary">Tipperary</option>
<option value="tyrone">Tyrone</option>
<option value="waterford">Waterford</option>
<option value="westmeath">Westmeath</option>
<option value="wexford">Wexford</option>
<option value="wicklow">Wicklow</option>
</select>'''
self.assertEqual(f.render('counties', 'dublin'), out)
|
cesarmarinhorj/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
|
124
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cStringIO as StringIO
import unittest2 as unittest
import diff_parser
import re
from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
class DiffParserTest(unittest.TestCase):
maxDiff = None
def test_diff_parser(self, parser = None):
if not parser:
parser = diff_parser.DiffParser(DIFF_TEST_DATA.splitlines())
self.assertEqual(3, len(parser.files))
self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files)
diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h']
self.assertEqual(7, len(diff.lines))
        # The first two unchanged lines.
self.assertEqual((47, 47), diff.lines[0][0:2])
self.assertEqual('', diff.lines[0][2])
self.assertEqual((48, 48), diff.lines[1][0:2])
self.assertEqual(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
# The deleted line
self.assertEqual((50, 0), diff.lines[3][0:2])
self.assertEqual(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2])
# The first file looks OK. Let's check the next, more complicated file.
self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files)
diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp']
# There are 3 chunks.
self.assertEqual(7 + 7 + 9, len(diff.lines))
# Around an added line.
self.assertEqual((60, 61), diff.lines[9][0:2])
self.assertEqual((0, 62), diff.lines[10][0:2])
self.assertEqual((61, 63), diff.lines[11][0:2])
        # Look through the last chunk, which contains both adds and deletes.
self.assertEqual((81, 83), diff.lines[14][0:2])
self.assertEqual((82, 84), diff.lines[15][0:2])
self.assertEqual((83, 85), diff.lines[16][0:2])
self.assertEqual((84, 0), diff.lines[17][0:2])
self.assertEqual((0, 86), diff.lines[18][0:2])
self.assertEqual((0, 87), diff.lines[19][0:2])
self.assertEqual((85, 88), diff.lines[20][0:2])
self.assertEqual((86, 89), diff.lines[21][0:2])
self.assertEqual((87, 90), diff.lines[22][0:2])
# Check if a newly added file is correctly handled.
diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
self.assertEqual(1, len(diff.lines))
self.assertEqual((0, 1), diff.lines[0][0:2])
def test_diff_converter(self):
comment_lines = [
"Hey guys,\n",
"\n",
"See my awesome patch below!\n",
"\n",
" - Cool Hacker\n",
"\n",
]
revision_lines = [
"Subversion Revision 289799\n",
]
svn_diff_lines = [
"Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"===================================================================\n",
"--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
]
self.assertEqual(diff_parser.get_diff_converter(svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(comment_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(revision_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
git_diff_lines = [
"diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"index 3c5b45b..0197ead 100644\n",
"--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
"@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
]
self.assertEqual(diff_parser.get_diff_converter(git_diff_lines), diff_parser.git_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(comment_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
self.assertEqual(diff_parser.get_diff_converter(revision_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
def test_git_mnemonicprefix(self):
p = re.compile(r' ([a|b])/')
prefixes = [
{ 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree)
{ 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
{ 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex)
{ 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
{ 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2))
]
for prefix in prefixes:
patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], DIFF_TEST_DATA)
self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
def test_git_diff_to_svn_diff(self):
output = """\
Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py
===================================================================
--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
A
B
C
+D
E
F
"""
inputfmt = StringIO.StringIO("""\
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index 2ed552c4555db72df16b212547f2c125ae301a04..72870482000c0dba64ce4300ed782c03ee79b74f 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
A
B
C
+D
E
F
""")
shortfmt = StringIO.StringIO("""\
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index b48b162..f300960 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
A
B
C
+D
E
F
""")
self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in shortfmt.readlines()))
self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in inputfmt.readlines()))
|
tjanez/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/items.py
|
251
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
return self._flatten(terms)
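# Hedged note (editor's illustration, not part of the plugin): this lookup
# backs `with_items`; _flatten() collapses one level of nesting, so a task
# such as the following iterates over 1, 2, 3, 4:
#
#   - debug: msg={{ item }}
#     with_items:
#       - [1, 2]
#       - [3, 4]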
|
jymannob/CouchPotatoServer
|
refs/heads/develop
|
libs/requests/packages/charade/langthaimodel.py
|
2929
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
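# Hedged note (editor's illustration, paraphrasing the single-byte prober
# that consumes this model): each input byte is mapped through
# TIS620CharToOrderMap, and consecutive orders below the 64-character sample
# size index the precedence matrix, roughly:
#
#     order1 = TIS620CharToOrderMap[byte1]
#     order2 = TIS620CharToOrderMap[byte2]
#     if order1 < 64 and order2 < 64:
#         seq_category = ThaiLangModel[order1 * 64 + order2]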
# flake8: noqa
|
pierreg/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/seq2seq_test.py
|
12
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import tensorflow as tf
class Seq2SeqTest(tf.test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
_, enc_state = tf.nn.rnn(
tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(2), 4)
dec, mem = tf.nn.seq2seq.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
_, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_rnn_decoder(
dec_inp, enc_state, cell, num_symbols=4, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell1 = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell1, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
      # Test that the previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
      # Test that when num_decoder_symbols is provided, the size of the
      # decoder output is num_decoder_symbols.
with tf.variable_scope("decoder_symbols_seq2seq"):
dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3,
embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
      # Test that the previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2,
feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4,
num_heads=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = tf.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
attn_states = enc_outputs
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder2(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.GRUCell(2)
inp = tf.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
attn_states = enc_outputs
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4,
num_heads=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = [tf.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2,
state_is_tuple=True)
inp = tf.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = tf.nn.dynamic_rnn(cell, inp, dtype=tf.float32)
attn_states = enc_outputs
dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
dec, mem = tf.nn.seq2seq.attention_decoder(
dec_inp, enc_state,
attn_states, cell, output_size=4)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inp = [tf.constant(0.5, shape=[2, 2])] * 2
cell = tf.nn.rnn_cell.GRUCell(2)
enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs])
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec, mem = tf.nn.seq2seq.embedding_attention_decoder(
dec_inp, enc_state, attn_states, cell, num_symbols=4,
embedding_size=2, output_size=3)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with tf.variable_scope("no_tuple"):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = tf.get_variable("proj_w", [2, 5])
b = tf.get_variable("proj_b", [5])
with tf.variable_scope("proj_seq2seq"):
dec, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, output_projection=(w, b))
sess.run([tf.initialize_all_variables()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
with tf.variable_scope("other"):
d3, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2,
feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp2, cell, num_encoder_symbols=2,
num_decoder_symbols=5, embedding_size=2, feed_previous=True)
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
enc_inp = [tf.constant(1, tf.int32, shape=[2]) for i in range(2)]
dec_inp_dict = {}
dec_inp_dict["0"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
dec_inp_dict["1"] = [
tf.constant(i, tf.int32, shape=[2]) for i in range(4)]
dec_symbols_dict = {"0": 5, "1": 6}
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
outputs_dict, state_dict = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict, embedding_size=2)
sess.run([tf.initialize_all_variables()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(3)]
dec_inp_dict2["1"] = [
tf.constant(0, tf.int32, shape=[2]) for _ in range(4)]
with tf.variable_scope("other"):
outputs_dict3, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=tf.constant(True))
sess.run([tf.initialize_all_variables()])
tf.get_variable_scope().reuse_variables()
outputs_dict1, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
outputs_dict2, _ = tf.nn.seq2seq.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict2, cell, 2, dec_symbols_dict,
embedding_size=2, feed_previous=True)
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [tf.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = tf.nn.seq2seq.sequence_loss(
logits, targets, weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
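# Worked check of the expected constants above (an editorial sketch, not part
# of the original test): the logits are identical across all 5 classes at
# every step, so the softmax is uniform and the per-step cross-entropy is
# ln(5).
#
#   import math
#   per_step = math.log(5)    # ~1.609438 (averaged over timesteps and batch)
#   per_batch = 3 * per_step  # ~4.828314 (summed over 3 timesteps)
#   total = 2 * per_batch     # ~9.656628 (also summed over the batch of 2)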
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [tf.constant(i + 0.5, shape=[2, output_classes])
for i in range(3)]
targets = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
weights = [tf.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = tf.nn.seq2seq.sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
def testModelWithBucketsScopeAndLoss(self):
"""Test that variable scope reuse is not reset after model_with_buckets."""
classes = 10
buckets = [(4, 4), (8, 8)]
with self.test_session():
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24)
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
per_example_loss=per_example_loss)
# Now we construct the copy model.
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses1 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=False)
# Now check that we did not accidentally set reuse.
self.assertEqual(False, tf.get_variable_scope().reuse)
# Construct one more model with per-example loss.
tf.get_variable_scope().reuse_variables()
_, losses2 = SampleGRUSeq2Seq(inp, out, weights, per_example_loss=True)
# First loss is scalar, the second one is a 1-dimensional tensor.
self.assertEqual([], losses1[0].get_shape().as_list())
self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = tf.get_variable("proj_w", [24, classes])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols=classes,
num_decoder_symbols=classes, embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(inputs, labels):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)
return tf.nn.seq2seq.model_with_buckets(
enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
weights = [tf.ones_like(inp[0], dtype=tf.float32) for _ in range(8)]
with tf.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = tf.all_variables()
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = tf.gradients(losses[i], params)
grads, _ = tf.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([tf.initialize_all_variables()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [np.array([np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length],
out[:length], o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], perplexities[bucket][0])
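# Editorial note on the perplexity check above: perplexity = exp(loss), so a
# model assigning uniform probability 1/10 to each of the 10 symbols has loss
# ln(10) ~= 2.3026 and perplexity 10. Asserting that the last perplexity is
# below the first is therefore a (weak) check that training moved the model
# beyond chance within 6 steps.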
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
an `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=tf.Graph()) as sess:
tf.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_enc_timesteps)]
dec_inp_fp_true = [tf.constant(i, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
dec_inp_holder_fp_false = [tf.placeholder(tf.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)]
targets = [tf.constant(i + 1, tf.int32, shape=[batch_size])
for i in range(num_dec_timesteps)]
weights = [tf.constant(1.0, shape=[batch_size])
for i in range(num_dec_timesteps)]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with tf.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
scope_name)
optimizer = tf.train.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
tf.nn.seq2seq.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
dec_op_fp_false, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(tf.initialize_all_variables())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {v.name.split('/', 1)[-1]: v
for v in variables_fp_false}
matched_variables = [(v, v_false_name_dict[v.name.split('/', 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(tf.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false,
{holder: inp for holder, inp in zip(dec_inp_holder_fp_false,
dec_inp_fp_false)})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_rnn_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return tf.nn.seq2seq.embedding_attention_seq2seq(
enc_inp, dec_inp, cell, num_encoder_symbols,
num_decoder_symbols, embedding_size=2, feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
tf.test.main()
|
antsmc2/mics
|
refs/heads/master
|
survey/fixtures/__init__.py
|
12133432
| |
redhatrises/freeipa
|
refs/heads/master
|
ipapython/__init__.py
|
12133432
| |
Slater-Victoroff/scrapy
|
refs/heads/master
|
scrapy/command.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.command` is deprecated, "
"use `scrapy.commands` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.commands import *
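# A hedged usage sketch (editorial, not part of the original module): the
# deprecation warning fires at the first import of this module, so a fresh
# interpreter would observe it like this:
#
#   import warnings
#   from scrapy.exceptions import ScrapyDeprecationWarning
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       import scrapy.command  # noqa: F401
#   assert any(w.category is ScrapyDeprecationWarning for w in caught)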
|
nju520/django
|
refs/heads/master
|
tests/fixtures_regress/tests.py
|
205
|
# -*- coding: utf-8 -*-
# Unittests for fixtures.
from __future__ import unicode_literals
import json
import os
import re
import unittest
import warnings
import django
from django.core import management, serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import CommandError
from django.core.serializers.base import DeserializationError
from django.db import IntegrityError, transaction
from django.db.models import signals
from django.test import (
TestCase, TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils import six
from django.utils._os import upath
from django.utils.six import PY3, StringIO
from .models import (
Absolute, Animal, Article, Book, Child, Circle1, Circle2, Circle3,
ExternalDependency, M2MCircular1ThroughAB, M2MCircular1ThroughBC,
M2MCircular1ThroughCA, M2MCircular2ThroughAB, M2MComplexA, M2MComplexB,
M2MComplexCircular1A, M2MComplexCircular1B, M2MComplexCircular1C,
M2MComplexCircular2A, M2MComplexCircular2B, M2MSimpleA, M2MSimpleB,
M2MSimpleCircularA, M2MSimpleCircularB, M2MThroughAB, NKChild, Parent,
Person, RefToNKChild, Store, Stuff, Thingy, Widget,
)
_cur_dir = os.path.dirname(os.path.abspath(upath(__file__)))
def is_ascii(s):
return all(ord(c) < 128 for c in s)
skipIfNonASCIIPath = unittest.skipIf(
not is_ascii(django.__file__) and six.PY2,
'Python 2 crashes when checking non-ASCII exception messages.'
)
class TestFixtures(TestCase):
def animal_pre_save_check(self, signal, sender, instance, **kwargs):
self.pre_save_checks.append(
(
'Count = %s (%s)' % (instance.count, type(instance.count)),
'Weight = %s (%s)' % (instance.weight, type(instance.weight)),
)
)
def test_duplicate_pk(self):
"""
This is a regression test for ticket #3790.
"""
# Load a fixture that uses PK=1
management.call_command(
'loaddata',
'sequence',
verbosity=0,
)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
animal = Animal(
name='Platypus',
latin_name='Ornithorhynchus anatinus',
count=2,
weight=2.2,
)
animal.save()
self.assertGreater(animal.id, 1)
def test_loaddata_not_found_fields_not_ignore(self):
"""
Test for ticket #9279 -- Error is raised for entries in
the serialized data for fields that have been removed
from the database when not ignored.
"""
with self.assertRaises(DeserializationError):
management.call_command(
'loaddata',
'sequence_extra',
verbosity=0,
)
def test_loaddata_not_found_fields_ignore(self):
"""
Test for ticket #9279 -- Ignores entries in
the serialized data for fields that have been removed
from the database.
"""
management.call_command(
'loaddata',
'sequence_extra',
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, 'Lion')
def test_loaddata_not_found_fields_ignore_xml(self):
"""
Test for ticket #19998 -- Ignore entries in the XML serialized data
for fields that have been removed from the model definition.
"""
management.call_command(
'loaddata',
'sequence_extra_xml',
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, 'Wolf')
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_pretty_print_xml(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
'loaddata',
'pretty.xml',
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, None)
self.assertEqual(Stuff.objects.all()[0].owner, None)
@skipUnlessDBFeature('interprets_empty_strings_as_nulls')
def test_pretty_print_xml_empty_strings(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
'loaddata',
'pretty.xml',
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, '')
self.assertEqual(Stuff.objects.all()[0].owner, None)
def test_absolute_path(self):
"""
Regression test for ticket #6436 --
os.path.join will throw away the initial parts of a path if it
encounters an absolute path.
This means that if a fixture is specified as an absolute path,
we need to make sure we don't discover the absolute path in every
fixture directory.
"""
load_absolute_path = os.path.join(
os.path.dirname(upath(__file__)),
'fixtures',
'absolute.json'
)
management.call_command(
'loaddata',
load_absolute_path,
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
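# Worked example of the os.path.join behavior described in the docstring
# above (editorial sketch, POSIX paths assumed):
#
#   >>> import os.path
#   >>> os.path.join('/some/fixture/dir', '/abs/path/absolute.json')
#   '/abs/path/absolute.json'  # the earlier components are discarded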
def test_relative_path(self, path=['fixtures', 'absolute.json']):
relative_path = os.path.join(*path)
cwd = os.getcwd()
try:
os.chdir(_cur_dir)
management.call_command(
'loaddata',
relative_path,
verbosity=0,
)
finally:
os.chdir(cwd)
self.assertEqual(Absolute.objects.count(), 1)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1')])
def test_relative_path_in_fixture_dirs(self):
self.test_relative_path(path=['inner', 'absolute.json'])
def test_path_containing_dots(self):
management.call_command(
'loaddata',
'path.containing.dots.json',
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
def test_unknown_format(self):
"""
Test for ticket #4371 -- Loading data of an unknown format should fail.
Validate that error conditions are caught correctly.
"""
with six.assertRaisesRegex(self, management.CommandError,
"Problem installing fixture 'bad_fixture1': "
"unkn is not a known serialization format."):
management.call_command(
'loaddata',
'bad_fixture1.unkn',
verbosity=0,
)
@skipIfNonASCIIPath
@override_settings(SERIALIZATION_MODULES={'unkn': 'unexistent.path'})
def test_unimportable_serializer(self):
"""
Test that failing serializer import raises the proper error
"""
with six.assertRaisesRegex(self, ImportError,
r"No module named.*unexistent"):
management.call_command(
'loaddata',
'bad_fixture1.unkn',
verbosity=0,
)
def test_invalid_data(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
using explicit filename.
Test for ticket #18213 -- warning conditions are caught correctly
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2.xml',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'bad_fixture2'. (File format may be invalid.)")
def test_invalid_data_no_ext(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
without file extension.
Test for ticket #18213 -- warning conditions are caught correctly
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'bad_fixture2'. (File format may be invalid.)")
def test_empty(self):
"""
Test for ticket #18213 -- Loading a fixture file with no data outputs a
warning. Previously, an empty fixture raised an exception; see ticket #4371.
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'empty',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'empty'. (File format may be invalid.)")
def test_error_message(self):
"""
Regression for #9011 - error message is correct.
Change from error to warning for ticket #18213.
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2',
'animal',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message), "No fixture data found for 'bad_fixture2'. (File format may be invalid.)")
def test_pg_sequence_resetting_checks(self):
"""
Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
ascend to parent models when inheritance is used
(since they are treated individually).
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
self.assertEqual(Parent.objects.all()[0].id, 1)
self.assertEqual(Child.objects.all()[0].id, 1)
def test_close_connection_after_loaddata(self):
"""
Test for ticket #7572 -- MySQL has a problem if the same connection is
used to create tables, load data, and then query over that data.
To compensate, we close the connection after running loaddata.
This ensures that a new connection is opened when test queries are
issued.
"""
management.call_command(
'loaddata',
'big-fixture.json',
verbosity=0,
)
articles = Article.objects.exclude(id=9)
self.assertEqual(
list(articles.values_list('id', flat=True)),
[1, 2, 3, 4, 5, 6, 7, 8]
)
# Just for good measure, run the same query again.
# Under the influence of ticket #7572, this will
# give a different result to the previous call.
self.assertEqual(
list(articles.values_list('id', flat=True)),
[1, 2, 3, 4, 5, 6, 7, 8]
)
def test_field_value_coerce(self):
"""
Test for tickets #8298, #9942 - Field values should be coerced into the
correct type by the deserializer, not as part of the database write.
"""
self.pre_save_checks = []
signals.pre_save.connect(self.animal_pre_save_check)
try:
management.call_command(
'loaddata',
'animal.xml',
verbosity=0,
)
self.assertEqual(
self.pre_save_checks,
[
("Count = 42 (<%s 'int'>)" % ('class' if PY3 else 'type'),
"Weight = 1.2 (<%s 'float'>)" % ('class' if PY3 else 'type'))
]
)
finally:
signals.pre_save.disconnect(self.animal_pre_save_check)
def test_dumpdata_uses_default_manager(self):
"""
Regression for #11286
Ensure that dumpdata honors the default manager
Dump the current contents of the database as a JSON fixture
"""
management.call_command(
'loaddata',
'animal.xml',
verbosity=0,
)
management.call_command(
'loaddata',
'sequence.json',
verbosity=0,
)
animal = Animal(
name='Platypus',
latin_name='Ornithorhynchus anatinus',
count=2,
weight=2.2,
)
animal.save()
out = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.animal',
format='json',
stdout=out,
)
# Output order isn't guaranteed, so check for parts
data = out.getvalue()
# Get rid of artifacts like '000000002' to eliminate the differences
# between different Python versions.
data = re.sub('0{6,}[0-9]', '', data)
animals_data = sorted([
{"pk": 1, "model": "fixtures_regress.animal", "fields": {"count": 3, "weight": 1.2, "name": "Lion", "latin_name": "Panthera leo"}},
{"pk": 10, "model": "fixtures_regress.animal", "fields": {"count": 42, "weight": 1.2, "name": "Emu", "latin_name": "Dromaius novaehollandiae"}},
{"pk": animal.pk, "model": "fixtures_regress.animal", "fields": {"count": 2, "weight": 2.2, "name": "Platypus", "latin_name": "Ornithorhynchus anatinus"}},
], key=lambda x: x["pk"])
data = sorted(json.loads(data), key=lambda x: x["pk"])
self.maxDiff = 1024
self.assertEqual(data, animals_data)
def test_proxy_model_included(self):
"""
Regression for #11428 - Proxy models aren't included when you dumpdata
"""
out = StringIO()
# Create an instance of the concrete class
widget = Widget.objects.create(name='grommet')
management.call_command(
'dumpdata',
'fixtures_regress.widget',
'fixtures_regress.widgetproxy',
format='json',
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
"""[{"pk": %d, "model": "fixtures_regress.widget", "fields": {"name": "grommet"}}]"""
% widget.pk
)
@skipUnlessDBFeature('supports_forward_references')
def test_loaddata_works_when_fixture_has_forward_refs(self):
"""
Regression for #3615 - Forward references cause fixtures not to load in MySQL (InnoDB)
"""
management.call_command(
'loaddata',
'forward_ref.json',
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_raises_error_when_fixture_has_invalid_foreign_key(self):
"""
Regression for #3615 - Ensure data with nonexistent child key references raises an error
"""
with six.assertRaisesRegex(self, IntegrityError,
"Problem installing fixture"):
management.call_command(
'loaddata',
'forward_ref_bad_data.json',
verbosity=0,
)
@skipUnlessDBFeature('supports_forward_references')
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_2')])
def test_loaddata_forward_refs_split_fixtures(self):
"""
Regression for #17530 - should be able to cope with forward references
when the fixtures are not in the same files or directories.
"""
management.call_command(
'loaddata',
'forward_ref_1.json',
'forward_ref_2.json',
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_no_fixture_specified(self):
"""
Regression for #7043 - Error is quickly reported when no fixture is provided on the command line.
"""
with six.assertRaisesRegex(self, management.CommandError,
"No database fixture specified. Please provide the path of "
"at least one fixture in the command line."):
management.call_command(
'loaddata',
verbosity=0,
)
def test_ticket_20820(self):
"""
Regression for ticket #20820 -- loaddata on a model that inherits
from a model with a M2M shouldn't blow up.
"""
management.call_command(
'loaddata',
'special-article.json',
verbosity=0,
)
def test_ticket_22421(self):
"""
Regression for ticket #22421 -- loaddata on a model that inherits from
a grand-parent model with a M2M but via an abstract parent shouldn't
blow up.
"""
management.call_command(
'loaddata',
'feature.json',
verbosity=0,
)
def test_loaddata_with_m2m_to_self(self):
"""
Regression test for ticket #17946.
"""
management.call_command(
'loaddata',
'm2mtoself.json',
verbosity=0,
)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_1')])
def test_fixture_dirs_with_duplicates(self):
"""
settings.FIXTURE_DIRS cannot contain duplicates in order to avoid
repeated fixture loading.
"""
self.assertRaisesMessage(
ImproperlyConfigured,
"settings.FIXTURE_DIRS contains duplicates.",
management.call_command,
'loaddata',
'absolute.json',
verbosity=0,
)
@skipIfNonASCIIPath
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures')])
def test_fixture_dirs_with_default_fixture_path(self):
"""
settings.FIXTURE_DIRS cannot contain a default fixtures directory
for an application (app/fixtures) in order to avoid repeated fixture loading.
"""
self.assertRaisesMessage(
ImproperlyConfigured,
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS."
% (os.path.join(_cur_dir, 'fixtures'), 'fixtures_regress'),
management.call_command,
'loaddata',
'absolute.json',
verbosity=0,
)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_2')])
def test_loaddata_with_valid_fixture_dirs(self):
management.call_command(
'loaddata',
'absolute.json',
verbosity=0,
)
class NaturalKeyFixtureTests(TestCase):
def test_nk_deserialize(self):
"""
Test for ticket #13030 - Python based parser version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
)
self.assertEqual(
NKChild.objects.get(pk=1).data,
'apple'
)
self.assertEqual(
RefToNKChild.objects.get(pk=1).nk_fk.data,
'apple'
)
def test_nk_deserialize_xml(self):
"""
Test for ticket #13030 - XML version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance2.xml',
verbosity=0,
)
self.assertEqual(
NKChild.objects.get(pk=2).data,
'banana'
)
self.assertEqual(
RefToNKChild.objects.get(pk=2).nk_fk.data,
'apple'
)
def test_nk_on_serialize(self):
"""
Check that natural key requirements are taken into account
when serializing models
"""
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
)
out = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.book',
'fixtures_regress.person',
'fixtures_regress.store',
verbosity=0,
format='json',
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
"""[{"fields": {"main": null, "name": "Amazon"}, "model": "fixtures_regress.store"}, {"fields": {"main": null, "name": "Borders"}, "model": "fixtures_regress.store"}, {"fields": {"name": "Neal Stephenson"}, "model": "fixtures_regress.person"}, {"pk": 1, "model": "fixtures_regress.book", "fields": {"stores": [["Amazon"], ["Borders"]], "name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]"""
)
def test_dependency_sorting(self):
"""
Now let's check the dependency sorting explicitly. It doesn't matter in
what order you mention the models: Store *must* be serialized before
Person, and both must be serialized before Book.
"""
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Book, Person, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
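# Editorial note: Book has a foreign key to Person and an M2M to Store, and
# both targets define natural keys, so sort_dependencies must emit Store and
# Person before Book no matter how the input list below is permuted.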
def test_dependency_sorting_2(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Book, Store, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_3(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Store, Book, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_4(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Store, Person, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_5(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, Book, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_6(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_dangling(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, Circle1, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Circle1, Store, Person, Book]
)
def test_dependency_sorting_tight_circular(self):
self.assertRaisesMessage(
RuntimeError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
serializers.sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Store, Book])],
)
def test_dependency_sorting_tight_circular_2(self):
self.assertRaisesMessage(
RuntimeError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
serializers.sort_dependencies,
[('fixtures_regress', [Circle1, Book, Circle2])],
)
def test_dependency_self_referential(self):
self.assertRaisesMessage(
RuntimeError,
"""Can't resolve dependencies for fixtures_regress.Circle3 in serialized app list.""",
serializers.sort_dependencies,
[('fixtures_regress', [Book, Circle3])],
)
def test_dependency_sorting_long(self):
self.assertRaisesMessage(
RuntimeError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized app list.""",
serializers.sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Circle3, Store, Book])],
)
def test_dependency_sorting_normal(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, ExternalDependency, Book])]
)
self.assertEqual(
sorted_deps,
[Person, Book, ExternalDependency]
)
def test_normal_pk(self):
"""
Check that normal primary keys still work
on a model with natural key capabilities
"""
management.call_command(
'loaddata',
'non_natural_1.json',
verbosity=0,
)
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
)
management.call_command(
'loaddata',
'non_natural_2.xml',
verbosity=0,
)
books = Book.objects.all()
self.assertEqual(
books.__repr__(),
"""[<Book: Cryptonomicon by Neal Stephenson (available at Amazon, Borders)>, <Book: Ender's Game by Orson Scott Card (available at Collins Bookstore)>, <Book: Permutation City by Greg Egan (available at Angus and Robertson)>]"""
)
class M2MNaturalKeyFixtureTests(TestCase):
"""Tests for ticket #14426."""
def test_dependency_sorting_m2m_simple(self):
"""
M2M relations without explicit through models SHOULD count as dependencies.
Regression test for bugs that could be caused by flawed fixes to
#14226, namely if M2M checks are removed from sort_dependencies
altogether.
"""
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [M2MSimpleA, M2MSimpleB])]
)
self.assertEqual(sorted_deps, [M2MSimpleB, M2MSimpleA])
def test_dependency_sorting_m2m_simple_circular(self):
"""
Resolving circular M2M relations without explicit through models should
fail loudly
"""
self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.M2MSimpleCircularA, "
"fixtures_regress.M2MSimpleCircularB in serialized app list.",
serializers.sort_dependencies,
[('fixtures_regress', [M2MSimpleCircularA, M2MSimpleCircularB])]
)
def test_dependency_sorting_m2m_complex(self):
"""
M2M relations with explicit through models should NOT count as
dependencies. The through model itself will have dependencies, though.
"""
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [M2MComplexA, M2MComplexB, M2MThroughAB])]
)
# Order between M2MComplexA and M2MComplexB doesn't matter. The through
# model has dependencies to them though, so it should come last.
self.assertEqual(sorted_deps[-1], M2MThroughAB)
def test_dependency_sorting_m2m_complex_circular_1(self):
"""
Circular M2M relations with explicit through models should be serializable
"""
A, B, C, AtoB, BtoC, CtoA = (M2MComplexCircular1A, M2MComplexCircular1B,
M2MComplexCircular1C, M2MCircular1ThroughAB,
M2MCircular1ThroughBC, M2MCircular1ThroughCA)
try:
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [A, B, C, AtoB, BtoC, CtoA])]
)
except CommandError:
self.fail("Serialization dependency solving algorithm isn't "
"capable of handling circular M2M setups with "
"intermediate models.")
# The dependency sorting should not result in an error, and the
# through model should have dependencies to the other models and as
# such come last in the list.
self.assertEqual(sorted_deps[:3], [A, B, C])
self.assertEqual(sorted_deps[3:], [AtoB, BtoC, CtoA])
def test_dependency_sorting_m2m_complex_circular_2(self):
"""
Circular M2M relations with explicit through models should be serializable.
This tests the circularity with explicit natural_key.dependencies.
"""
try:
sorted_deps = serializers.sort_dependencies([
('fixtures_regress', [
M2MComplexCircular2A,
M2MComplexCircular2B,
M2MCircular2ThroughAB])
])
except CommandError:
self.fail("Serialization dependency solving algorithm isn't "
"capable of handling circular M2M setups with "
"intermediate models plus natural key dependency hints.")
self.assertEqual(sorted_deps[:2], [M2MComplexCircular2A, M2MComplexCircular2B])
self.assertEqual(sorted_deps[2:], [M2MCircular2ThroughAB])
def test_dump_and_load_m2m_simple(self):
"""
Test serializing and deserializing back models with simple M2M relations
"""
a = M2MSimpleA.objects.create(data="a")
b1 = M2MSimpleB.objects.create(data="b1")
b2 = M2MSimpleB.objects.create(data="b2")
a.b_set.add(b1)
a.b_set.add(b2)
out = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.M2MSimpleA',
'fixtures_regress.M2MSimpleB',
use_natural_foreign_keys=True,
stdout=out,
)
for model in [M2MSimpleA, M2MSimpleB]:
model.objects.all().delete()
objects = serializers.deserialize("json", out.getvalue())
for obj in objects:
obj.save()
new_a = M2MSimpleA.objects.get_by_natural_key("a")
self.assertQuerysetEqual(new_a.b_set.all(), [
"<M2MSimpleB: b1>",
"<M2MSimpleB: b2>"
], ordered=False)
class TestTicket11101(TransactionTestCase):
available_apps = [
'fixtures_regress',
'django.contrib.auth',
'django.contrib.contenttypes',
]
@skipUnlessDBFeature('supports_transactions')
def test_ticket_11101(self):
"""Test that fixtures can be rolled back (ticket #11101)."""
with transaction.atomic():
management.call_command(
'loaddata',
'thingy.json',
verbosity=0,
)
self.assertEqual(Thingy.objects.count(), 1)
transaction.set_rollback(True)
self.assertEqual(Thingy.objects.count(), 0)
class TestLoadFixtureFromOtherAppDirectory(TestCase):
"""
#23612 -- fixtures path should be normalized to allow referencing relative
paths on Windows.
"""
current_dir = os.path.abspath(os.path.dirname(__file__))
# relative_prefix is something like tests/fixtures_regress or
# fixtures_regress depending on how runtests.py is invoked.
# All path separators must be / in order to be a proper regression test on
# Windows, so replace as appropriate.
relative_prefix = os.path.relpath(current_dir, os.getcwd()).replace('\\', '/')
fixtures = [relative_prefix + '/fixtures/absolute.json']
def test_fixtures_loaded(self):
count = Absolute.objects.count()
self.assertGreater(count, 0, "Fixtures not loaded properly.")
|
markrawlingson/SickRage
|
refs/heads/master
|
lib/sqlalchemy/sql/operators.py
|
78
|
# sql/operators.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from .. import util
from operator import (
and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg,
getitem, lshift, rshift
)
if util.py2k:
from operator import div
else:
div = truediv
class Operators(object):
"""Base of comparison and logical operators.
Implements base methods :meth:`~sqlalchemy.sql.operators.Operators.operate` and
:meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
:meth:`~sqlalchemy.sql.operators.Operators.__and__`,
:meth:`~sqlalchemy.sql.operators.Operators.__or__`,
:meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
Usually is used via its most common subclass
:class:`.ColumnOperators`.
"""
def __and__(self, other):
"""Implement the ``&`` operator.
When used with SQL expressions, results in an
AND operation, equivalent to
:func:`~.expression.and_`, that is::
a & b
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
operator precedence; the ``&`` operator has the highest precedence.
The operands should be enclosed in parentheses if they contain
further sub expressions::
(a == 2) & (b == 4)
"""
return self.operate(and_, other)
def __or__(self, other):
"""Implement the ``|`` operator.
When used with SQL expressions, results in an
OR operation, equivalent to
:func:`~.expression.or_`, that is::
a | b
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
operator precedence; the ``|`` operator has the highest precedence.
The operands should be enclosed in parentheses if they contain
further sub expressions::
(a == 2) | (b == 4)
"""
return self.operate(or_, other)
def __invert__(self):
"""Implement the ``~`` operator.
When used with SQL expressions, results in a
NOT operation, equivalent to
:func:`~.expression.not_`, that is::
~a
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
return self.operate(inv)
def op(self, opstring, precedence=0, is_comparison=False):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in ``somecolumn``.
:param opstring: a string which will be output as the infix operator
between this element and the expression passed to the
generated function.
:param precedence: precedence to apply to the operator, when
parenthesizing expressions. A lower number will cause the expression
to be parenthesized when applied against another operator with
higher precedence. The default value of ``0`` is lower than all
operators except for the comma (``,``) and ``AS`` operators.
A value of 100 will be higher or equal to all operators, and -100
will be lower than or equal to all operators.
.. versionadded:: 0.8 - added the 'precedence' argument.
:param is_comparison: if True, the operator will be considered as a
"comparison" operator, that is which evaulates to a boolean true/false
value, like ``==``, ``>``, etc. This flag should be set so that
ORM relationships can establish that the operator is a comparison
operator when used in a custom join condition.
.. versionadded:: 0.9.2 - added the :paramref:`.Operators.op.is_comparison`
flag.
.. seealso::
:ref:`types_operators`
:ref:`relationship_custom_operator`
"""
operator = custom_op(opstring, precedence, is_comparison)
def against(other):
return operator(self, other)
return against
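# A hedged usage sketch for op() (editorial, not part of the original
# source); `somecolumn` is a placeholder column expression:
#
#   is_regex_match = somecolumn.op('~', precedence=3, is_comparison=True)
#   expr = is_regex_match('^foo')   # renders: somecolumn ~ <bound parameter>
#
# The precedence value here is illustrative only.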
def operate(self, op, *other, **kwargs):
"""Operate on an argument.
This is the lowest level of operation, raises
:class:`NotImplementedError` by default.
Overriding this on a subclass can allow common
behavior to be applied to all operations.
For example, overriding :class:`.ColumnOperators`
to apply ``func.lower()`` to the left and right
side::
class MyComparator(ColumnOperators):
def operate(self, op, other):
return op(func.lower(self), func.lower(other))
:param op: Operator callable.
:param \*other: the 'other' side of the operation. Will
be a single scalar for most operations.
:param \**kwargs: modifiers. These may be passed by special
operators such as :meth:`ColumnOperators.contains`.
"""
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
"""Reverse operate on an argument.
Usage is the same as :meth:`operate`.
"""
raise NotImplementedError(str(op))
class custom_op(object):
"""Represent a 'custom' operator.
:class:`.custom_op` is normally instantiated when the
:meth:`.ColumnOperators.op` method is used to create a
custom operator callable. The class can also be used directly
when programmatically constructing expressions. E.g.
to represent the "factorial" operation::
from sqlalchemy.sql import UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
"""
__name__ = 'custom_op'
def __init__(self, opstring, precedence=0, is_comparison=False):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
def __eq__(self, other):
return isinstance(other, custom_op) and \
other.opstring == self.opstring
def __hash__(self):
return id(self)
def __call__(self, left, right, **kw):
return left.operate(self, right, **kw)
class ColumnOperators(Operators):
"""Defines boolean, comparison, and other operators for
:class:`.ColumnElement` expressions.
By default, all methods call down to
:meth:`.operate` or :meth:`.reverse_operate`,
passing in the appropriate operator function from the
Python builtin ``operator`` module or
a SQLAlchemy-specific operator function from
:mod:`sqlalchemy.expression.operators`. For example
the ``__eq__`` function::
def __eq__(self, other):
return self.operate(operators.eq, other)
Where ``operators.eq`` is essentially::
def eq(a, b):
return a == b
The core column expression unit :class:`.ColumnElement`
overrides :meth:`.Operators.operate` and others
to return further :class:`.ColumnElement` constructs,
so that the ``==`` operation above is replaced by a clause
construct.
See also:
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
:class:`.ColumnOperators`
:class:`.PropComparator`
"""
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
"""Implement the ``<`` operator.
In a column context, produces the clause ``a < b``.
"""
return self.operate(lt, other)
def __le__(self, other):
"""Implement the ``<=`` operator.
In a column context, produces the clause ``a <= b``.
"""
return self.operate(le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
"""Implement the ``==`` operator.
In a column context, produces the clause ``a = b``.
If the target is ``None``, produces ``a IS NULL``.
"""
return self.operate(eq, other)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a column context, produces the clause ``a != b``.
If the target is ``None``, produces ``a IS NOT NULL``.
"""
return self.operate(ne, other)
def __gt__(self, other):
"""Implement the ``>`` operator.
In a column context, produces the clause ``a > b``.
"""
return self.operate(gt, other)
def __ge__(self, other):
"""Implement the ``>=`` operator.
In a column context, produces the clause ``a >= b``.
"""
return self.operate(ge, other)
def __neg__(self):
"""Implement the ``-`` operator.
In a column context, produces the clause ``-a``.
"""
return self.operate(neg)
def __getitem__(self, index):
"""Implement the [] operator.
This can be used by some database-specific types
such as Postgresql ARRAY and HSTORE.
"""
return self.operate(getitem, index)
def __lshift__(self, other):
"""implement the << operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
<< as an extension point.
"""
return self.operate(lshift, other)
def __rshift__(self, other):
"""implement the >> operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
>> as an extension point.
"""
return self.operate(rshift, other)
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
def like(self, other, escape=None):
"""Implement the ``like`` operator.
In a column context, produces the clause ``a LIKE other``.
E.g.::
select([sometable]).where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.like("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(like_op, other, escape=escape)
def ilike(self, other, escape=None):
"""Implement the ``ilike`` operator.
In a column context, produces the clause ``a ILIKE other``.
E.g.::
select([sometable]).where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.ilike("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(ilike_op, other, escape=escape)
def in_(self, other):
"""Implement the ``in`` operator.
In a column context, produces the clause ``a IN other``.
"other" may be a tuple/list of column expressions,
or a :func:`~.expression.select` construct.
"""
return self.operate(in_op, other)
def notin_(self, other):
"""implement the ``NOT IN`` operator.
This is equivalent to using negation with :meth:`.ColumnOperators.in_`,
i.e. ``~x.in_(y)``.
.. versionadded:: 0.8
.. seealso::
:meth:`.ColumnOperators.in_`
"""
return self.operate(notin_op, other)
def notlike(self, other, escape=None):
"""implement the ``NOT LIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``.
.. versionadded:: 0.8
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(notlike_op, other, escape=escape)
def notilike(self, other, escape=None):
"""implement the ``NOT ILIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``.
.. versionadded:: 0.8
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(notilike_op, other, escape=escape)
def is_(self, other):
"""Implement the ``IS`` operator.
Normally, ``IS`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS`` may be desirable if comparing to boolean values
on certain platforms.
.. versionadded:: 0.7.9
.. seealso:: :meth:`.ColumnOperators.isnot`
"""
return self.operate(is_, other)
def isnot(self, other):
"""Implement the ``IS NOT`` operator.
Normally, ``IS NOT`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS NOT`` may be desirable if comparing to boolean values
on certain platforms.
.. versionadded:: 0.7.9
.. seealso:: :meth:`.ColumnOperators.is_`
"""
return self.operate(isnot, other)
def startswith(self, other, **kwargs):
"""Implement the ``startwith`` operator.
In a column context, produces the clause ``LIKE '<other>%'``
"""
return self.operate(startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
"""Implement the 'endswith' operator.
In a column context, produces the clause ``LIKE '%<other>'``
"""
return self.operate(endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
"""Implement the 'contains' operator.
In a column context, produces the clause ``LIKE '%<other>%'``
"""
return self.operate(contains_op, other, **kwargs)
def match(self, other, **kwargs):
"""Implements the 'match' operator.
In a column context, this produces a MATCH clause, i.e.
``MATCH '<other>'``. The allowed contents of ``other``
are database backend specific.
"""
return self.operate(match_op, other, **kwargs)
def desc(self):
"""Produce a :func:`~.expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self):
"""Produce a :func:`~.expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nullsfirst(self):
"""Produce a :func:`~.expression.nullsfirst` clause against the
parent object."""
return self.operate(nullsfirst_op)
def nullslast(self):
"""Produce a :func:`~.expression.nullslast` clause against the
parent object."""
return self.operate(nullslast_op)
def collate(self, collation):
"""Produce a :func:`~.expression.collate` clause against
the parent object, given the collation string."""
return self.operate(collate, collation)
def __radd__(self, other):
"""Implement the ``+`` operator in reverse.
See :meth:`.ColumnOperators.__add__`.
"""
return self.reverse_operate(add, other)
def __rsub__(self, other):
"""Implement the ``-`` operator in reverse.
See :meth:`.ColumnOperators.__sub__`.
"""
return self.reverse_operate(sub, other)
def __rmul__(self, other):
"""Implement the ``*`` operator in reverse.
See :meth:`.ColumnOperators.__mul__`.
"""
return self.reverse_operate(mul, other)
def __rdiv__(self, other):
"""Implement the ``/`` operator in reverse.
See :meth:`.ColumnOperators.__div__`.
"""
return self.reverse_operate(div, other)
def between(self, cleft, cright):
"""Produce a :func:`~.expression.between` clause against
the parent object, given the lower and upper range."""
return self.operate(between_op, cleft, cright)
def distinct(self):
"""Produce a :func:`~.expression.distinct` clause against the
parent object.
"""
return self.operate(distinct_op)
def __add__(self, other):
"""Implement the ``+`` operator.
In a column context, produces the clause ``a + b``
if the parent object has non-string affinity.
If the parent object has a string affinity,
produces the concatenation operator, ``a || b`` -
see :meth:`.ColumnOperators.concat`.
"""
return self.operate(add, other)
def __sub__(self, other):
"""Implement the ``-`` operator.
In a column context, produces the clause ``a - b``.
"""
return self.operate(sub, other)
def __mul__(self, other):
"""Implement the ``*`` operator.
In a column context, produces the clause ``a * b``.
"""
return self.operate(mul, other)
def __div__(self, other):
"""Implement the ``/`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(div, other)
def __mod__(self, other):
"""Implement the ``%`` operator.
In a column context, produces the clause ``a % b``.
"""
return self.operate(mod, other)
def __truediv__(self, other):
"""Implement the ``//`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(truediv, other)
def __rtruediv__(self, other):
"""Implement the ``//`` operator in reverse.
See :meth:`.ColumnOperators.__truediv__`.
"""
return self.reverse_operate(truediv, other)
def from_():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def istrue(a):
raise NotImplementedError()
def isfalse(a):
raise NotImplementedError()
def is_(a, b):
return a.is_(b)
def isnot(a, b):
return a.isnot(b)
def collate(a, b):
return a.collate(b)
def op(a, opstring, b):
return a.op(opstring)(b)
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
def notlike_op(a, b, escape=None):
return a.notlike(b, escape=escape)
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
def notilike_op(a, b, escape=None):
return a.notilike(b, escape=escape)
def between_op(a, b, c):
return a.between(b, c)
def in_op(a, b):
return a.in_(b)
def notin_op(a, b):
return a.notin_(b)
def distinct_op(a):
return a.distinct()
def startswith_op(a, b, escape=None):
return a.startswith(b, escape=escape)
def notstartswith_op(a, b, escape=None):
return ~a.startswith(b, escape=escape)
def endswith_op(a, b, escape=None):
return a.endswith(b, escape=escape)
def notendswith_op(a, b, escape=None):
return ~a.endswith(b, escape=escape)
def contains_op(a, b, escape=None):
return a.contains(b, escape=escape)
def notcontains_op(a, b, escape=None):
return ~a.contains(b, escape=escape)
def match_op(a, b):
return a.match(b)
def comma_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
def nullsfirst_op(a):
return a.nullsfirst()
def nullslast_op(a):
return a.nullslast()
_commutative = set([eq, ne, add, mul])
_comparison = set([eq, ne, lt, gt, ge, le, between_op])
def is_comparison(op):
return op in _comparison or \
isinstance(op, custom_op) and op.is_comparison
def is_commutative(op):
return op in _commutative
def is_ordering_modifier(op):
return op in (asc_op, desc_op,
nullsfirst_op, nullslast_op)
_associative = _commutative.union([concat_op, and_, or_])
_natural_self_precedent = _associative.union([getitem])
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
"""
_asbool = util.symbol('_asbool', canonical=-10)
_smallest = util.symbol('_smallest', canonical=-100)
_largest = util.symbol('_largest', canonical=100)
_PRECEDENCE = {
from_: 15,
getitem: 15,
mul: 8,
truediv: 8,
div: 8,
mod: 8,
neg: 8,
add: 7,
sub: 7,
concat_op: 6,
match_op: 6,
ilike_op: 6,
notilike_op: 6,
like_op: 6,
notlike_op: 6,
in_op: 6,
notin_op: 6,
is_: 6,
isnot: 6,
eq: 5,
ne: 5,
gt: 5,
lt: 5,
ge: 5,
le: 5,
between_op: 5,
distinct_op: 5,
inv: 5,
istrue: 5,
isfalse: 5,
and_: 3,
or_: 2,
comma_op: -1,
desc_op: 3,
asc_op: 3,
collate: 4,
as_: -1,
exists: 0,
_asbool: -10,
_smallest: _smallest,
_largest: _largest
}
def is_precedent(operator, against):
if operator is against and operator in _natural_self_precedent:
return False
else:
return (_PRECEDENCE.get(operator,
getattr(operator, 'precedence', _smallest)) <=
_PRECEDENCE.get(against,
getattr(against, 'precedence', _largest)))
|
release-engineering/fedmsg_meta_umb
|
refs/heads/master
|
fedmsg_meta_umb/greenwave.py
|
1
|
# Copyright (C) 2018 Red Hat, Inc.
#
# fedmsg_meta_umb is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg_meta_umb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Giulia Naponiello <gnaponie@redhat.com>
from fedmsg.meta.base import BaseProcessor
class GreenwaveProcessor(BaseProcessor):
topic_prefix_re = r'/topic/VirtualTopic\.eng'
__name__ = 'greenwave'
__description__ = 'Greenwave Gating Service'
__link__ = 'https://greenwave.engineering.redhat.com/api/v1.0/policies'
__obj__ = 'Gating Decisions'
__docs__ = 'https://mojo.redhat.com/docs/DOC-1171796'
__icon__ = '_static/img/icons/greenwave.png'
@staticmethod
def satisfied(msg):
return msg['msg']['policies_satisfied']
def link(self, msg, **config):
subject = msg['msg']['subject']
base = "https://pipeline.engineering.redhat.com/"
koji_types = ('brew-build', 'koji_build')
for entry in subject:
if entry.get('type') in koji_types:
return base + "kojibuild/" + entry.get('item')
def title(self, msg, **config):
return msg['topic'].split('.', 2)[-1]
def subtitle(self, msg, **config):
if self.satisfied(msg):
decision = self._("is a GO")
else:
decision = self._("says NO-GO")
tmpl = self._(
"{summary} for {item}, "
"\"{decision_context}\" ({product_version})"
)
subject = msg['msg']['subject']
items = [entry.get('item') for entry in subject if entry.get('item')]
item = items[0] if items else "\"something\""
return tmpl.format(decision=decision, item=item, **msg['msg'])
def packages(self, msg, **config):
subject = msg['msg']['subject']
items = [
entry.get('item') for entry in subject
if entry.get('item') and entry.get('type') in ['brew-build', 'koji_build']
]
return set([item.rsplit('-', 2)[0] for item in items])
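# Illustrative sketch with a hypothetical message body (not taken from real
# bus traffic): given
#   msg['msg'] = {
#       'policies_satisfied': True,
#       'summary': 'all required tests passed',
#       'decision_context': 'osci_compose_gate',
#       'product_version': 'rhel-8',
#       'subject': [{'type': 'brew-build', 'item': 'nss-3.36.0-1.el8'}],
#   }
# subtitle() renders
#   'all required tests passed for nss-3.36.0-1.el8, "osci_compose_gate" (rhel-8)'
# and packages() returns {'nss'}.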
|
sachinkum/Bal-Aveksha
|
refs/heads/master
|
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/django/conf/locale/sr/formats.py
|
1008
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
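# Worked example: with these settings, datetime.date(2006, 10, 25) renders as
# '25.10.2006.' under SHORT_DATE_FORMAT, and the input string
# '25.10.2006. 14:30' parses via the '%d.%m.%Y. %H:%M' entry in
# DATETIME_INPUT_FORMATS.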
|
tambetm/neon
|
refs/heads/master
|
neon/transforms/xcov.py
|
9
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
XCov cost functions and classes for balance networks
"""
from neon.transforms.cost import Cost
def xcov_cost(backend, outputs, targets, temp, blkidx,
scale_by_batchsize=False):
blk1 = outputs[0:blkidx]
blk2 = outputs[blkidx:]
backend.xcov(blk1, blk2, out=temp[2])
backend.multiply(temp[2], temp[2], temp[2])
if scale_by_batchsize:
backend.divide(temp[2], temp[2].shape[1], temp[2])
result = backend.empty((1, 1), dtype=outputs.dtype)
backend.sum(temp[2], axes=None, out=result)
return backend.multiply(result, 0.5, result)
def xcov_cost_derivative(backend, outputs, targets, temp, blkidx,
scale=1.0):
# temp[0] is k1 x n
# temp[1] is k2 x n
# temp[2] is k1 x k2
# temp[3] is (k1+k2) x n
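    # temp[3] accumulates the full gradient: rows [:blkidx] hold
    # d(cost)/d(blk1), rows [blkidx:] hold d(cost)/d(blk2); the whole
    # buffer is scaled by scale / n before being returned.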
n = outputs.shape[1]
blk1 = outputs[0:blkidx]
blk2 = outputs[blkidx:]
backend.mean_norm(blk1, axis=1, out=temp[0])
backend.xcov(blk1, blk2, out=temp[2])
backend.dot(temp[2].transpose(), temp[0], out=temp[1])
temp[3][blkidx:] = temp[1]
backend.mean_norm(blk2, axis=1, out=temp[1])
backend.dot(temp[2], temp[1], out=temp[0])
temp[3][:blkidx] = temp[0]
backend.multiply(temp[3], scale / n, out=temp[3])
return temp[3]
class XCovariance(Cost):
"""
    Embodiment of an X covariance cost function.
"""
def __init__(self, **kwargs):
super(XCovariance, self).__init__(**kwargs)
for req_param in ['blkidx']:
if not hasattr(self, req_param):
raise ValueError("required parameter: %s not specified" %
req_param)
def initialize(self, kwargs):
super(XCovariance, self).initialize(kwargs)
if self.blkidx > self.outputbuf.shape[0]:
raise ValueError("blkidx %d too large" % self.blkidx)
def set_outputbuf(self, databuf):
if not self.outputbuf:
self.outputbuf = databuf
if self.outputbuf.shape != databuf.shape or not self.temp:
n = self.outputbuf.shape[1]
k1 = self.blkidx
k2 = self.outputbuf.shape[0]-k1
tempbuf1 = self.backend.empty((k1, n), self.temp_dtype)
tempbuf2 = self.backend.empty((k2, n), self.temp_dtype)
tempbuf3 = self.backend.empty((k1, k2), self.temp_dtype)
tempbuf4 = self.backend.empty(self.outputbuf.shape,
self.temp_dtype)
self.temp = [tempbuf1, tempbuf2, tempbuf3, tempbuf4]
self.outputbuf = databuf
def get_deltabuf(self):
return self.temp[3]
def apply_function(self, targets, scale_by_batchsize=False):
"""
Apply the xcov cost function to the datasets passed.
"""
result = xcov_cost(self.backend, self.outputbuf, targets, self.temp,
self.blkidx, scale_by_batchsize)
return self.backend.multiply(result, self.scale, out=result)
def apply_derivative(self, targets):
"""
Apply the derivative of the xcov cost function to the datasets
passed.
"""
return xcov_cost_derivative(self.backend, self.outputbuf, targets,
self.temp, self.blkidx, self.scale)
|
candrews/portage
|
refs/heads/master
|
pym/portage/tests/resolver/soname/__test__.py
|
70
|
# Copyright 2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
|
emonty/ansible-modules-core
|
refs/heads/devel
|
network/eos/eos_facts.py
|
19
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: eos_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running Arista EOS
description:
- Collects a base set of device facts from a remote device that
is running eos. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: eos
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
# Collect all facts from the device
- eos_facts:
gather_subset: all
provider: "{{ cli }}"
# Collect only the config and default facts
- eos_facts:
gather_subset:
- config
provider: "{{ cli }}"
# Do not collect hardware facts
- eos_facts:
gather_subset:
- "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
  returned: always
  type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
ansible_net_image:
description: The image file the device is running
returned: always
type: str
ansible_net_fqdn:
description: The fully qualified domain name of the device
returned: always
type: str
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.netcli import CommandRunner, AddCommandError
from ansible.module_utils.six import iteritems
from ansible.module_utils.eos import NetworkModule
def add_command(runner, command, output=None):
try:
runner.add_command(command, output)
except AddCommandError:
# AddCommandError is raised for any issue adding a command to
# the runner. Silently ignore the exception in this case
pass
class FactsBase(object):
def __init__(self, runner):
self.runner = runner
self.facts = dict()
self.load_commands()
def load_commands(self):
raise NotImplementedError
class Default(FactsBase):
SYSTEM_MAP = {
'version': 'version',
'serialNumber': 'serialnum',
'modelName': 'model'
}
def load_commands(self):
add_command(self.runner, 'show version', output='json')
add_command(self.runner, 'show hostname', output='json')
add_command(self.runner, 'bash timeout 5 cat /mnt/flash/boot-config')
def populate(self):
data = self.runner.get_command('show version', 'json')
for key, value in iteritems(self.SYSTEM_MAP):
if key in data:
self.facts[value] = data[key]
self.facts.update(self.runner.get_command('show hostname', 'json'))
self.facts.update(self.parse_image())
def parse_image(self):
data = self.runner.get_command('bash timeout 5 cat /mnt/flash/boot-config')
if isinstance(data, dict):
data = data['messages'][0]
match = re.search(r'SWI=(.+)$', data, re.M)
if match:
value = match.group(1)
else:
value = None
return dict(image=value)
class Hardware(FactsBase):
def load_commands(self):
add_command(self.runner, 'dir all-filesystems', output='text')
add_command(self.runner, 'show version', output='json')
def populate(self):
self.facts.update(self.populate_filesystems())
self.facts.update(self.populate_memory())
def populate_filesystems(self):
data = self.runner.get_command('dir all-filesystems', 'text')
fs = re.findall(r'^Directory of (.+)/', data, re.M)
return dict(filesystems=fs)
def populate_memory(self):
values = self.runner.get_command('show version', 'json')
return dict(
memfree_mb=int(values['memFree']) / 1024,
memtotal_mb=int(values['memTotal']) / 1024
)
class Config(FactsBase):
def load_commands(self):
add_command(self.runner, 'show running-config', output='text')
def populate(self):
self.facts['config'] = self.runner.get_command('show running-config')
class Interfaces(FactsBase):
INTERFACE_MAP = {
'description': 'description',
'physicalAddress': 'macaddress',
'mtu': 'mtu',
'bandwidth': 'bandwidth',
'duplex': 'duplex',
'lineProtocolStatus': 'lineprotocol',
'interfaceStatus': 'operstatus',
'forwardingModel': 'type'
}
def load_commands(self):
add_command(self.runner, 'show interfaces', output='json')
add_command(self.runner, 'show lldp neighbors', output='json')
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.runner.get_command('show interfaces', 'json')
self.facts['interfaces'] = self.populate_interfaces(data)
data = self.runner.get_command('show lldp neighbors', 'json')
self.facts['neighbors'] = self.populate_neighbors(data['lldpNeighbors'])
def populate_interfaces(self, data):
facts = dict()
for key, value in iteritems(data['interfaces']):
intf = dict()
for remote, local in iteritems(self.INTERFACE_MAP):
if remote in value:
intf[local] = value[remote]
if 'interfaceAddress' in value:
intf['ipv4'] = dict()
for entry in value['interfaceAddress']:
intf['ipv4']['address'] = entry['primaryIp']['address']
intf['ipv4']['masklen'] = entry['primaryIp']['maskLen']
self.add_ip_address(entry['primaryIp']['address'], 'ipv4')
if 'interfaceAddressIp6' in value:
intf['ipv6'] = dict()
for entry in value['interfaceAddressIp6']['globalUnicastIp6s']:
intf['ipv6']['address'] = entry['address']
intf['ipv6']['subnet'] = entry['subnet']
self.add_ip_address(entry['address'], 'ipv6')
facts[key] = intf
return facts
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def populate_neighbors(self, neighbors):
facts = dict()
for value in neighbors:
port = value['port']
if port not in facts:
facts[port] = list()
lldp = dict()
lldp['host'] = value['neighborDevice']
lldp['port'] = value['neighborPort']
facts[port].append(lldp)
return facts
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = NetworkModule(argument_spec=spec, supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' %
(', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
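    # e.g. gather_subset=['!hardware'] leaves runable_subsets as
    # {'default', 'config', 'interfaces'} after the exclusion above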
facts = dict()
facts['gather_subset'] = list(runable_subsets)
runner = CommandRunner(module)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](runner))
runner.run()
try:
for inst in instances:
inst.populate()
facts.update(inst.facts)
except Exception:
module.exit_json(out=module.from_json(runner.items))
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts)
if __name__ == '__main__':
main()
|
ekesken/istatistikciadamlazim
|
refs/heads/master
|
openid/test/test_discover.py
|
65
|
import sys
import unittest
import datadriven
import os.path
from openid import fetchers
from openid.fetchers import HTTPResponse
from openid.yadis.discover import DiscoveryFailure
from openid.consumer import discover
from openid.yadis import xrires
from openid.yadis.xri import XRI
from urlparse import urlsplit
from openid import message
### Tests for conditions that trigger DiscoveryFailure
class SimpleMockFetcher(object):
def __init__(self, responses):
self.responses = list(responses)
def fetch(self, url, body=None, headers=None):
response = self.responses.pop(0)
assert body is None
assert response.final_url == url
return response
class TestDiscoveryFailure(datadriven.DataDrivenTestCase):
cases = [
[HTTPResponse('http://network.error/', None)],
[HTTPResponse('http://not.found/', 404)],
[HTTPResponse('http://bad.request/', 400)],
[HTTPResponse('http://server.error/', 500)],
[HTTPResponse('http://header.found/', 200,
headers={'x-xrds-location':'http://xrds.missing/'}),
HTTPResponse('http://xrds.missing/', 404)],
]
def __init__(self, responses):
self.url = responses[0].final_url
datadriven.DataDrivenTestCase.__init__(self, self.url)
self.responses = responses
def setUp(self):
fetcher = SimpleMockFetcher(self.responses)
fetchers.setDefaultFetcher(fetcher)
def tearDown(self):
fetchers.setDefaultFetcher(None)
def runOneTest(self):
expected_status = self.responses[-1].status
try:
discover.discover(self.url)
except DiscoveryFailure, why:
self.failUnlessEqual(why.http_response.status, expected_status)
else:
self.fail('Did not raise DiscoveryFailure')
### Tests for raising/catching exceptions from the fetcher through the
### discover function
# Python 2.5 displays a message when running this test, which is
# testing the behaviour in the presence of string exceptions,
# deprecated or not, so tell it no to complain when this particular
# string exception is raised.
import warnings
warnings.filterwarnings('ignore', 'raising a string.*', DeprecationWarning,
r'^openid\.test\.test_discover$', 77)
class ErrorRaisingFetcher(object):
"""Just raise an exception when fetch is called"""
def __init__(self, thing_to_raise):
self.thing_to_raise = thing_to_raise
def fetch(self, url, body=None, headers=None):
raise self.thing_to_raise
class DidFetch(Exception):
"""Custom exception just to make sure it's not handled differently"""
class TestFetchException(datadriven.DataDrivenTestCase):
"""Make sure exceptions get passed through discover function from
fetcher."""
cases = [
Exception(),
DidFetch(),
ValueError(),
RuntimeError(),
]
# String exceptions are finally gone from Python 2.6.
if sys.version_info[:2] < (2, 6):
cases.append('oi!')
def __init__(self, exc):
datadriven.DataDrivenTestCase.__init__(self, repr(exc))
self.exc = exc
def setUp(self):
fetcher = ErrorRaisingFetcher(self.exc)
fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
def tearDown(self):
fetchers.setDefaultFetcher(None)
def runOneTest(self):
try:
discover.discover('http://doesnt.matter/')
except:
exc = sys.exc_info()[1]
if exc is None:
# str exception
self.failUnless(self.exc is sys.exc_info()[0])
else:
self.failUnless(self.exc is exc, exc)
else:
            self.fail('Expected %r' % (self.exc,))
### Tests for openid.consumer.discover.discover
class TestNormalization(unittest.TestCase):
def testAddingProtocol(self):
f = ErrorRaisingFetcher(RuntimeError())
fetchers.setDefaultFetcher(f, wrap_exceptions=False)
try:
discover.discover('users.stompy.janrain.com:8000/x')
except DiscoveryFailure, why:
self.fail('failed to parse url with port correctly')
except RuntimeError:
pass #expected
fetchers.setDefaultFetcher(None)
class DiscoveryMockFetcher(object):
redirect = None
def __init__(self, documents):
self.documents = documents
self.fetchlog = []
def fetch(self, url, body=None, headers=None):
self.fetchlog.append((url, body, headers))
if self.redirect:
final_url = self.redirect
else:
final_url = url
try:
ctype, body = self.documents[url]
except KeyError:
status = 404
ctype = 'text/plain'
body = ''
else:
status = 200
return HTTPResponse(final_url, status, {'content-type': ctype}, body)
# from twisted.trial import unittest as trialtest
class BaseTestDiscovery(unittest.TestCase):
id_url = "http://someuser.unittest/"
documents = {}
fetcherClass = DiscoveryMockFetcher
def _checkService(self, s,
server_url,
claimed_id=None,
local_id=None,
canonical_id=None,
types=None,
used_yadis=False,
display_identifier=None
):
self.failUnlessEqual(server_url, s.server_url)
if types == ['2.0 OP']:
self.failIf(claimed_id)
self.failIf(local_id)
self.failIf(s.claimed_id)
self.failIf(s.local_id)
self.failIf(s.getLocalID())
self.failIf(s.compatibilityMode())
self.failUnless(s.isOPIdentifier())
self.failUnlessEqual(s.preferredNamespace(),
discover.OPENID_2_0_MESSAGE_NS)
else:
self.failUnlessEqual(claimed_id, s.claimed_id)
self.failUnlessEqual(local_id, s.getLocalID())
if used_yadis:
self.failUnless(s.used_yadis, "Expected to use Yadis")
else:
self.failIf(s.used_yadis,
"Expected to use old-style discovery")
openid_types = {
'1.1': discover.OPENID_1_1_TYPE,
'1.0': discover.OPENID_1_0_TYPE,
'2.0': discover.OPENID_2_0_TYPE,
'2.0 OP': discover.OPENID_IDP_2_0_TYPE,
}
type_uris = [openid_types[t] for t in types]
self.failUnlessEqual(type_uris, s.type_uris)
self.failUnlessEqual(canonical_id, s.canonicalID)
if s.canonicalID:
self.failUnless(s.getDisplayIdentifier() != claimed_id)
self.failUnless(s.getDisplayIdentifier() is not None)
self.failUnlessEqual(display_identifier, s.getDisplayIdentifier())
self.failUnlessEqual(s.claimed_id, s.canonicalID)
self.failUnlessEqual(s.display_identifier or s.claimed_id, s.getDisplayIdentifier())
def setUp(self):
self.documents = self.documents.copy()
self.fetcher = self.fetcherClass(self.documents)
fetchers.setDefaultFetcher(self.fetcher)
def tearDown(self):
fetchers.setDefaultFetcher(None)
def readDataFile(filename):
module_directory = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(
module_directory, 'data', 'test_discover', filename)
return file(filename).read()
class TestDiscovery(BaseTestDiscovery):
def _discover(self, content_type, data,
expected_services, expected_id=None):
if expected_id is None:
expected_id = self.id_url
self.documents[self.id_url] = (content_type, data)
id_url, services = discover.discover(self.id_url)
self.failUnlessEqual(expected_services, len(services))
self.failUnlessEqual(expected_id, id_url)
return services
def test_404(self):
self.failUnlessRaises(DiscoveryFailure,
discover.discover, self.id_url + '/404')
def test_noOpenID(self):
services = self._discover(content_type='text/plain',
data="junk",
expected_services=0)
services = self._discover(
content_type='text/html',
data=readDataFile('openid_no_delegate.html'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=False,
types=['1.1'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id=self.id_url,
)
def test_html1(self):
services = self._discover(
content_type='text/html',
data=readDataFile('openid.html'),
expected_services=1)
self._checkService(
services[0],
used_yadis=False,
types=['1.1'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id='http://smoker.myopenid.com/',
display_identifier=self.id_url,
)
def test_html1Fragment(self):
"""Ensure that the Claimed Identifier does not have a fragment
if one is supplied in the User Input."""
content_type = 'text/html'
data = readDataFile('openid.html')
expected_services = 1
self.documents[self.id_url] = (content_type, data)
expected_id = self.id_url
self.id_url = self.id_url + '#fragment'
id_url, services = discover.discover(self.id_url)
self.failUnlessEqual(expected_services, len(services))
self.failUnlessEqual(expected_id, id_url)
self._checkService(
services[0],
used_yadis=False,
types=['1.1'],
server_url="http://www.myopenid.com/server",
claimed_id=expected_id,
local_id='http://smoker.myopenid.com/',
display_identifier=expected_id,
)
def test_html2(self):
services = self._discover(
content_type='text/html',
data=readDataFile('openid2.html'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=False,
types=['2.0'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id='http://smoker.myopenid.com/',
display_identifier=self.id_url,
)
def test_html1And2(self):
services = self._discover(
content_type='text/html',
data=readDataFile('openid_1_and_2.html'),
expected_services=2,
)
for t, s in zip(['2.0', '1.1'], services):
self._checkService(
s,
used_yadis=False,
types=[t],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id='http://smoker.myopenid.com/',
display_identifier=self.id_url,
)
def test_yadisEmpty(self):
services = self._discover(content_type='application/xrds+xml',
data=readDataFile('yadis_0entries.xml'),
expected_services=0)
def test_htmlEmptyYadis(self):
"""HTML document has discovery information, but points to an
empty Yadis document."""
# The XRDS document pointed to by "openid_and_yadis.html"
self.documents[self.id_url + 'xrds'] = (
'application/xrds+xml', readDataFile('yadis_0entries.xml'))
services = self._discover(content_type='text/html',
data=readDataFile('openid_and_yadis.html'),
expected_services=1)
self._checkService(
services[0],
used_yadis=False,
types=['1.1'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id='http://smoker.myopenid.com/',
display_identifier=self.id_url,
)
def test_yadis1NoDelegate(self):
services = self._discover(content_type='application/xrds+xml',
data=readDataFile('yadis_no_delegate.xml'),
expected_services=1)
self._checkService(
services[0],
used_yadis=True,
types=['1.0'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id=self.id_url,
display_identifier=self.id_url,
)
def test_yadis2NoLocalID(self):
services = self._discover(
content_type='application/xrds+xml',
data=readDataFile('openid2_xrds_no_local_id.xml'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=True,
types=['2.0'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id=self.id_url,
display_identifier=self.id_url,
)
def test_yadis2(self):
services = self._discover(
content_type='application/xrds+xml',
data=readDataFile('openid2_xrds.xml'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=True,
types=['2.0'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id='http://smoker.myopenid.com/',
display_identifier=self.id_url,
)
def test_yadis2OP(self):
services = self._discover(
content_type='application/xrds+xml',
data=readDataFile('yadis_idp.xml'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=True,
types=['2.0 OP'],
server_url="http://www.myopenid.com/server",
display_identifier=self.id_url,
)
def test_yadis2OPDelegate(self):
"""The delegate tag isn't meaningful for OP entries."""
services = self._discover(
content_type='application/xrds+xml',
data=readDataFile('yadis_idp_delegate.xml'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=True,
types=['2.0 OP'],
server_url="http://www.myopenid.com/server",
display_identifier=self.id_url,
)
def test_yadis2BadLocalID(self):
self.failUnlessRaises(DiscoveryFailure, self._discover,
content_type='application/xrds+xml',
data=readDataFile('yadis_2_bad_local_id.xml'),
expected_services=1,
)
def test_yadis1And2(self):
services = self._discover(
content_type='application/xrds+xml',
data=readDataFile('openid_1_and_2_xrds.xml'),
expected_services=1,
)
self._checkService(
services[0],
used_yadis=True,
types=['2.0', '1.1'],
server_url="http://www.myopenid.com/server",
claimed_id=self.id_url,
local_id='http://smoker.myopenid.com/',
display_identifier=self.id_url,
)
def test_yadis1And2BadLocalID(self):
self.failUnlessRaises(DiscoveryFailure, self._discover,
content_type='application/xrds+xml',
data=readDataFile('openid_1_and_2_xrds_bad_delegate.xml'),
expected_services=1,
)
class MockFetcherForXRIProxy(object):
def __init__(self, documents, proxy_url=xrires.DEFAULT_PROXY):
self.documents = documents
self.fetchlog = []
        self.proxy_url = proxy_url
def fetch(self, url, body=None, headers=None):
self.fetchlog.append((url, body, headers))
u = urlsplit(url)
proxy_host = u[1]
xri = u[2]
query = u[3]
if not headers and not query:
raise ValueError("No headers or query; you probably didn't "
"mean to do that.")
if xri.startswith('/'):
xri = xri[1:]
try:
ctype, body = self.documents[xri]
except KeyError:
status = 404
ctype = 'text/plain'
body = ''
else:
status = 200
return HTTPResponse(url, status, {'content-type': ctype}, body)
class TestXRIDiscovery(BaseTestDiscovery):
fetcherClass = MockFetcherForXRIProxy
documents = {'=smoker': ('application/xrds+xml',
readDataFile('yadis_2entries_delegate.xml')),
'=smoker*bad': ('application/xrds+xml',
readDataFile('yadis_another_delegate.xml')) }
def test_xri(self):
user_xri, services = discover.discoverXRI('=smoker')
self._checkService(
services[0],
used_yadis=True,
types=['1.0'],
server_url="http://www.myopenid.com/server",
claimed_id=XRI("=!1000"),
canonical_id=XRI("=!1000"),
local_id='http://smoker.myopenid.com/',
display_identifier='=smoker'
)
self._checkService(
services[1],
used_yadis=True,
types=['1.0'],
server_url="http://www.livejournal.com/openid/server.bml",
claimed_id=XRI("=!1000"),
canonical_id=XRI("=!1000"),
local_id='http://frank.livejournal.com/',
display_identifier='=smoker'
)
def test_xri_normalize(self):
user_xri, services = discover.discoverXRI('xri://=smoker')
self._checkService(
services[0],
used_yadis=True,
types=['1.0'],
server_url="http://www.myopenid.com/server",
claimed_id=XRI("=!1000"),
canonical_id=XRI("=!1000"),
local_id='http://smoker.myopenid.com/',
display_identifier='=smoker'
)
self._checkService(
services[1],
used_yadis=True,
types=['1.0'],
server_url="http://www.livejournal.com/openid/server.bml",
claimed_id=XRI("=!1000"),
canonical_id=XRI("=!1000"),
local_id='http://frank.livejournal.com/',
display_identifier='=smoker'
)
def test_xriNoCanonicalID(self):
user_xri, services = discover.discoverXRI('=smoker*bad')
self.failIf(services)
def test_useCanonicalID(self):
"""When there is no delegate, the CanonicalID should be used with XRI.
"""
endpoint = discover.OpenIDServiceEndpoint()
endpoint.claimed_id = XRI("=!1000")
endpoint.canonicalID = XRI("=!1000")
self.failUnlessEqual(endpoint.getLocalID(), XRI("=!1000"))
class TestXRIDiscoveryIDP(BaseTestDiscovery):
fetcherClass = MockFetcherForXRIProxy
documents = {'=smoker': ('application/xrds+xml',
readDataFile('yadis_2entries_idp.xml')) }
def test_xri(self):
user_xri, services = discover.discoverXRI('=smoker')
self.failUnless(services, "Expected services, got zero")
self.failUnlessEqual(services[0].server_url,
"http://www.livejournal.com/openid/server.bml")
class TestPreferredNamespace(datadriven.DataDrivenTestCase):
def __init__(self, expected_ns, type_uris):
datadriven.DataDrivenTestCase.__init__(
self, 'Expecting %s from %s' % (expected_ns, type_uris))
self.expected_ns = expected_ns
self.type_uris = type_uris
def runOneTest(self):
endpoint = discover.OpenIDServiceEndpoint()
endpoint.type_uris = self.type_uris
actual_ns = endpoint.preferredNamespace()
self.failUnlessEqual(actual_ns, self.expected_ns)
cases = [
(message.OPENID1_NS, []),
(message.OPENID1_NS, ['http://jyte.com/']),
(message.OPENID1_NS, [discover.OPENID_1_0_TYPE]),
(message.OPENID1_NS, [discover.OPENID_1_1_TYPE]),
(message.OPENID2_NS, [discover.OPENID_2_0_TYPE]),
(message.OPENID2_NS, [discover.OPENID_IDP_2_0_TYPE]),
(message.OPENID2_NS, [discover.OPENID_2_0_TYPE,
discover.OPENID_1_0_TYPE]),
(message.OPENID2_NS, [discover.OPENID_1_0_TYPE,
discover.OPENID_2_0_TYPE]),
]
class TestIsOPIdentifier(unittest.TestCase):
def setUp(self):
self.endpoint = discover.OpenIDServiceEndpoint()
def test_none(self):
self.failIf(self.endpoint.isOPIdentifier())
def test_openid1_0(self):
self.endpoint.type_uris = [discover.OPENID_1_0_TYPE]
self.failIf(self.endpoint.isOPIdentifier())
def test_openid1_1(self):
self.endpoint.type_uris = [discover.OPENID_1_1_TYPE]
self.failIf(self.endpoint.isOPIdentifier())
def test_openid2(self):
self.endpoint.type_uris = [discover.OPENID_2_0_TYPE]
self.failIf(self.endpoint.isOPIdentifier())
def test_openid2OP(self):
self.endpoint.type_uris = [discover.OPENID_IDP_2_0_TYPE]
self.failUnless(self.endpoint.isOPIdentifier())
def test_multipleMissing(self):
self.endpoint.type_uris = [discover.OPENID_2_0_TYPE,
discover.OPENID_1_0_TYPE]
self.failIf(self.endpoint.isOPIdentifier())
def test_multiplePresent(self):
self.endpoint.type_uris = [discover.OPENID_2_0_TYPE,
discover.OPENID_1_0_TYPE,
discover.OPENID_IDP_2_0_TYPE]
self.failUnless(self.endpoint.isOPIdentifier())
class TestFromOPEndpointURL(unittest.TestCase):
def setUp(self):
self.op_endpoint_url = 'http://example.com/op/endpoint'
self.endpoint = discover.OpenIDServiceEndpoint.fromOPEndpointURL(
self.op_endpoint_url)
def test_isOPEndpoint(self):
self.failUnless(self.endpoint.isOPIdentifier())
def test_noIdentifiers(self):
self.failUnlessEqual(self.endpoint.getLocalID(), None)
self.failUnlessEqual(self.endpoint.claimed_id, None)
def test_compatibility(self):
self.failIf(self.endpoint.compatibilityMode())
def test_canonicalID(self):
self.failUnlessEqual(self.endpoint.canonicalID, None)
def test_serverURL(self):
self.failUnlessEqual(self.endpoint.server_url, self.op_endpoint_url)
class TestDiscoverFunction(unittest.TestCase):
def setUp(self):
self._old_discoverURI = discover.discoverURI
self._old_discoverXRI = discover.discoverXRI
discover.discoverXRI = self.discoverXRI
discover.discoverURI = self.discoverURI
def tearDown(self):
discover.discoverURI = self._old_discoverURI
discover.discoverXRI = self._old_discoverXRI
def discoverXRI(self, identifier):
return 'XRI'
def discoverURI(self, identifier):
return 'URI'
def test_uri(self):
self.failUnlessEqual('URI', discover.discover('http://woo!'))
def test_uriForBogus(self):
self.failUnlessEqual('URI', discover.discover('not a URL or XRI'))
def test_xri(self):
self.failUnlessEqual('XRI', discover.discover('xri://=something'))
def test_xriChar(self):
self.failUnlessEqual('XRI', discover.discover('=something'))
class TestEndpointSupportsType(unittest.TestCase):
def setUp(self):
self.endpoint = discover.OpenIDServiceEndpoint()
def failUnlessSupportsOnly(self, *types):
for t in [
'foo',
discover.OPENID_1_1_TYPE,
discover.OPENID_1_0_TYPE,
discover.OPENID_2_0_TYPE,
discover.OPENID_IDP_2_0_TYPE,
]:
if t in types:
self.failUnless(self.endpoint.supportsType(t),
"Must support %r" % (t,))
else:
self.failIf(self.endpoint.supportsType(t),
"Shouldn't support %r" % (t,))
def test_supportsNothing(self):
self.failUnlessSupportsOnly()
def test_openid2(self):
self.endpoint.type_uris = [discover.OPENID_2_0_TYPE]
self.failUnlessSupportsOnly(discover.OPENID_2_0_TYPE)
def test_openid2provider(self):
self.endpoint.type_uris = [discover.OPENID_IDP_2_0_TYPE]
self.failUnlessSupportsOnly(discover.OPENID_IDP_2_0_TYPE,
discover.OPENID_2_0_TYPE)
def test_openid1_0(self):
self.endpoint.type_uris = [discover.OPENID_1_0_TYPE]
self.failUnlessSupportsOnly(discover.OPENID_1_0_TYPE)
def test_openid1_1(self):
self.endpoint.type_uris = [discover.OPENID_1_1_TYPE]
self.failUnlessSupportsOnly(discover.OPENID_1_1_TYPE)
def test_multiple(self):
self.endpoint.type_uris = [discover.OPENID_1_1_TYPE,
discover.OPENID_2_0_TYPE]
self.failUnlessSupportsOnly(discover.OPENID_1_1_TYPE,
discover.OPENID_2_0_TYPE)
def test_multipleWithProvider(self):
self.endpoint.type_uris = [discover.OPENID_1_1_TYPE,
discover.OPENID_2_0_TYPE,
discover.OPENID_IDP_2_0_TYPE]
self.failUnlessSupportsOnly(discover.OPENID_1_1_TYPE,
discover.OPENID_2_0_TYPE,
discover.OPENID_IDP_2_0_TYPE,
)
class TestEndpointDisplayIdentifier(unittest.TestCase):
def test_strip_fragment(self):
endpoint = discover.OpenIDServiceEndpoint()
endpoint.claimed_id = 'http://recycled.invalid/#123'
self.failUnlessEqual('http://recycled.invalid/', endpoint.getDisplayIdentifier())
def pyUnitTests():
return datadriven.loadTests(__name__)
if __name__ == '__main__':
suite = pyUnitTests()
runner = unittest.TextTestRunner()
runner.run(suite)
|
p4datasystems/CarnotKEdist
|
refs/heads/master
|
dist/Lib/DocXMLRPCServer.py
|
250
|
"""Self documenting XML-RPC Server.
This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""
import pydoc
import inspect
import re
import sys
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
# XXX Note that this regular expression does not allow for the
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
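        # For example, markup('See RFC 2822 and http://example.com/') emits
        # anchor tags for both the RFC reference and the bare URL.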
while 1:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
def docroutine(self, object, name, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (
self.escape(anchor), self.escape(name))
if inspect.ismethod(object):
args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
# exclude the argument bound to the instance, it will be
# confusing to the non-Python user
argspec = inspect.formatargspec (
args[1:],
varargs,
varkw,
defaults,
formatvalue=self.formatvalue
)
elif inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = object[0] or argspec
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
doc = self.markup(
docstring, self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docserver(self, server_name, package_documentation, methods):
"""Produce HTML documentation for an XML-RPC server."""
fdict = {}
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
contents = []
method_items = sorted(methods.items())
for key, value in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = result + self.bigsection(
'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
return result
class XMLRPCDocGenerator:
"""Generates documentation for an XML-RPC server.
This class is designed as mix-in and should not
be constructed directly.
"""
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
self.server_documentation = \
"This server exports the following methods through the XML-RPC "\
"protocol."
self.server_title = 'XML-RPC Server Documentation'
def set_server_title(self, server_title):
"""Set the HTML title of the generated server documentation"""
self.server_title = server_title
def set_server_name(self, server_name):
"""Set the name of the generated HTML server documentation"""
self.server_name = server_name
def set_server_documentation(self, server_documentation):
"""Set the documentation string for the entire server."""
self.server_documentation = server_documentation
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
Handles all HTTP GET requests and interprets them as requests
for documentation.
"""
def do_GET(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
XMLRPCDocGenerator):
"""XML-RPC and HTML documentation server.
Adds the ability to serve server documentation to the capabilities
of SimpleXMLRPCServer.
"""
def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
logRequests=1, allow_none=False, encoding=None,
bind_and_activate=True):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate)
XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
CGI"""
def handle_get(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
response = self.generate_html_documentation()
print 'Content-Type: text/html'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
def __init__(self):
CGIXMLRPCRequestHandler.__init__(self)
XMLRPCDocGenerator.__init__(self)
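if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; the host, port and the
    # registered function are arbitrary choices, not part of this module).
    server = DocXMLRPCServer(('localhost', 8000), logRequests=0)
    server.set_server_title('Demo server')
    server.set_server_name('demo')
    server.register_function(pow)
    server.serve_forever()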
|
direvus/ansible
|
refs/heads/devel
|
docs/bin/generate_man.py
|
23
|
#!/usr/bin/env python
import optparse
import os
import sys
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils._text import to_bytes
from ansible.utils._build_helpers import update_file_if_different
def generate_parser():
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options]',
description='Generate cli documentation from cli docstrings',
)
p.add_option("-t", "--template-file", action="store", dest="template_file", default="../templates/man.j2", help="path to jinja2 template")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files")
p.add_option("-f", "--output-format", action="store", dest="output_format", default='man', help="Output format for docs (the default 'man' or 'rst')")
return p
# from https://www.python.org/dev/peps/pep-0257/
def trim_docstring(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
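# Worked example with a hypothetical docstring:
#   trim_docstring("  Summary line.\n      Indented detail.\n")
# returns 'Summary line.\nIndented detail.' -- the common indentation and the
# trailing blank line are stripped, per PEP 257.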
def get_options(optlist):
''' get actual options '''
opts = []
for opt in optlist:
res = {
'desc': opt.help,
'options': opt._short_opts + opt._long_opts
}
if opt.action == 'store':
res['arg'] = opt.dest.upper()
opts.append(res)
return opts
def get_option_groups(option_parser):
groups = []
for option_group in option_parser.option_groups:
group_info = {}
group_info['desc'] = option_group.get_description()
group_info['options'] = option_group.option_list
group_info['group_obj'] = option_group
groups.append(group_info)
return groups
def opt_doc_list(cli):
''' iterate over options lists '''
results = []
for option_group in cli.parser.option_groups:
results.extend(get_options(option_group.option_list))
results.extend(get_options(cli.parser.option_list))
return results
# def opts_docs(cli, name):
def opts_docs(cli_class_name, cli_module_name):
''' generate doc structure from options '''
cli_name = 'ansible-%s' % cli_module_name
if cli_module_name == 'adhoc':
cli_name = 'ansible'
    # With no action/subcommand
# shared opts set
# instantiate each cli and ask its options
cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
fromlist=[cli_class_name]), cli_class_name)
cli = cli_klass([])
# parse the common options
try:
cli.parse()
except:
pass
# base/common cli info
docs = {
'cli': cli_module_name,
'cli_name': cli_name,
'usage': cli.parser.usage,
'short_desc': cli.parser.description,
'long_desc': trim_docstring(cli.__doc__),
'actions': {},
}
option_info = {'option_names': [],
'options': [],
'groups': []}
    for extras in ('ARGUMENTS',):  # a tuple; a bare string here would iterate over characters
if hasattr(cli, extras):
docs[extras.lower()] = getattr(cli, extras)
common_opts = opt_doc_list(cli)
groups_info = get_option_groups(cli.parser)
shared_opt_names = []
for opt in common_opts:
shared_opt_names.extend(opt.get('options', []))
option_info['options'] = common_opts
option_info['option_names'] = shared_opt_names
option_info['groups'].extend(groups_info)
docs.update(option_info)
# now for each action/subcommand
# force populate parser with per action options
    # use class attrs, not the attrs on an instance (not that it matters here...)
for action in getattr(cli_klass, 'VALID_ACTIONS', ()):
# instantiate each cli and ask its options
action_cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name,
fromlist=[cli_class_name]), cli_class_name)
# init with args with action added?
cli = action_cli_klass([])
cli.args.append(action)
        try:
            cli.parse()
        except:
            # parse() can sys.exit() since the per-action args are incomplete
            pass
# FIXME/TODO: needed?
# avoid dupe errors
cli.parser.set_conflict_handler('resolve')
cli.set_action()
action_info = {'option_names': [],
'options': []}
# docs['actions'][action] = {}
# docs['actions'][action]['name'] = action
action_info['name'] = action
action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__)
# docs['actions'][action]['desc'] = getattr(cli, 'execute_%s' % action).__doc__.strip()
action_doc_list = opt_doc_list(cli)
uncommon_options = []
for action_doc in action_doc_list:
# uncommon_options = []
option_aliases = action_doc.get('options', [])
for option_alias in option_aliases:
if option_alias in shared_opt_names:
continue
# TODO: use set
if option_alias not in action_info['option_names']:
action_info['option_names'].append(option_alias)
if action_doc in action_info['options']:
continue
uncommon_options.append(action_doc)
action_info['options'] = uncommon_options
docs['actions'][action] = action_info
docs['options'] = opt_doc_list(cli)
return docs
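# For reference (hypothetical values): opts_docs('VaultCLI', 'vault') yields roughly
#   {'cli': 'vault', 'cli_name': 'ansible-vault', 'usage': '...', 'short_desc': '...',
#    'long_desc': '...', 'actions': {'encrypt': {...}, 'decrypt': {...}},
#    'options': [...], 'option_names': [...], 'groups': [...]}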
if __name__ == '__main__':
parser = generate_parser()
options, args = parser.parse_args()
template_file = options.template_file
template_path = os.path.expanduser(template_file)
template_dir = os.path.abspath(os.path.dirname(template_path))
template_basename = os.path.basename(template_file)
output_dir = os.path.abspath(options.output_dir)
output_format = options.output_format
cli_modules = args
    # various cli parsing code checks sys.argv when the 'args' passed in are []
    # so remove any extra argv entries so the cli modules don't try to parse
    # them and emit warnings
sys.argv = [sys.argv[0]]
# need to be in right dir
os.chdir(os.path.dirname(__file__))
allvars = {}
output = {}
cli_list = []
cli_bin_name_list = []
# for binary in os.listdir('../../lib/ansible/cli'):
for cli_module_name in cli_modules:
binary = os.path.basename(os.path.expanduser(cli_module_name))
if not binary.endswith('.py'):
continue
elif binary == '__init__.py':
continue
cli_name = os.path.splitext(binary)[0]
if cli_name == 'adhoc':
cli_class_name = 'AdHocCLI'
# myclass = 'AdHocCLI'
output[cli_name] = 'ansible.1.rst.in'
cli_bin_name = 'ansible'
else:
# myclass = "%sCLI" % libname.capitalize()
cli_class_name = "%sCLI" % cli_name.capitalize()
output[cli_name] = 'ansible-%s.1.rst.in' % cli_name
cli_bin_name = 'ansible-%s' % cli_name
# FIXME:
allvars[cli_name] = opts_docs(cli_class_name, cli_name)
cli_bin_name_list.append(cli_bin_name)
cli_list = allvars.keys()
doc_name_formats = {'man': '%s.1.rst.in',
'rst': '%s.rst'}
for cli_name in cli_list:
# template it!
env = Environment(loader=FileSystemLoader(template_dir))
template = env.get_template(template_basename)
# add rest to vars
tvars = allvars[cli_name]
tvars['cli_list'] = cli_list
tvars['cli_bin_name_list'] = cli_bin_name_list
tvars['cli'] = cli_name
        # tvars['options'] is a list of option dicts, so look inside each one
        if any('-i' in opt.get('options', []) for opt in tvars['options']):
            print('uses inventory')
manpage = template.render(tvars)
filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name'])
update_file_if_different(filename, to_bytes(manpage))
|
DC07/android_kernel_lge_dory
|
refs/heads/master
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
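# Example (illustrative only): for "M" opcodes the status value packs one
# mutex-state digit per lock, so with top = ["M", "eq", 2] and arg = "1",
# analyse("4321", top, "1") extracts digit 2 (4321 / 10**1 % 10) and returns 1.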
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
luceatnobis/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/usatoday.py
|
70
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
get_element_by_attribute,
parse_duration,
update_url_query,
ExtractorError,
)
from ..compat import compat_str
class USATodayIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?usatoday\.com/(?:[^/]+/)*(?P<id>[^?/#]+)'
_TEST = {
'url': 'http://www.usatoday.com/media/cinematic/video/81729424/us-france-warn-syrian-regime-ahead-of-new-peace-talks/',
'md5': '4d40974481fa3475f8bccfd20c5361f8',
'info_dict': {
'id': '81729424',
'ext': 'mp4',
'title': 'US, France warn Syrian regime ahead of new peace talks',
'timestamp': 1457891045,
'description': 'md5:7e50464fdf2126b0f533748d3c78d58f',
'uploader_id': '29906170001',
'upload_date': '20160313',
}
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/29906170001/38a9eecc-bdd8-42a3-ba14-95397e48b3f8_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(update_url_query(url, {'ajax': 'true'}), display_id)
ui_video_data = get_element_by_attribute('class', 'ui-video-data', webpage)
if not ui_video_data:
raise ExtractorError('no video on the webpage', expected=True)
video_data = self._parse_json(ui_video_data, display_id)
return {
'_type': 'url_transparent',
'url': self.BRIGHTCOVE_URL_TEMPLATE % video_data['brightcove_id'],
'id': compat_str(video_data['id']),
'title': video_data['title'],
'thumbnail': video_data.get('thumbnail'),
'description': video_data.get('description'),
'duration': parse_duration(video_data.get('length')),
'ie_key': 'BrightcoveNew',
}
|
browseinfo/odoo_saas3_nicolas
|
refs/heads/master
|
openerp/loglevels.py
|
380
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
LOG_NOTSET = 'notset'
LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
# TODO get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are here until we refactor tools so that this module doesn't depends on tools.
def get_encodings(hint_encoding='utf-8'):
fallbacks = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}
if hint_encoding:
yield hint_encoding
if hint_encoding.lower() in fallbacks:
yield fallbacks[hint_encoding.lower()]
# some defaults (also taking care of pure ASCII)
for charset in ['utf8','latin1']:
if not hint_encoding or (charset.lower() != hint_encoding.lower()):
yield charset
from locale import getpreferredencoding
prefenc = getpreferredencoding()
if prefenc and prefenc.lower() != 'utf-8':
yield prefenc
prefenc = fallbacks.get(prefenc.lower())
if prefenc:
yield prefenc
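# Illustrative call order (not part of the original module):
# get_encodings('latin1') yields 'latin1', then its fallback 'latin9',
# then 'utf8', and finally the locale's preferred encoding (plus its
# fallback) when it is not already utf-8.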
def ustr(value, hint_encoding='utf-8', errors='strict'):
"""This method is similar to the builtin `unicode`, except
that it may try multiple encodings to find one that works
for decoding `value`, and defaults to 'utf-8' first.
    :param value: the value to convert
    :param hint_encoding: an optional encoding that was detected
upstream and should be tried first to decode ``value``.
:param str errors: optional `errors` flag to pass to the unicode
built-in to indicate how illegal character values should be
treated when converting a string: 'strict', 'ignore' or 'replace'
(see ``unicode()`` constructor).
Passing anything other than 'strict' means that the first
encoding tried will be used, even if it's not the correct
one to use, so be careful! Ignored if value is not a string/unicode.
:raise: UnicodeError if value cannot be coerced to unicode
:return: unicode string representing the given value
"""
if isinstance(value, Exception):
return exception_to_unicode(value)
if isinstance(value, unicode):
return value
if not isinstance(value, basestring):
try:
return unicode(value)
except Exception:
raise UnicodeError('unable to convert %r' % (value,))
for ln in get_encodings(hint_encoding):
try:
return unicode(value, ln, errors=errors)
except Exception:
pass
raise UnicodeError('unable to convert %r' % (value,))
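# Illustrative example (Python 2, not part of the original module):
# ustr('caf\xe9') first tries utf-8, which fails on the lone 0xe9 byte,
# then falls back until latin1 succeeds, returning u'caf\xe9' ("café").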
def exception_to_unicode(e):
if (sys.version_info[:2] < (2,6)) and hasattr(e, 'message'):
return ustr(e.message)
if hasattr(e, 'args'):
return "\n".join((ustr(a) for a in e.args))
try:
return unicode(e)
except Exception:
return u"Unknown message"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
galaxyproject/starforge
|
refs/heads/master
|
starforge/commands/cmd_sdist.py
|
3
|
"""
"""
from __future__ import absolute_import
import click
from os import getcwd
from ..cli import pass_context
from ..config.wheels import WheelConfigManager
from ..forge.wheels import ForgeWheel
from ..cache import CacheManager
from ..execution.local import LocalExecutionContext
from ..util import xdg_config_file
@click.command('sdist')
@click.option('--wheels-config',
default=xdg_config_file(name='wheels.yml'),
type=click.Path(file_okay=True,
writable=False,
resolve_path=True),
help='Path to wheels config file')
@click.option('-i', '--image',
default=None,
help='Name of image (in wheels config) under which wheel is '
'building')
@click.option('-o', '--output',
default=getcwd(),
type=click.Path(file_okay=False),
help='Copy output wheels to OUTPUT')
@click.option('-u', '--uid',
default=-1,
type=click.STRING,
help='Change ownership of output(s) to UID')
@click.option('-g', '--gid',
default=-1,
type=click.STRING,
help='Change group of output(s) to UID')
@click.option('--fetch-srcs/--no-fetch-srcs',
default=False,
help='Enable or disable fetching/caching of sources (normally '
                   'this is done by `starforge wheel`)')
@click.argument('wheel')
@pass_context
def cli(ctx, wheels_config, image, output, uid, gid, fetch_srcs, wheel):
""" Build a Python source distribution (tarball).
This command is not typically meant to be run directly, you should use
`starforge wheel` instead. This is the command that `starforge wheel` calls
inside the virtualized environment to actually produce sdists.
    WARNING: This command uses tarfile's extractall() method, which is
insecure. Because this method is intended to be run under ephemeral
virtualization, this is not normally a concern, but if you are running it
by hand, you should be aware of the security risk.
"""
wheel_cfgmgr = WheelConfigManager.open(ctx.config, wheels_config)
cachemgr = CacheManager(ctx.config.cache_path)
wheel_config = wheel_cfgmgr.get_wheel_config(wheel)
# `image` is an image_name until here
image = wheel_config.get_image(image)
ectx = LocalExecutionContext(image)
forge = ForgeWheel(wheel_config, cachemgr, ectx.run_context, image=image)
if fetch_srcs:
forge.cache_sources()
forge.sdist(output=output, uid=uid, gid=gid)
|
MarkusAmshove/Photobox
|
refs/heads/master
|
photobox/__init__.py
|
12133432
| |
njncalub/storm
|
refs/heads/master
|
stormapp/__init__.py
|
12133432
| |
ray-pixar/Anki-Android
|
refs/heads/develop
|
tools/manage-crowdin.py
|
15
|
#!/usr/bin/python
# Copyright (c) 2010 norbert.nagold@gmail.com
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script updates the master file(s) for crowdin.net
# There seems to be a bug in the upload of translation files; it's therefore deactivated
import pycurl
import StringIO
import sys
import string
import os
CROWDIN_KEY = ''
PROJECT_IDENTIFIER = 'ankidroid'
path = '../AnkiDroid/src/main/res/values/'
files = ['01-core', '02-strings', '03-dialogs', '04-network', '05-feedback', '06-statistics', '07-cardbrowser', '08-widget', '09-backup', '10-preferences', '11-arrays', '13-newfeatures', '14-marketdescription']
alllang = ['ar', 'ca', 'cs', 'de', 'el', 'es-AR', 'es-ES', 'fa', 'fi', 'fr', 'hu', 'id', 'it', 'ja', 'ko', 'nl', 'pl', 'pt-PT', 'pt-BR', 'ro', 'ru', 'sr', 'sv-SE', 'th', 'tr', 'vi', 'zh-CN', 'zh-TW']
def uploadtranslation(language, filename, sourcefile):
if len(language) > 2:
pathlan = string.replace(language, '-', '-r')
if not os.path.exists('../res/values-' + pathlan):
pathlan = pathlan[:2]
else:
pathlan = language
path = '../res/values-' + pathlan + '/'
filename = filename + '.xml'
# if selu == 's':
# filename = 'strings.xml'
# elif selu == 'a':
# filename = 'arrays.xml'
# else:
# filename = ''
# print "nothing to do"
print 'Update of Translation '+language+' for '+filename
if filename:
if language:
c = pycurl.Curl()
fields = [('files['+filename+']', (c.FORM_FILE, path + sourcefile + '.xml')), ('language', language), ('auto_approve_imported','0'), ('import_eq_suggestions','0')]
c.setopt(pycurl.URL, 'https://api.crowdin.com/api/project/' + PROJECT_IDENTIFIER + '/upload-translation?key=' + CROWDIN_KEY)
c.setopt(pycurl.HTTPPOST, fields)
b = StringIO.StringIO()
c.setopt(pycurl.WRITEFUNCTION, b.write)
c.perform()
c.close()
print b.getvalue()
else:
print 'no language code entered'
def updateMasterFile(fn):
if fn == '14-marketdescription':
targetName = '14-marketdescription.txt'
sourceName = '../docs/marketing/localized_description/marketdescription.txt'
else:
targetName = fn + '.xml'
sourceName = path + targetName
if targetName:
print 'Update of Master File ' + targetName
c = pycurl.Curl()
fields = [('files['+targetName+']', (c.FORM_FILE, sourceName))]
c.setopt(pycurl.URL, 'https://api.crowdin.com/api/project/' + PROJECT_IDENTIFIER + '/update-file?key=' + CROWDIN_KEY)
c.setopt(pycurl.HTTPPOST, fields)
b = StringIO.StringIO()
c.setopt(pycurl.WRITEFUNCTION, b.write)
c.perform()
c.close()
print b.getvalue()
try:
c = open("crowdin_key.txt","r+")
    CROWDIN_KEY = c.readline().strip()  # drop the trailing newline before use in URLs
c.close()
except IOError as e:
CROWDIN_KEY = raw_input("please enter your crowdin key or create \'crowdin_key.txt\': ")
sel = raw_input("update (m)aster file, update (t)ranslation or (r)efresh builds? ")
if sel == 'm':
# Update Master Files:
fn = raw_input("update " + ', '.join([str(x) for x in files]) + ", (all)?")
if fn == 'all':
for n in range(0, len(files)):
updateMasterFile(files[n])
else:
updateMasterFile(fn)
elif sel == 't':
# Update Translations:
print 'still problems with crowding here'
language = raw_input("enter language code: ")
selu = raw_input("update 0(1)-core, 0(2)-strings, 0(3)-dialogs, 0(4)-network, 0(5)-feedback, 0(6)-statistics, 0(7)-cardbrowser, 0(8)-widget, 0(9)-backup, (10)-preferences, (11)-arrays, (13)-newfeatures? ")
if selu == '12' or selu == '14':
print "translations of this file cannot be uploaded"
elif selu != 'all':
defaultSource = files[int(selu)-1]
sourcefile = raw_input("enter source file (default: " + defaultSource + "): ")
if sourcefile == "":
sourcefile = defaultSource
if language == 'all':
for language in alllang:
if selu == 'all':
for s in files:
uploadtranslation(language, s, s)
else:
uploadtranslation(language, files[int(selu)-1], sourcefile)
elif selu == 'all':
for s in files:
uploadtranslation(language, s, s)
else:
uploadtranslation(language, files[int(selu)-1], sourcefile)
elif sel == 'r':
# Update Translations:
print "Force translation export"
c = pycurl.Curl()
c.setopt(pycurl.URL, 'https://api.crowdin.com/api/project/' + PROJECT_IDENTIFIER + '/export?&key=' + CROWDIN_KEY)
b = StringIO.StringIO()
c.setopt(pycurl.WRITEFUNCTION, b.write)
c.perform()
c.close()
print b.getvalue()
else:
print "nothing to do"
|
CI-WATER/tethys
|
refs/heads/master
|
tests/unit_tests/test_tethys_services/test_models/test_WebProcessingService.py
|
2
|
from tethys_sdk.testing import TethysTestCase
import tethys_services.models as service_model
from unittest import mock
from tethys_services.models import HTTPError, URLError
class WebProcessingServiceTests(TethysTestCase):
def set_up(self):
pass
def tear_down(self):
pass
def test_unicode(self):
wps = service_model.WebProcessingService(
name='test_sds',
)
self.assertEqual('test_sds', str(wps))
def test_activate(self):
wps = service_model.WebProcessingService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
wps.save()
# Check result
mock_wps = mock.MagicMock()
mock_wps.getcapabilities.return_value = 'test'
ret = wps.activate(mock_wps)
mock_wps.getcapabilities.assert_called()
self.assertEqual(mock_wps, ret)
def test_activate_http_error_404(self):
wps = service_model.WebProcessingService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
# Check result
mock_wps = mock.MagicMock()
mock_wps.getcapabilities.side_effect = HTTPError(url='test_url', code=404, msg='test_message',
hdrs='test_header', fp=None)
self.assertRaises(HTTPError, wps.activate, mock_wps)
def test_activate_http_error_not_404(self):
wps = service_model.WebProcessingService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
# Check result
mock_wps = mock.MagicMock()
mock_wps.getcapabilities.side_effect = HTTPError(url='test_url', code=500, msg='test_message',
hdrs='test_header', fp=None)
self.assertRaises(HTTPError, wps.activate, mock_wps)
def test_activate_url_error(self):
wps = service_model.WebProcessingService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
# Check result
mock_wps = mock.MagicMock()
mock_wps.getcapabilities.side_effect = URLError(reason='test_url')
ret = wps.activate(mock_wps)
self.assertIsNone(ret)
def test_activate_error(self):
wps = service_model.WebProcessingService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
# Check result
mock_wps = mock.MagicMock()
mock_wps.getcapabilities.side_effect = Exception
self.assertRaises(Exception, wps.activate, mock_wps)
@mock.patch('tethys_services.models.WebProcessingService.activate')
@mock.patch('tethys_services.models.WPS')
def test_get_engine(self, mock_wps, mock_activate):
wps = service_model.WebProcessingService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
wps.save()
# Execute
wps.get_engine()
# Check called
mock_wps.assert_called_with('http://localhost/geoserver/rest/', password='password',
skip_caps=True, username='foo', verbose=False)
mock_activate.assert_called_with(wps=mock_wps())
|
les-patissiers-du-web/django-website
|
refs/heads/master
|
patissiers/blog/templatetags/custom_filters.py
|
1
|
from django import template
import markdown
register = template.Library()
@register.filter
def markdown2html(content):
return markdown.markdown(content, extensions=['extra', 'attr_list', 'def_list', 'nl2br', 'toc'])
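# Illustrative template usage (the variable name is hypothetical; Django
# autoescapes the returned HTML string, hence the trailing |safe):
#   {% load custom_filters %}
#   {{ post.content|markdown2html|safe }}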
|
DeltaEpsilon-HackFMI2/FMICalendar-REST
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/operations.py
|
100
|
from django.db.backends.mysql.base import DatabaseOperations
from django.contrib.gis.db.backends.adapter import WKTAdapter
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.utils import six
class MySQLOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.backends.mysql.compiler'
mysql = True
name = 'mysql'
select = 'AsText(%s)'
from_wkb = 'GeomFromWKB'
from_text = 'GeomFromText'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
geometry_functions = {
'bbcontains' : 'MBRContains', # For consistency w/PostGIS API
'bboverlaps' : 'MBROverlaps', # .. ..
'contained' : 'MBRWithin', # .. ..
'contains' : 'MBRContains',
'disjoint' : 'MBRDisjoint',
'equals' : 'MBREqual',
'exact' : 'MBREqual',
'intersects' : 'MBRIntersects',
'overlaps' : 'MBROverlaps',
'same_as' : 'MBREqual',
'touches' : 'MBRTouches',
'within' : 'MBRWithin',
}
gis_terms = dict([(term, None) for term in list(geometry_functions) + ['isnull']])
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, value, srid):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'expression'):
placeholder = self.get_expression_column(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
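    # Illustrative note (not part of the original source): for a plain
    # geometry value this returns 'GeomFromText(%s)', so MySQL itself parses
    # the WKT string that gets bound to %s.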
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
alias, col, db_type = lvalue
geo_col = '%s.%s' % (qn(alias), qn(col))
lookup_info = self.geometry_functions.get(lookup_type, False)
if lookup_info:
return "%s(%s, %s)" % (lookup_info, geo_col,
self.get_geom_placeholder(value, field.srid))
# TODO: Is this really necessary? MySQL can't handle NULL geometries
# in its spatial indexes anyways.
if lookup_type == 'isnull':
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
|
kirca/odoo
|
refs/heads/master
|
addons/website_payment/__init__.py
|
389
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
monash-merc/karaage
|
refs/heads/master
|
karaage/plugins/kgusage/conf/__init__.py
|
63
|
# Copyright 2015 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
|
40223137/150601
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/weakref.py
|
769
|
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
# This should only happen
return default
else:
return o
def items(self):
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.values():
yield wr
def values(self):
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return list(self.data.values())
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
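# Illustrative usage (not part of the stdlib source):
#   class Obj(object): pass
#   d = WeakValueDictionary()
#   o = Obj(); d['k'] = o
#   'k' in d    # True while o is strongly referenced
#   del o       # once o is collected, the 'k' entry is discarded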
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
|
dimroc/tensorflow-mnist-tutorial
|
refs/heads/master
|
lib/python3.6/site-packages/tensorflow/python/util/all_util.py
|
43
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate __all__ from a module docstring."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect as _inspect
import re as _re
import sys as _sys
_reference_pattern = _re.compile(r'^@@(\w+)$', flags=_re.MULTILINE)
def make_all(module_name, doc_string_modules=None):
"""Generates `__all__` from the docstring of one or more modules.
Usage: `make_all(__name__)` or
`make_all(__name__, [sys.modules(__name__), other_module])`. The doc string
  modules must each have a docstring, and `__all__` will contain all symbols with
`@@` references, where that symbol currently exists in the module named
`module_name`.
Args:
module_name: The name of the module (usually `__name__`).
doc_string_modules: a list of modules from which to take docstring.
If None, then a list containing only the module named `module_name` is used.
Returns:
A list suitable for use as `__all__`.
"""
if doc_string_modules is None:
doc_string_modules = [_sys.modules[module_name]]
cur_members = set([name for name, _
in _inspect.getmembers(_sys.modules[module_name])])
results = set()
for doc_module in doc_string_modules:
results.update([m.group(1)
for m in _reference_pattern.finditer(doc_module.__doc__)
if m.group(1) in cur_members])
return list(results)
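# Illustrative example (hypothetical module): given a module docstring containing
#   """Math helpers.
#   @@add
#   @@multiply
#   """
# make_all(__name__) returns ['add', 'multiply'], provided both names exist
# as members of the module.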
# Hidden attributes are attributes that have been hidden by
# `remove_undocumented`. They can be re-instated by `reveal_undocumented`.
# This maps symbol names to a tuple, containing:
# (module object, attribute value)
_HIDDEN_ATTRIBUTES = {}
def reveal_undocumented(symbol_name, target_module=None):
"""Reveals a symbol that was previously removed by `remove_undocumented`.
This should be used by tensorflow internal tests only. It explicitly
defeats the encapsulation afforded by `remove_undocumented`.
It throws an exception when the symbol was not hidden in the first place.
Args:
symbol_name: a string representing the full absolute path of the symbol.
target_module: if specified, the module in which to restore the symbol.
"""
if symbol_name not in _HIDDEN_ATTRIBUTES:
raise LookupError('Symbol %s is not a hidden symbol' % symbol_name)
symbol_basename = symbol_name.split('.')[-1]
(original_module, attr_value) = _HIDDEN_ATTRIBUTES[symbol_name]
if not target_module: target_module = original_module
setattr(target_module, symbol_basename, attr_value)
def remove_undocumented(module_name, allowed_exception_list=None,
doc_string_modules=None):
"""Removes symbols in a module that are not referenced by a docstring.
Args:
module_name: the name of the module (usually `__name__`).
allowed_exception_list: a list of names that should not be removed.
doc_string_modules: a list of modules from which to take the docstrings.
If None, then a list containing only the module named `module_name` is used.
  Furthermore, if a symbol was previously added with `add_to_global_whitelist`,
  it will always be allowed. This is useful for internal tests.
Returns:
None
"""
current_symbols = set(dir(_sys.modules[module_name]))
should_have = make_all(module_name, doc_string_modules)
should_have += allowed_exception_list or []
extra_symbols = current_symbols - set(should_have)
target_module = _sys.modules[module_name]
for extra_symbol in extra_symbols:
# Skip over __file__, etc. Also preserves internal symbols.
if extra_symbol.startswith('_'): continue
fully_qualified_name = module_name + '.' + extra_symbol
_HIDDEN_ATTRIBUTES[fully_qualified_name] = (target_module,
getattr(target_module,
extra_symbol))
delattr(target_module, extra_symbol)
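# Illustrative round trip (the symbol name 'helper' is hypothetical):
#   remove_undocumented(__name__)                # hides members lacking @@ references
#   reveal_undocumented(__name__ + '.helper')    # restores the hidden 'helper'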
__all__ = [
'make_all',
'remove_undocumented',
'reveal_undocumented',
]
|
HomeHabbit/dialog-watson-client
|
refs/heads/master
|
dialog_watson_client/exceptions/__init__.py
|
12133432
|