repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
kawamon/hue | refs/heads/master | desktop/core/ext-py/django-extensions-1.8.0/django_extensions/utils/deprecation.py | 8 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.utils.deprecation import RemovedInNextVersionWarning
class MarkedForDeprecationWarning(RemovedInNextVersionWarning):
    """Warning category for django-extensions features slated for removal.

    Subclasses Django's ``RemovedInNextVersionWarning`` so that callers can
    filter django-extensions deprecations separately from Django's own.
    """
    pass
|
KirarinSnow/Google-Code-Jam | refs/heads/master | Qualification Round 2009/A.py | 1 | #!/usr/bin/env python
#
# Problem: Alien Language
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
import re
# Header: word length, number of known alien words, number of test cases.
lth, wds, case = map(int, raw_input().split())
words = [raw_input() for i in range(wds)]
for i in range(case):
    # Turn the alien pattern "(ab)c" into the regex "[ab]c": a parenthesised
    # group of letters is exactly a character class.
    test = raw_input().replace(')', ']').replace('(', '[')
    # re.match anchors at the start only, but since the pattern and every
    # dictionary word consume exactly `lth` characters a prefix match here is
    # effectively a full match.
    c = len(filter(lambda j: re.match(test, words[j]), range(wds)))
    print "Case #%d: %d" % (i+1, c)
|
jasonlcrane/sked | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape '$ ', spaces, and colons in *word* for use as a ninja path."""
    # The replacement order matters and is preserved from the original:
    # pre-escaped '$ ' sequences are doubled before raw spaces are escaped.
    for needle, replacement in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(needle, replacement)
    return word
class Writer(object):
    """Writes a .ninja file to *output*, word-wrapping long declarations at
    *width* columns using ninja's '$' line-continuation syntax."""
    def __init__(self, output, width=78):
        self.output = output
        self.width = width
    def newline(self):
        self.output.write('\n')
    def comment(self, text):
        # Wrap at width-2 to leave room for the leading '# '.
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')
    def variable(self, key, value, indent=0):
        # A value of None means "omit this variable entirely".
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)
    def pool(self, name, depth):
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)
    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        # Each optional keyword maps directly onto the indented ninja rule
        # variable of the same name; falsy values are simply not emitted.
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)
    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a build statement.  Implicit dependencies are separated from
        explicit inputs by '|', order-only dependencies by '||'."""
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))
        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)
        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))
        if variables:
            # *variables* may be a dict or an iterable of (key, value) pairs.
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)
            for key, val in iterator:
                self.variable(key, val, indent=1)
        return outputs
    def include(self, path):
        self._line('include %s' % path)
    def subninja(self, path):
        self._line('subninja %s' % path)
    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))
    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count
    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.
            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                # An even number of preceding '$'s means the space itself is
                # unescaped and is therefore a legal break point.
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]
            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)
        self.output.write(leading_space + text + '\n')
    def _as_list(self, input):
        # Normalize None / scalar / list into a list.
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Return *string* with every '$' doubled so Ninja reads it literally.

    Newlines cannot be represented inside a Ninja file at all, hence the
    assertion.
    """
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is Ninja's sole escape metacharacter; '$$' denotes a literal dollar.
    return '$$'.join(string.split('$'))
|
# Canned SoftLayer Account.getIscsiNetworkStorage API response used as a test
# fixture: a single 1 GB iSCSI volume with CHAP credentials, a billing item
# id (needed for cancellation paths), and its backend IP address.
softlayer/softlayer-cinder-driver | refs/heads/master | slos/test/fixtures/Account.py | 1 | getIscsiNetworkStorage = [{'id': 2,
                           'capacityGb': 1,
                           'username': 'foo',
                           'password': 'bar',
                           'billingItem': {'id': 2},
                           'serviceResourceBackendIpAddress': '10.0.0.2'}]
|
farodin91/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/doctest.py | 174 | """ discover and run doctests in modules and test files."""
from __future__ import absolute_import
import traceback
import pytest
from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
from _pytest.python import FixtureRequest
def pytest_addoption(parser):
    """Register the doctest ini option and the --doctest-* CLI flags."""
    # Option flags (ELLIPSIS etc.) configurable from the ini file; names are
    # resolved against _get_flag_lookup() in get_optionflags().
    parser.addini('doctest_optionflags', 'option flags for doctests',
                  type="args", default=["ELLIPSIS"])
    group = parser.getgroup("collect")
    group.addoption("--doctest-modules",
                    action="store_true", default=False,
                    help="run doctests in all .py modules",
                    dest="doctestmodules")
    group.addoption("--doctest-glob",
                    action="append", default=[], metavar="pat",
                    help="doctests file matching pattern, default: test*.txt",
                    dest="doctestglob")
    group.addoption("--doctest-ignore-import-errors",
                    action="store_true", default=False,
                    help="ignore doctest ImportErrors",
                    dest="doctest_ignore_import_errors")
def pytest_collect_file(path, parent):
    """Collect *path* as a doctest module or doctest text file, if eligible."""
    config = parent.config
    if path.ext != ".py":
        return None
    if config.option.doctestmodules:
        return DoctestModule(path, parent)
    if _is_doctest(config, path, parent):
        return DoctestTextfile(path, parent)
    return None
def _is_doctest(config, path, parent):
    """Return True when *path* should be collected as a doctest text file."""
    # .txt/.rst files named directly on the command line always qualify.
    if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
        return True
    patterns = config.getoption("doctestglob") or ['test*.txt']
    return any(path.check(fnmatch=pattern) for pattern in patterns)
class ReprFailDoctest(TerminalRepr):
    """Terminal representation of a doctest failure: a block of report lines
    followed by the location of the failing example."""
    def __init__(self, reprlocation, lines):
        self.reprlocation = reprlocation
        self.lines = lines
    def toterminal(self, tw):
        # Emit the captured report lines first, then the location footer.
        for report_line in self.lines:
            tw.line(report_line)
        self.reprlocation.toterminal(tw)
class DoctestItem(pytest.Item):
    """A single collected doctest.DocTest, executed through a shared doctest
    runner, with pytest fixtures reachable via a ``getfixture`` global."""
    def __init__(self, name, parent, runner=None, dtest=None):
        super(DoctestItem, self).__init__(name, parent)
        self.runner = runner
        self.dtest = dtest
        self.obj = None
        self.fixture_request = None
    def setup(self):
        if self.dtest is not None:
            self.fixture_request = _setup_fixtures(self)
            # Expose fixtures to the doctest through a getfixture() global.
            globs = dict(getfixture=self.fixture_request.getfuncargvalue)
            self.dtest.globs.update(globs)
    def runtest(self):
        _check_all_skipped(self.dtest)
        self.runner.run(self.dtest)
    def repr_failure(self, excinfo):
        """Render doctest failures/unexpected exceptions with source context
        and a diff; everything else falls back to the default representation."""
        import doctest
        if excinfo.errisinstance((doctest.DocTestFailure,
                                  doctest.UnexpectedException)):
            doctestfailure = excinfo.value
            example = doctestfailure.example
            test = doctestfailure.test
            filename = test.filename
            if test.lineno is None:
                lineno = None
            else:
                # example.lineno is relative to the start of the docstring.
                lineno = test.lineno + example.lineno + 1
            message = excinfo.type.__name__
            reprlocation = ReprFileLocation(filename, lineno, message)
            checker = _get_checker()
            REPORT_UDIFF = doctest.REPORT_UDIFF
            if lineno is not None:
                lines = doctestfailure.test.docstring.splitlines(False)
                # add line numbers to the left of the error message
                lines = ["%03d %s" % (i + test.lineno + 1, x)
                         for (i, x) in enumerate(lines)]
                # trim docstring error lines to 10
                lines = lines[example.lineno - 9:example.lineno + 1]
            else:
                lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
                # Reconstruct the example source with >>>/... markers since
                # we cannot point at the real location.
                indent = '>>>'
                for line in example.source.splitlines():
                    lines.append('??? %s %s' % (indent, line))
                    indent = '...'
            if excinfo.errisinstance(doctest.DocTestFailure):
                lines += checker.output_difference(example,
                                                   doctestfailure.got, REPORT_UDIFF).split("\n")
            else:
                inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
                lines += ["UNEXPECTED EXCEPTION: %s" %
                          repr(inner_excinfo.value)]
                lines += traceback.format_exception(*excinfo.value.exc_info)
            return ReprFailDoctest(reprlocation, lines)
        else:
            return super(DoctestItem, self).repr_failure(excinfo)
    def reportinfo(self):
        return self.fspath, None, "[doctest] %s" % self.name
def _get_flag_lookup():
    """Map option-flag names (as written in the ini file) to doctest flags."""
    import doctest
    return {
        'DONT_ACCEPT_TRUE_FOR_1': doctest.DONT_ACCEPT_TRUE_FOR_1,
        'DONT_ACCEPT_BLANKLINE': doctest.DONT_ACCEPT_BLANKLINE,
        'NORMALIZE_WHITESPACE': doctest.NORMALIZE_WHITESPACE,
        'ELLIPSIS': doctest.ELLIPSIS,
        'IGNORE_EXCEPTION_DETAIL': doctest.IGNORE_EXCEPTION_DETAIL,
        'COMPARISON_FLAGS': doctest.COMPARISON_FLAGS,
        'ALLOW_UNICODE': _get_allow_unicode_flag(),
        'ALLOW_BYTES': _get_allow_bytes_flag(),
    }
def get_optionflags(parent):
    """OR together the doctest option flags configured via the ini file."""
    names = parent.config.getini("doctest_optionflags")
    lookup = _get_flag_lookup()
    combined = 0
    for name in names:
        combined |= lookup[name]
    return combined
class DoctestTextfile(DoctestItem, pytest.Module):
    """Runs all doctest examples found in a plain-text file as one test."""
    def runtest(self):
        import doctest
        fixture_request = _setup_fixtures(self)
        # inspired by doctest.testfile; ideally we would use it directly,
        # but it doesn't support passing a custom checker
        text = self.fspath.read()
        filename = str(self.fspath)
        name = self.fspath.basename
        # Make fixtures reachable from the doctest via getfixture().
        globs = dict(getfixture=fixture_request.getfuncargvalue)
        if '__name__' not in globs:
            globs['__name__'] = '__main__'
        optionflags = get_optionflags(self)
        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                     checker=_get_checker())
        parser = doctest.DocTestParser()
        test = parser.get_doctest(text, globs, name, filename, 0)
        _check_all_skipped(test)
        runner.run(test)
def _check_all_skipped(test):
    """Skip the current pytest item when every example in *test* has +SKIP.

    Note: a DocTest with no examples at all is also treated as all-skipped.
    """
    import doctest
    skip_flag = doctest.SKIP
    if all(example.options.get(skip_flag, False) for example in test.examples):
        pytest.skip('all tests skipped by +SKIP option')
class DoctestModule(pytest.Module):
    """Collects the doctests found in the docstrings of a Python module."""
    def collect(self):
        import doctest
        if self.fspath.basename == "conftest.py":
            # conftest files are imported through the plugin manager rather
            # than pyimport().
            module = self.config.pluginmanager._importconftest(self.fspath)
        else:
            try:
                module = self.fspath.pyimport()
            except ImportError:
                if self.config.getvalue('doctest_ignore_import_errors'):
                    pytest.skip('unable to import module %r' % self.fspath)
                else:
                    raise
        # uses internal doctest module parsing mechanism
        finder = doctest.DocTestFinder()
        optionflags = get_optionflags(self)
        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                     checker=_get_checker())
        for test in finder.find(module, module.__name__):
            if test.examples:  # skip empty doctests
                yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
    """
    Used by DoctestTextfile and DoctestItem to setup fixture information.

    Returns a filled FixtureRequest bound to *doctest_item*.
    """
    def func():
        pass
    doctest_item.funcargs = {}
    fm = doctest_item.session._fixturemanager
    # getfixtureinfo expects a callable to inspect; a doctest has no function
    # of its own, so a no-op stands in.
    doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
                                                  cls=None, funcargs=False)
    fixture_request = FixtureRequest(doctest_item)
    fixture_request._fillfixtures()
    return fixture_request
def _get_checker():
    """
    Returns a doctest.OutputChecker subclass that takes in account the
    ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
    to strip b'' prefixes.
    Useful when the same doctest should run in Python 2 and Python 3.
    An inner class is used to avoid importing "doctest" at the module
    level.
    """
    # The class is built once and cached as an attribute on this function.
    if hasattr(_get_checker, 'LiteralsOutputChecker'):
        return _get_checker.LiteralsOutputChecker()
    import doctest
    import re
    class LiteralsOutputChecker(doctest.OutputChecker):
        """
        Copied from doctest_nose_plugin.py from the nltk project:
            https://github.com/nltk/nltk
        Further extended to also support byte literals.
        """
        # Match a u/U (or b/B) string-literal prefix, keeping the preceding
        # character and the quote so they can be re-emitted without it.
        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
        def check_output(self, want, got, optionflags):
            res = doctest.OutputChecker.check_output(self, want, got,
                                                     optionflags)
            if res:
                return True
            allow_unicode = optionflags & _get_allow_unicode_flag()
            allow_bytes = optionflags & _get_allow_bytes_flag()
            if not allow_unicode and not allow_bytes:
                return False
            else:  # pragma: no cover
                # Strip the literal prefixes from both sides, then compare
                # once more with the base checker.
                def remove_prefixes(regex, txt):
                    return re.sub(regex, r'\1\2', txt)
                if allow_unicode:
                    want = remove_prefixes(self._unicode_literal_re, want)
                    got = remove_prefixes(self._unicode_literal_re, got)
                if allow_bytes:
                    want = remove_prefixes(self._bytes_literal_re, want)
                    got = remove_prefixes(self._bytes_literal_re, got)
                res = doctest.OutputChecker.check_output(self, want, got,
                                                         optionflags)
            return res
    _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
    return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_UNICODE')
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_BYTES')
|
DeCarabas/millie | refs/heads/master | test.py | 1 | #!/usr/local/bin/python3
import locale
from collections import namedtuple
from pathlib import Path
from subprocess import run, PIPE
from time import perf_counter
def read_test_spec(path):
    """Parse the leading '# Key: value' comment lines of a test file.

    Reading stops at the first non-comment line; comments later in the file
    are ignored.  Returns a dict of stripped key/value strings.
    """
    spec = {}
    with open(path) as file:
        for line in file:
            if (not line) or line[0] != '#':
                break
            # Split on the first ':' only, so values that themselves contain
            # colons (URLs, Windows paths, error messages) are not truncated.
            spec_part = line[1:].strip().split(':', 1)
            if len(spec_part) > 1:
                spec[spec_part[0].strip()] = spec_part[1].strip()
    return spec
# Outcome record for one test run: result is 'ok', 'fail', or 'skip';
# details is filled only for failures; stderr/stdout are None for skips.
test_result = namedtuple(
    'test_result',
    ['result', 'path', 'elapsed', 'details', 'stderr', 'stdout'],
)
def parse_bool(val):
    """Parse a spec value as a boolean.

    Accepts 'true', '1', 'yes', and 'y' (case-insensitive); everything else
    is False.
    """
    val = val.lower()
    # Fixed: the membership tuple previously contained the integer 1, which a
    # lowercased string can never equal, so "1" was silently treated as False.
    return val in ('true', '1', 'yes', 'y')
def run_test(path):
    """Run the millie test at *path* and return a test_result.

    The header comments of the file (see read_test_spec) drive the check:
    Expected / ExpectedType compare against stdout, ExpectedError is searched
    for in stderr, ExpectFailure allows a nonzero exit code, and
    Disabled/Enabled skip the test entirely.
    """
    spec = read_test_spec(path)
    if (
        ('Disabled' in spec and parse_bool(spec['Disabled'])) or
        ('Enabled' in spec and not parse_bool(spec['Enabled']))
    ):
        return test_result(
            result='skip',
            path=path,
            elapsed=0.0,
            details=None,
            stderr=None,
            stdout=None,
        )
    args = ['./millie', path]
    if 'ExpectedType' in spec:
        args.append('--print-type')
    start = perf_counter()
    cp = run(
        args,
        stdout=PIPE,
        stderr=PIPE,
        encoding=locale.getpreferredencoding()
    )
    elapsed = perf_counter() - start
    result = 'ok'
    details = None
    expect_failure = 'ExpectFailure' in spec
    if cp.returncode and not expect_failure:
        result = 'fail'
        details = 'millie returned exit code {}'.format(cp.returncode)
    else:
        actual = cp.stdout.strip()
        if 'Expected' in spec:
            if actual != spec['Expected']:
                result = 'fail'
                details = 'Expected "{}" got "{}"'.format(
                    spec['Expected'],
                    actual
                )
        elif 'ExpectedType' in spec:
            if actual != spec['ExpectedType']:
                result = 'fail'
                details = 'Expected type "{}" got "{}"'.format(
                    spec['ExpectedType'],
                    actual
                )
        elif 'ExpectedError' in spec:
            # Fixed: previously this branch was a bare `else:` and raised
            # KeyError for a spec file with no expectation keys at all; such
            # files now simply pass when the run itself succeeded.
            if not spec['ExpectedError'] in cp.stderr:
                result = 'fail'
                details = "Expected error '{}' to be reported".format(
                    spec['ExpectedError'],
                )
    return test_result(result, path, elapsed, details, cp.stderr, cp.stdout)
# Script entry point: run every .millie file under ./tests and print one
# summary line per test, plus captured output for failures.
locale.setlocale(locale.LC_ALL, '')
for path in Path('./tests').glob('**/*.millie'):
    result, path, elapsed, details, stderr, stdout = run_test(path)
    print('[{0:<4}] {1} ({2:0.3}ms)'.format(result, path, elapsed * 1000))
    if result == 'fail':
        print(' ' + details)
        print(' stdout:')
        print(' ' + '\n '.join(stdout.split('\n')))
        print(' stderr:')
        print(' ' + '\n '.join(stderr.split('\n')))
|
mathdd/numpy | refs/heads/master | numpy/f2py/f2py2e.py | 174 | #!/usr/bin/env python
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2011 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import pprint
import re
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
# Module-level aliases used throughout f2py2e.
f2py_version = __version__.version
errmess = sys.stderr.write  # write an error/diagnostic message to stderr
# outmess=sys.stdout.write
show = pprint.pprint
outmess = auxfuncs.outmess  # informational output (see auxfuncs)
# numpy_version is only used for display in the usage text.
try:
    from numpy import __version__ as numpy_version
except ImportError:
    numpy_version = 'N/A'
__usage__ = """\
Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mkdtemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{document},
\\end{document}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include-paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: %s
numpy Version: %s
Requires: Python 2.3 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version)
def scaninputline(inputline):
    """Parse an f2py command line (a list of tokens) into (files, options).

    *files* is the list of readable fortran/signature file paths found on the
    line; *options* is a dict consumed by callcrackfortran/run_main.  May call
    sys.exit() for -v, unknown options, a missing module/file, or an existing
    signature file without --overwrite-signature.
    """
    files, skipfuncs, onlyfuncs, debug = [], [], [], []
    # f: file/skip/only mode; f2..f9: "next token is the value of option X".
    f, f2, f3, f5, f6, f7, f8, f9 = 1, 0, 0, 0, 0, 0, 0, 0
    verbose = 1
    dolc = -1
    dolatexdoc = 0
    dorestdoc = 0
    wrapfuncs = 1
    buildpath = '.'
    include_paths = []
    signsfile, modulename = None, None
    options = {'buildpath': buildpath,
               'coutput': None,
               'f2py_wrapper_output': None}
    for l in inputline:
        if l == '':
            pass
        elif l == 'only:':
            f = 0
        elif l == 'skip:':
            f = -1
        elif l == ':':
            f = 1
        elif l[:8] == '--debug-':
            debug.append(l[8:])
        elif l == '--lower':
            dolc = 1
        elif l == '--build-dir':
            f6 = 1
        elif l == '--no-lower':
            dolc = 0
        elif l == '--quiet':
            verbose = 0
        elif l == '--verbose':
            verbose += 1
        elif l == '--latex-doc':
            dolatexdoc = 1
        elif l == '--no-latex-doc':
            dolatexdoc = 0
        elif l == '--rest-doc':
            dorestdoc = 1
        elif l == '--no-rest-doc':
            dorestdoc = 0
        elif l == '--wrap-functions':
            wrapfuncs = 1
        elif l == '--no-wrap-functions':
            wrapfuncs = 0
        elif l == '--short-latex':
            options['shortlatex'] = 1
        elif l == '--coutput':
            f8 = 1
        elif l == '--f2py-wrapper-output':
            f9 = 1
        elif l == '--overwrite-signature':
            options['h-overwrite'] = 1
        elif l == '-h':
            f2 = 1
        elif l == '-m':
            f3 = 1
        elif l[:2] == '-v':
            print(f2py_version)
            sys.exit()
        elif l == '--show-compilers':
            f5 = 1
        elif l[:8] == '-include':
            cfuncs.outneeds['userincludes'].append(l[9:-1])
            cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
        elif l == '--include_paths':
            # BUGFIX: was `l[:15] in '--include_paths'`, a substring test
            # that also matched unrelated tokens such as '-', '--' or
            # '--include' and silently consumed the following argument.
            outmess(
                'f2py option --include_paths is deprecated, use --include-paths instead.\n')
            f7 = 1
        elif l == '--include-paths':
            # BUGFIX: equality instead of substring membership (see above).
            f7 = 1
        elif l[0] == '-':
            errmess('Unknown option %s\n' % repr(l))
            sys.exit()
        elif f2:
            f2 = 0
            signsfile = l
        elif f3:
            f3 = 0
            modulename = l
        elif f6:
            f6 = 0
            buildpath = l
        elif f7:
            f7 = 0
            include_paths.extend(l.split(os.pathsep))
        elif f8:
            f8 = 0
            options["coutput"] = l
        elif f9:
            f9 = 0
            options["f2py_wrapper_output"] = l
        elif f == 1:
            # Plain token in file mode: keep it only if it is readable.
            try:
                open(l).close()
                files.append(l)
            except IOError as detail:
                errmess('IOError: %s. Skipping file "%s".\n' %
                        (str(detail), l))
        elif f == -1:
            skipfuncs.append(l)
        elif f == 0:
            onlyfuncs.append(l)
    if not f5 and not files and not modulename:
        print(__usage__)
        sys.exit()
    if not os.path.isdir(buildpath):
        if not verbose:
            # NOTE(review): the message is emitted only in --quiet mode; this
            # looks inverted but matches the long-standing behavior — confirm
            # against upstream before changing.
            outmess('Creating build directory %s' % (buildpath))
        os.mkdir(buildpath)
    if signsfile:
        signsfile = os.path.join(buildpath, signsfile)
    if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
        errmess(
            'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
        sys.exit()
    options['debug'] = debug
    options['verbose'] = verbose
    # --lower defaults on when -h is used, off otherwise.
    if dolc == -1 and not signsfile:
        options['do-lower'] = 0
    else:
        options['do-lower'] = dolc
    if modulename:
        options['module'] = modulename
    if signsfile:
        options['signsfile'] = signsfile
    if onlyfuncs:
        options['onlyfuncs'] = onlyfuncs
    if skipfuncs:
        options['skipfuncs'] = skipfuncs
    options['dolatexdoc'] = dolatexdoc
    options['dorestdoc'] = dorestdoc
    options['wrapfuncs'] = wrapfuncs
    options['buildpath'] = buildpath
    options['include_paths'] = include_paths
    return files, options
def callcrackfortran(files, options):
    """Run crackfortran over *files*, configured from *options*.

    Returns the list of parsed blocks ("postlist") with per-module output
    file names filled in; may also write the signature file when -h was used.
    """
    # crackfortran is configured through module-level globals.
    rules.options = options
    crackfortran.debug = options['debug']
    crackfortran.verbose = options['verbose']
    if 'module' in options:
        crackfortran.f77modulename = options['module']
    if 'skipfuncs' in options:
        crackfortran.skipfuncs = options['skipfuncs']
    if 'onlyfuncs' in options:
        crackfortran.onlyfuncs = options['onlyfuncs']
    crackfortran.include_paths[:] = options['include_paths']
    crackfortran.dolowercase = options['do-lower']
    postlist = crackfortran.crackfortran(files)
    if 'signsfile' in options:
        outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
        pyf = crackfortran.crack2fortran(postlist)
        # A signature path ending in 'stdout' means "print to stdout".
        if options['signsfile'][-6:] == 'stdout':
            sys.stdout.write(pyf)
        else:
            f = open(options['signsfile'], 'w')
            f.write(pyf)
            f.close()
    # Default C output name is <module>module.c unless --coutput was given.
    if options["coutput"] is None:
        for mod in postlist:
            mod["coutput"] = "%smodule.c" % mod["name"]
    else:
        for mod in postlist:
            mod["coutput"] = options["coutput"]
    if options["f2py_wrapper_output"] is None:
        for mod in postlist:
            mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
    else:
        for mod in postlist:
            mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
    return postlist
def buildmodules(lst):
    """Generate wrapper sources for each parsed module block in *lst*.

    '__user__' blocks become callback wrappers; modules that are 'use'd by
    another listed module are skipped with a message.  Returns a dict mapping
    module name -> accumulated build artifacts.
    """
    cfuncs.buildcfuncs()
    outmess('Building modules...\n')
    modules, mnames, isusedby = [], [], {}
    # First pass: split out callback blocks and record use-dependencies.
    for i in range(len(lst)):
        if '__user__' in lst[i]['name']:
            cb_rules.buildcallbacks(lst[i])
        else:
            if 'use' in lst[i]:
                for u in lst[i]['use'].keys():
                    if u not in isusedby:
                        isusedby[u] = []
                    isusedby[u].append(lst[i]['name'])
            modules.append(lst[i])
            mnames.append(lst[i]['name'])
    ret = {}
    # Second pass: build every module that is not itself 'use'd by another.
    for i in range(len(mnames)):
        if mnames[i] in isusedby:
            outmess('\tSkipping module "%s" which is used by %s.\n' % (
                mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]])))
        else:
            um = []
            if 'use' in modules[i]:
                for u in modules[i]['use'].keys():
                    if u in isusedby and u in mnames:
                        um.append(modules[mnames.index(u)])
                    else:
                        outmess(
                            '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u))
            ret[mnames[i]] = {}
            dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um))
    return ret
def dict_append(d_out, d_in):
    """Merge *d_in* into *d_out* in place; every value in *d_out* is a list.

    List values from *d_in* are concatenated onto the existing list, scalar
    values are appended as single elements.
    """
    for key, value in d_in.items():
        bucket = d_out.setdefault(key, [])
        if isinstance(value, list):
            d_out[key] = bucket + value
        else:
            bucket.append(value)
def run_main(comline_list):
    """Run f2py as if string.join(comline_list,' ') is used as a command line.
    In case of using -h flag, return None.

    Otherwise returns the dict produced by buildmodules, with the shared
    fortranobject.c/.h sources appended to every module's artifacts.
    """
    crackfortran.reset_global_f2py_vars()
    f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
    fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
    fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
    files, options = scaninputline(comline_list)
    auxfuncs.options = options
    postlist = callcrackfortran(files, options)
    # Record which modules are 'use'd by which other modules.
    isusedby = {}
    for i in range(len(postlist)):
        if 'use' in postlist[i]:
            for u in postlist[i]['use'].keys():
                if u not in isusedby:
                    isusedby[u] = []
                isusedby[u].append(postlist[i]['name'])
    for i in range(len(postlist)):
        if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']:
            if postlist[i]['name'] in isusedby:
                # if not quiet:
                outmess('Skipping Makefile build for module "%s" which is used by %s\n' % (
                    postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]])))
    if 'signsfile' in options:
        # -h mode: the signature file was already written by callcrackfortran.
        if options['verbose'] > 1:
            outmess(
                'Stopping. Edit the signature file and then run f2py on the signature file: ')
            outmess('%s %s\n' %
                    (os.path.basename(sys.argv[0]), options['signsfile']))
        return
    for i in range(len(postlist)):
        if postlist[i]['block'] != 'python module':
            if 'python module' not in options:
                errmess(
                    'Tip: If your original code is Fortran source then you must use -m option.\n')
            raise TypeError('All blocks must be python module blocks but got %s' % (
                repr(postlist[i]['block'])))
    auxfuncs.debugoptions = options['debug']
    f90mod_rules.options = options
    auxfuncs.wrapfuncs = options['wrapfuncs']
    ret = buildmodules(postlist)
    for mn in ret.keys():
        dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
    return ret
def filter_files(prefix, suffix, files, remove_prefix=None):
    r"""Split *files* into (matching, rest).

    A file matches when, after stripping surrounding whitespace, it fits the
    regex ``prefix.*suffix\Z``.  When *remove_prefix* is truthy the prefix is
    cut off the returned matches.
    """
    pattern = re.compile(prefix + r'.*' + suffix + r'\Z')
    cut = len(prefix) if remove_prefix else 0
    matched, unmatched = [], []
    for name in (entry.strip() for entry in files):
        if pattern.match(name):
            matched.append(name[cut:])
        else:
            unmatched.append(name)
    return matched, unmatched
def get_prefix(module):
    """Return the grandparent directory of *module*'s source file."""
    return os.path.dirname(os.path.dirname(module.__file__))
def run_compile():
"""
Do it all in one call!
"""
import tempfile
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try:
i = sys.argv.index('--build-dir')
except ValueError:
i = None
if i is not None:
build_dir = sys.argv[i + 1]
del sys.argv[i + 1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = tempfile.mkdtemp()
_reg1 = re.compile(r'[-][-]link[-]')
sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
_reg2 = re.compile(
r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include')
f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:', 'skip:']:
fl = 1
elif a == ':':
fl = 0
if fl or a == ':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1] != ':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
_reg3 = re.compile(
r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)')
flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
_reg4 = re.compile(
r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))')
fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
if 1:
del_list = []
for s in flib_flags:
v = '--fcompiler='
if s[:len(v)] == v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
allowed_keys = list(fcompiler.fcompiler_class.keys())
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
try:
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
print('Unknown vendor: "%s"' % (s[len(v):]))
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
continue
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
assert len(flib_flags) <= 2, repr(flib_flags)
_reg5 = re.compile(r'[-][-](verbose)')
setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
for optname in ['--include_paths', '--include-paths']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i + 1]
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('', '[.](o|a|so)', sources)
include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
for i in range(len(define_macros)):
name_value = define_macros[i].split('=', 1)
if len(name_value) == 1:
name_value.append(None)
if len(name_value) == 2:
define_macros[i] = tuple(name_value)
else:
print('Invalid use of -D:', name_value)
from numpy.distutils.system_info import get_info
num_info = {}
if num_info:
include_dirs.extend(num_info.get('include_dirs', []))
from numpy.distutils.core import setup, Extension
ext_args = {'name': modulename, 'sources': sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'
' (try `f2py --help-link`)\n' % (repr(n)))
dict_append(ext_args, **i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp', build_dir,
'--build-base', build_dir,
'--build-platlib', '.'])
if fc_flags:
sys.argv.extend(['config_fc'] + fc_flags)
if flib_flags:
sys.argv.extend(['build_ext'] + flib_flags)
setup(ext_modules=[ext])
if remove_build_dir and os.path.exists(build_dir):
import shutil
outmess('Removing build directory %s\n' % (build_dir))
shutil.rmtree(build_dir)
def main():
    """f2py command-line entry point: dispatch on the mode flags."""
    args = sys.argv[1:]
    if '--help-link' in args:
        # Report the resources numpy.distutils knows how to link against,
        # then quit without generating anything.
        sys.argv.remove('--help-link')
        from numpy.distutils.system_info import show_all
        show_all()
    elif '-c' in args:
        # Build-and-compile mode: generate wrappers and compile them.
        run_compile()
    else:
        # Default mode: only generate the wrapper sources.
        run_main(sys.argv[1:])
# if __name__ == "__main__":
# main()
# EOF
|
be-cloud-be/horizon-addons | refs/heads/9.0 | server-tools/setup/base_name_search_improved/setup.py | 7073 | import setuptools
# Boilerplate OCA addon packaging: setuptools-odoo (pulled in at build
# time via setup_requires) reads the addon's manifest and derives all
# package metadata (name, version, dependencies) from it; odoo_addon=True
# activates that machinery.
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
|
hansenDise/scrapy | refs/heads/master | scrapy/commands/shell.py | 107 | """
Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from threading import Thread
from scrapy.commands import ScrapyCommand
from scrapy.shell import Shell
from scrapy.http import Request
from scrapy.utils.spider import spidercls_for_request, DefaultSpider
class Command(ScrapyCommand):
    """Implementation of the ``scrapy shell`` command: an interactive
    console with Scrapy objects pre-populated around an optionally
    fetched url or local file.
    """

    # Usable outside a project; DefaultSpider below is the fallback.
    requires_project = False
    # KEEP_ALIVE keeps the engine running between manual fetches;
    # LOGSTATS_INTERVAL=0 silences the periodic stats log lines.
    default_settings = {'KEEP_ALIVE': True, 'LOGSTATS_INTERVAL': 0}

    def syntax(self):
        return "[url|file]"

    def short_desc(self):
        return "Interactive scraping console"

    def long_desc(self):
        return "Interactive console for scraping the given url"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        # -c runs a one-off expression instead of an interactive session.
        parser.add_option("-c", dest="code",
                          help="evaluate the code in the shell, print the result and exit")
        parser.add_option("--spider", dest="spider",
                          help="use this spider")

    def update_vars(self, vars):
        """You can use this function to update the Scrapy objects that will be
        available in the shell
        """
        pass

    def run(self, args, opts):
        # First positional argument, if present, is the url or file path.
        url = args[0] if args else None
        spider_loader = self.crawler_process.spider_loader

        # Choose a spider class: --spider wins, else match the url
        # against project spiders, else fall back to a bare DefaultSpider.
        spidercls = DefaultSpider
        if opts.spider:
            spidercls = spider_loader.load(opts.spider)
        elif url:
            spidercls = spidercls_for_request(spider_loader, Request(url),
                                              spidercls, log_multiple=True)

        # The crawler is created this way since the Shell manually handles the
        # crawling engine, so the set up in the crawl method won't work
        crawler = self.crawler_process._create_crawler(spidercls)
        # The Shell class needs a persistent engine in the crawler
        crawler.engine = crawler._create_engine()
        crawler.engine.start()

        # Run the crawler process in a background thread so the console
        # can own the main thread.
        self._start_crawler_thread()

        shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
        shell.start(url=url)

    def _start_crawler_thread(self):
        # Daemon thread: must not block interpreter exit when the
        # interactive session ends.
        t = Thread(target=self.crawler_process.start,
                   kwargs={'stop_after_crawl': False})
        t.daemon = True
        t.start()
|
raiden-network/raiden | refs/heads/develop | raiden/tests/utils/client.py | 1 | from raiden.constants import TRANSACTION_INTRINSIC_GAS
from raiden.network.rpc.client import EthTransfer, JSONRPCClient
from raiden.tests.utils.factories import HOP1
from raiden.utils.typing import Address
def burn_eth(rpc_client: JSONRPCClient, amount_to_leave: int = 0) -> None:
    """Burns all the ETH on the account of the given raiden service"""
    web3 = rpc_client.web3
    price = web3.eth.gas_price

    # Reserve enough ETH to pay for the burn transaction itself, on top
    # of whatever the caller asked to keep.
    reserve = TRANSACTION_INTRINSIC_GAS + amount_to_leave
    balance = web3.eth.get_balance(rpc_client.address)

    transfer = EthTransfer(
        to_address=Address(HOP1),
        value=balance - price * reserve,
        gas_price=price,
    )
    rpc_client.poll_transaction(rpc_client.transact(transfer))
|
rlmh/OpenShadingLanguage | refs/heads/master | testsuite/testshade-expr/run.py | 12 | #!/usr/bin/env python
# Run testshade on a 64x64 grid, writing uint8 output `out.tif`, with the
# shader given inline via -expr: result = color(u, v, 0).
# NOTE(review): `command` and `testshade` are not defined here — this file
# is presumably exec'd by the suite's test driver which injects them; confirm.
command += testshade ("-v -g 64 64 -od uint8 -o result out.tif -expr 'result=color(u,v,0)'")
# Files the driver compares against the reference outputs.
outputs = [ "out.txt", "out.tif" ]
|
Weuxel/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/win/gyptest-link-mapfile.py | 254 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure mapfile settings are extracted properly.
"""
import TestGyp
import sys
# MSVC-only feature: linker map files.
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('mapfile.gyp', chdir=CHDIR)
  test.build('mapfile.gyp', test.ALL, chdir=CHDIR)

  # Target with map generation left unset: no .map file may appear.
  map_file = test.built_file_path('test_mapfile_unset.map', chdir=CHDIR)
  test.must_not_exist(map_file)

  # Map generation enabled: map exists and lists the decorated symbol,
  # but not the undecorated export signature.
  map_file = test.built_file_path('test_mapfile_generate.map', chdir=CHDIR)
  test.must_exist(map_file)
  test.must_contain(map_file, '?AnExportedFunction@@YAXXZ')
  test.must_not_contain(map_file, 'void __cdecl AnExportedFunction(void)')

  # Exports option additionally enabled: undecorated export signature shows up.
  map_file = test.built_file_path('test_mapfile_generate_exports.map',
                                  chdir=CHDIR)
  test.must_exist(map_file)
  test.must_contain(map_file, 'void __cdecl AnExportedFunction(void)')

  # Custom map file name: the default-named file must NOT exist...
  map_file = test.built_file_path('test_mapfile_generate_filename.map',
                                  chdir=CHDIR)
  test.must_not_exist(map_file)

  # ...and the custom-named one must, with the same decorated contents.
  map_file = test.built_file_path('custom_file_name.map', chdir=CHDIR)
  test.must_exist(map_file)
  test.must_contain(map_file, '?AnExportedFunction@@YAXXZ')
  test.must_not_contain(map_file, 'void __cdecl AnExportedFunction(void)')

  test.pass_test()
|
dmoliveira/networkx | refs/heads/master | networkx/algorithms/tests/test_dag.py | 18 | #!/usr/bin/env python
from itertools import combinations
from nose.tools import *
from networkx.testing.utils import assert_edges_equal
import networkx as nx
class TestDAG:
    """Tests for networkx.algorithms.dag: topological sorting (iterative
    and recursive variants), DAG predicates, ancestors/descendants,
    transitive closure, antichains and weighted longest paths.
    """

    def setUp(self):
        pass

    def test_topological_sort1(self):
        """Basic sort on a 3-node DAG; a back edge makes it unfeasible."""
        DG = nx.DiGraph()
        DG.add_edges_from([(1, 2), (1, 3), (2, 3)])
        assert_equal(nx.topological_sort(DG), [1, 2, 3])
        assert_equal(nx.topological_sort_recursive(DG), [1, 2, 3])

        # 2->3 together with 3->2 forms a cycle: both variants must raise.
        DG.add_edge(3, 2)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)

        # Removing 2->3 leaves 3->2, so 3 must now precede 2.
        DG.remove_edge(2, 3)
        assert_equal(nx.topological_sort(DG), [1, 3, 2])
        assert_equal(nx.topological_sort_recursive(DG), [1, 3, 2])

    def test_reverse_topological_sort1(self):
        """Same graphs as above, exercising the reverse=True flag."""
        DG = nx.DiGraph()
        DG.add_edges_from([(1, 2), (1, 3), (2, 3)])
        assert_equal(nx.topological_sort(DG, reverse=True), [3, 2, 1])
        assert_equal(
            nx.topological_sort_recursive(DG, reverse=True), [3, 2, 1])

        DG.add_edge(3, 2)
        assert_raises(nx.NetworkXUnfeasible,
                      nx.topological_sort, DG, reverse=True)
        assert_raises(nx.NetworkXUnfeasible,
                      nx.topological_sort_recursive, DG, reverse=True)

        DG.remove_edge(2, 3)
        assert_equal(nx.topological_sort(DG, reverse=True), [2, 3, 1])
        assert_equal(
            nx.topological_sort_recursive(DG, reverse=True), [2, 3, 1])

    def test_is_directed_acyclic_graph(self):
        """Undirected graphs and cyclic digraphs are rejected; DAGs pass."""
        G = nx.generators.complete_graph(2)
        assert_false(nx.is_directed_acyclic_graph(G))
        assert_false(nx.is_directed_acyclic_graph(G.to_directed()))
        assert_false(nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])))
        assert_true(nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])))

    def test_topological_sort2(self):
        """A 5-cycle plus a separate chain; breaking the cycle fixes it."""
        DG = nx.DiGraph({1: [2], 2: [3], 3: [4],
                         4: [5], 5: [1], 11: [12],
                         12: [13], 13: [14], 14: [15]})
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)

        assert_false(nx.is_directed_acyclic_graph(DG))

        # Removing 1->2 breaks the 1..5 cycle.
        DG.remove_edge(1, 2)
        assert_equal(nx.topological_sort_recursive(DG),
                     [11, 12, 13, 14, 15, 2, 3, 4, 5, 1])
        assert_equal(nx.topological_sort(DG),
                     [11, 12, 13, 14, 15, 2, 3, 4, 5, 1])
        assert_true(nx.is_directed_acyclic_graph(DG))

    def test_topological_sort3(self):
        """Orders are not unique here; validate order properties instead."""
        DG = nx.DiGraph()
        DG.add_edges_from([(1, i) for i in range(2, 5)])
        DG.add_edges_from([(2, i) for i in range(5, 9)])
        DG.add_edges_from([(6, i) for i in range(9, 12)])
        DG.add_edges_from([(4, i) for i in range(12, 15)])

        def validate(order):
            # A valid topological order covers all nodes and never places
            # a node before one of its ancestors.
            ok_(isinstance(order, list))
            assert_equal(set(order), set(DG))
            for u, v in combinations(order, 2):
                assert_false(nx.has_path(DG, v, u))
        validate(nx.topological_sort_recursive(DG))
        validate(nx.topological_sort(DG))

        # A cycle through node 1 makes the sort unfeasible again.
        DG.add_edge(14, 1)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort, DG)
        assert_raises(nx.NetworkXUnfeasible, nx.topological_sort_recursive, DG)

    def test_topological_sort4(self):
        """Undirected graphs must be rejected with NetworkXError."""
        G = nx.Graph()
        G.add_edge(1, 2)
        assert_raises(nx.NetworkXError, nx.topological_sort, G)
        assert_raises(nx.NetworkXError, nx.topological_sort_recursive, G)

    def test_topological_sort5(self):
        """Smallest non-trivial DAG: a single edge."""
        G = nx.DiGraph()
        G.add_edge(0, 1)
        assert_equal(nx.topological_sort_recursive(G), [0, 1])
        assert_equal(nx.topological_sort(G), [0, 1])

    def test_nbunch_argument(self):
        """The optional nbunch restricts which start nodes are explored."""
        G = nx.DiGraph()
        G.add_edges_from([(1, 2), (2, 3), (1, 4), (1, 5), (2, 6)])
        assert_equal(nx.topological_sort(G), [1, 2, 3, 6, 4, 5])
        assert_equal(nx.topological_sort_recursive(G), [1, 5, 4, 2, 6, 3])
        assert_equal(nx.topological_sort(G, [1]), [1, 2, 3, 6, 4, 5])
        assert_equal(nx.topological_sort_recursive(G, [1]), [1, 5, 4, 2, 6, 3])
        assert_equal(nx.topological_sort(G, [5]), [5])
        assert_equal(nx.topological_sort_recursive(G, [5]), [5])

    def test_ancestors(self):
        """ancestors() returns all nodes with a path TO the target."""
        G = nx.DiGraph()
        ancestors = nx.algorithms.dag.ancestors
        G.add_edges_from([
            (1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
        assert_equal(ancestors(G, 6), set([1, 2, 4, 5]))
        assert_equal(ancestors(G, 3), set([1, 4]))
        assert_equal(ancestors(G, 1), set())
        # Unknown node raises.
        assert_raises(nx.NetworkXError, ancestors, G, 8)

    def test_descendants(self):
        """descendants() returns all nodes reachable FROM the source."""
        G = nx.DiGraph()
        descendants = nx.algorithms.dag.descendants
        G.add_edges_from([
            (1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)])
        assert_equal(descendants(G, 1), set([2, 3, 6]))
        assert_equal(descendants(G, 4), set([2, 3, 5, 6]))
        assert_equal(descendants(G, 3), set())
        assert_raises(nx.NetworkXError, descendants, G, 8)

    def test_transitive_closure(self):
        """Closure adds an edge for every reachable pair; digraphs only."""
        G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
        transitive_closure = nx.algorithms.dag.transitive_closure
        solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
        assert_edges_equal(transitive_closure(G).edges(), solution)
        G = nx.DiGraph([(1, 2), (2, 3), (2, 4)])
        solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)]
        assert_edges_equal(transitive_closure(G).edges(), solution)
        # Undirected input is not implemented.
        G = nx.Graph([(1, 2), (2, 3), (3, 4)])
        assert_raises(nx.NetworkXNotImplemented, transitive_closure, G)

    def _check_antichains(self, solution, result):
        # Compare as sets of frozensets: antichain enumeration order and
        # the order of nodes inside each antichain are unspecified.
        sol = [frozenset(a) for a in solution]
        res = [frozenset(a) for a in result]
        assert_true(set(sol) == set(res))

    def test_antichains(self):
        """Enumerate all antichains of several DAGs, including edge cases."""
        antichains = nx.algorithms.dag.antichains
        # A chain: every antichain has at most one element.
        G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
        solution = [[], [4], [3], [2], [1]]
        self._check_antichains(list(antichains(G)), solution)
        G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)])
        solution = [[], [4], [7], [7, 4], [6], [6, 4], [6, 7], [6, 7, 4],
                    [5], [5, 4], [3], [3, 4], [2], [1]]
        self._check_antichains(list(antichains(G)), solution)
        G = nx.DiGraph([(1, 2), (1, 3), (3, 4), (3, 5), (5, 6)])
        solution = [[], [6], [5], [4], [4, 6], [4, 5], [3], [2], [2, 6],
                    [2, 5], [2, 4], [2, 4, 6], [2, 4, 5], [2, 3], [1]]
        self._check_antichains(list(antichains(G)), solution)
        G = nx.DiGraph({0: [1, 2], 1: [4], 2: [3], 3: [4]})
        solution = [[], [4], [3], [2], [1], [1, 3], [1, 2], [0]]
        self._check_antichains(list(antichains(G)), solution)
        # Empty graph: only the empty antichain.
        G = nx.DiGraph()
        self._check_antichains(list(antichains(G)), [[]])
        # Edgeless graph: every subset of nodes is an antichain.
        G = nx.DiGraph()
        G.add_nodes_from([0, 1, 2])
        solution = [[], [0], [1], [1, 0], [2], [2, 0], [2, 1], [2, 1, 0]]
        self._check_antichains(list(antichains(G)), solution)
        # Undirected graphs and cyclic digraphs are rejected.
        f = lambda x: list(antichains(x))
        G = nx.Graph([(1, 2), (2, 3), (3, 4)])
        assert_raises(nx.NetworkXNotImplemented, f, G)
        G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
        assert_raises(nx.NetworkXUnfeasible, f, G)

    def test_dag_longest_path(self):
        """Longest path by edge count, then by (possibly negative) weight."""
        longest_path = nx.algorithms.dag.dag_longest_path
        G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)])
        assert_equal(longest_path(G), [1, 2, 3, 5, 6])
        G = nx.DiGraph(
            [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)])
        assert_equal(longest_path(G), [1, 2, 3, 4, 5])
        G = nx.Graph()
        assert_raises(nx.NetworkXNotImplemented, longest_path, G)

        # Weighted: the negative edge keeps node 1 off the optimal path.
        G = nx.DiGraph()
        G.add_weighted_edges_from(
            [(1, 2, -5), (2, 3, 0), (3, 4, 1), (4, 5, 2), (3, 5, 4), (5, 6, 0), (1, 6, 2)])
        assert_equal(longest_path(G), [2, 3, 5, 6])

    def test_dag_longest_path_length(self):
        """Length counterpart of the longest-path tests above."""
        longest_path_length = nx.algorithms.dag.dag_longest_path_length
        G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)])
        assert_equal(longest_path_length(G), 4)
        G = nx.DiGraph(
            [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)])
        assert_equal(longest_path_length(G), 4)
        G = nx.Graph()
        assert_raises(nx.NetworkXNotImplemented, longest_path_length, G)

        G = nx.DiGraph()
        G.add_weighted_edges_from(
            [(1, 2, -5), (2, 3, 0), (3, 4, 1), (4, 5, 2), (3, 5, 4), (5, 6, 0), (1, 6, 2)])
        assert_equal(longest_path_length(G), 3)
def test_is_aperiodic_cycle():
    """A single directed 4-cycle has period 4, so it is not aperiodic."""
    dg = nx.DiGraph()
    dg.add_cycle([1, 2, 3, 4])
    assert_false(nx.is_aperiodic(dg))
def test_is_aperiodic_cycle2():
    """Overlapping cycles of coprime lengths (4 and 5) give gcd 1."""
    dg = nx.DiGraph()
    dg.add_cycle([1, 2, 3, 4])
    dg.add_cycle([3, 4, 5, 6, 7])
    assert_true(nx.is_aperiodic(dg))
def test_is_aperiodic_cycle3():
    """Two overlapping cycles of length 4 leave the period at 4."""
    dg = nx.DiGraph()
    dg.add_cycle([1, 2, 3, 4])
    dg.add_cycle([3, 4, 5, 6])
    assert_false(nx.is_aperiodic(dg))
def test_is_aperiodic_cycle4():
    """A chord adds a 3-cycle next to the 4-cycle: gcd(4, 3) = 1."""
    dg = nx.DiGraph()
    dg.add_cycle([1, 2, 3, 4])
    dg.add_edge(1, 3)
    assert_true(nx.is_aperiodic(dg))
def test_is_aperiodic_selfloop():
    """A self-loop is a cycle of length 1, forcing the gcd to 1."""
    dg = nx.DiGraph()
    dg.add_cycle([1, 2, 3, 4])
    dg.add_edge(1, 1)
    assert_true(nx.is_aperiodic(dg))
def test_is_aperiodic_raise():
    """Undirected input is rejected with NetworkXError."""
    undirected = nx.Graph()
    assert_raises(nx.NetworkXError, nx.is_aperiodic, undirected)
def test_is_aperiodic_bipartite():
    """Bipartite digraph: every cycle has even length, hence periodic."""
    dg = nx.DiGraph(nx.davis_southern_women_graph())
    assert_false(nx.is_aperiodic(dg))
def test_is_aperiodic_rary_tree():
    """A directed tree has no cycles at all, so it is not aperiodic."""
    tree = nx.full_rary_tree(3, 27, create_using=nx.DiGraph())
    assert_false(nx.is_aperiodic(tree))
def test_is_aperiodic_disconnected():
    """Aperiodicity must hold for every component, not just one."""
    dg = nx.DiGraph()
    dg.add_cycle([1, 2, 3, 4])
    dg.add_cycle([5, 6, 7, 8])
    # Two pure 4-cycles: both components are periodic.
    assert_false(nx.is_aperiodic(dg))
    # Adding a chord to each component makes both aperiodic.
    dg.add_edge(1, 3)
    dg.add_edge(5, 7)
    assert_true(nx.is_aperiodic(dg))
def test_is_aperiodic_disconnected2():
    """An aperiodic self-loop component cannot rescue a periodic 3-cycle."""
    dg = nx.DiGraph()
    dg.add_cycle([0, 1, 2])
    dg.add_edge(3, 3)
    assert_false(nx.is_aperiodic(dg))
|
phdowling/scikit-learn | refs/heads/master | sklearn/decomposition/incremental_pca.py | 199 | """Incremental Principal Components Analysis."""
# Author: Kyle Kastner <kastnerkyle@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from .base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _batch_mean_variance_update
class IncrementalPCA(_BasePCA):
    """Incremental principal components analysis (IPCA).
    Linear dimensionality reduction using Singular Value Decomposition of
    centered data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space.
    Depending on the size of the input data, this algorithm can be much more
    memory efficient than a PCA.
    This algorithm has constant memory complexity, on the order
    of ``batch_size``, enabling use of np.memmap files without loading the
    entire file into memory.
    The computational overhead of each SVD is
    ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
    remain in memory at a time. There will be ``n_samples / batch_size`` SVD
    computations to get the principal components, versus 1 large SVD of
    complexity ``O(n_samples * n_features ** 2)`` for PCA.
    Read more in the :ref:`User Guide <IncrementalPCA>`.
    Parameters
    ----------
    n_components : int or None, (default=None)
        Number of components to keep. If ``n_components `` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.
    batch_size : int or None, (default=None)
        The number of samples to use for each batch. Only used when calling
        ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
        is inferred from the data and set to ``5 * n_features``, to provide a
        balance between approximation accuracy and memory consumption.
    copy : bool, (default=True)
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.
    whiten : bool, optional
        When True (False by default) the ``components_`` vectors are divided
        by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
        with unit component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making data respect some hard-wired assumptions.
    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
        Components with maximum variance.
    explained_variance_ : array, shape (n_components,)
        Variance explained by each of the selected components.
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If all components are stored, the sum of explained variances is equal
        to 1.0
    mean_ : array, shape (n_features,)
        Per-feature empirical mean, aggregate over calls to ``partial_fit``.
    var_ : array, shape (n_features,)
        Per-feature empirical variance, aggregate over calls to ``partial_fit``.
    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf.
    n_components_ : int
        The estimated number of components. Relevant when ``n_components=None``.
    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.
    Notes
    -----
    Implements the incremental PCA model from:
    `D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
    Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
    pp. 125-141, May 2008.`
    See http://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
    This model is an extension of the Sequential Karhunen-Loeve Transform from:
    `A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
    its Application to Images, IEEE Transactions on Image Processing, Volume 9,
    Number 8, pp. 1371-1374, August 2000.`
    See http://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
    We have specifically abstained from an optimization used by authors of both
    papers, a QR decomposition used in specific situations to reduce the
    algorithmic complexity of the SVD. The source for this technique is
    `Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
    section 5.4.4, pp 252-253.`. This technique has been omitted because it is
    advantageous only when decomposing a matrix with ``n_samples`` (rows)
    >= 5/3 * ``n_features`` (columns), and hurts the readability of the
    implemented algorithm. This would be a good opportunity for future
    optimization, if it is deemed necessary.
    References
    ----------
    D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
    Tracking, International Journal of Computer Vision, Volume 77,
    Issue 1-3, pp. 125-141, May 2008.
    G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
    Section 5.4.4, pp. 252-253.
    See also
    --------
    PCA
    RandomizedPCA
    KernelPCA
    SparsePCA
    TruncatedSVD
    """
    def __init__(self, n_components=None, whiten=False, copy=True,
                 batch_size=None):
        self.n_components = n_components
        self.whiten = whiten
        self.copy = copy
        self.batch_size = batch_size

    def fit(self, X, y=None):
        """Fit the model with X, using minibatches of size batch_size.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y: Passthrough for ``Pipeline`` compatibility.
        Returns
        -------
        self: object
            Returns the instance itself.
        """
        # fit() always restarts from scratch, unlike partial_fit() which
        # accumulates state across calls.
        self.components_ = None
        self.mean_ = None
        self.singular_values_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.noise_variance_ = None
        self.var_ = None
        self.n_samples_seen_ = 0
        # NOTE(review): np.float is the builtin-float alias, removed in
        # NumPy >= 1.24; modern code would pass np.float64 here.
        X = check_array(X, dtype=np.float)
        n_samples, n_features = X.shape

        # Default batch size trades approximation accuracy for memory
        # (see class docstring).
        if self.batch_size is None:
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size

        # Feed the data through partial_fit one minibatch at a time.
        for batch in gen_batches(n_samples, self.batch_size_):
            self.partial_fit(X[batch])
        return self

    def partial_fit(self, X, y=None):
        """Incremental fit with X. All of X is processed as a single batch.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        Returns
        -------
        self: object
            Returns the instance itself.
        """
        X = check_array(X, copy=self.copy, dtype=np.float)
        n_samples, n_features = X.shape
        if not hasattr(self, 'components_'):
            self.components_ = None

        # Resolve and validate the effective number of components.
        if self.n_components is None:
            self.n_components_ = n_features
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, need "
                             "more rows than columns for IncrementalPCA "
                             "processing" % (self.n_components, n_features))
        else:
            self.n_components_ = self.n_components

        if (self.components_ is not None) and (self.components_.shape[0]
                                               != self.n_components_):
            raise ValueError("Number of input features has changed from %i "
                             "to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value." % (
                                 self.components_.shape[0], self.n_components_))

        if self.components_ is None:
            # This is the first pass through partial_fit
            self.n_samples_seen_ = 0
            col_var = X.var(axis=0)
            col_mean = X.mean(axis=0)
            X -= col_mean
            U, S, V = linalg.svd(X, full_matrices=False)
            # svd_flip makes the component signs deterministic.
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / n_samples
            explained_variance_ratio = S ** 2 / np.sum(col_var *
                                                       n_samples)
        else:
            col_batch_mean = X.mean(axis=0)
            # Running update of the aggregate mean/variance over all
            # samples seen so far.
            col_mean, col_var, n_total_samples = _batch_mean_variance_update(
                X, self.mean_, self.var_, self.n_samples_seen_)
            X -= col_batch_mean
            # Build matrix of combined previous basis and new data
            # The mean-correction row accounts for the shift between the
            # previous aggregate mean and this batch's mean (Ross et al.).
            mean_correction = np.sqrt((self.n_samples_seen_ * n_samples) /
                                      n_total_samples) * (self.mean_ -
                                                          col_batch_mean)
            X_combined = np.vstack((self.singular_values_.reshape((-1, 1)) *
                                    self.components_, X,
                                    mean_correction))
            U, S, V = linalg.svd(X_combined, full_matrices=False)
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / n_total_samples
            explained_variance_ratio = S ** 2 / np.sum(col_var *
                                                       n_total_samples)

        # Keep only the leading n_components_ directions.
        self.n_samples_seen_ += n_samples
        self.components_ = V[:self.n_components_]
        self.singular_values_ = S[:self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[:self.n_components_]
        self.explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        # Discarded variance is the PPCA noise estimate (Tipping & Bishop).
        if self.n_components_ < n_features:
            self.noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self.noise_variance_ = 0.
        return self
|
ericMayer/tekton-master | refs/heads/master | backend/venv/lib/python2.7/site-packages/pip/status_codes.py | 408 | SUCCESS = 0
# Process exit codes returned by pip commands (SUCCESS = 0 above); the
# names describe the condition each non-zero code reports.
ERROR = 1
UNKNOWN_ERROR = 2
VIRTUALENV_NOT_FOUND = 3
PREVIOUS_BUILD_DIR_ERROR = 4
NO_MATCHES_FOUND = 23
|
tsteward/the-blue-alliance | refs/heads/master | datafeeds/datafeed_fms.py | 12 | import logging
from datafeeds.datafeed_base import DatafeedBase
from datafeeds.fms_event_list_parser import FmsEventListParser
from datafeeds.fms_team_list_parser import FmsTeamListParser
from models.event import Event
from models.team import Team
class DatafeedFms(DatafeedBase):
    """Datafeed for the FMS pages on my.usfirst.org: fetches and parses
    the official event and team lists into Event/Team models."""

    FMS_EVENT_LIST_URL = "https://my.usfirst.org/frc/scoring/index.lasso?page=eventlist"

    # Raw fast teamlist, no tpids
    FMS_TEAM_LIST_URL = "https://my.usfirst.org/frc/scoring/index.lasso?page=teamlist"

    def __init__(self, *args, **kw):
        super(DatafeedFms, self).__init__(*args, **kw)

    def getFmsEventList(self):
        """Fetch and parse the FMS event list; events are marked official."""
        events, _ = self.parse(self.FMS_EVENT_LIST_URL, FmsEventListParser)
        models = []
        for event in events:
            year = event.get("year", None)
            event_short = event.get("event_short", None)
            models.append(Event(
                id="%s%s" % (year, event_short),
                end_date=event.get("end_date", None),
                event_short=event_short,
                first_eid=event.get("first_eid", None),
                name=event.get("name", None),
                official=True,
                start_date=event.get("start_date", None),
                venue=event.get("venue", None),
                year=year,
            ))
        return models

    def getFmsTeamList(self):
        """Fetch and parse the raw FMS team list into Team models."""
        teams, _ = self.parse(self.FMS_TEAM_LIST_URL, FmsTeamListParser)
        models = []
        for team in teams:
            team_number = team.get("team_number", None)
            models.append(Team(
                id="frc%s" % team_number,
                name=self._shorten(team.get("name", None)),
                nickname=self._shorten(team.get("nickname", None)),
                team_number=team_number,
            ))
        return models
|
pygeek/django | refs/heads/master | django/contrib/admin/views/decorators.py | 230 | from functools import wraps
from django.utils.translation import ugettext as _
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.views import login
from django.contrib.auth import REDIRECT_FIELD_NAME
def staff_member_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, displaying the login page if necessary.
    """
    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        user = request.user
        if user.is_active and user.is_staff:
            # Valid staff user: run the wrapped admin view.
            return view_func(request, *args, **kwargs)

        assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."

        # Not authorised: show the admin login form, sending the user back
        # to the page they originally requested after logging in.
        path = request.get_full_path()
        return login(
            request,
            template_name='admin/login.html',
            authentication_form=AdminAuthenticationForm,
            extra_context={
                'title': _('Log in'),
                'app_path': path,
                REDIRECT_FIELD_NAME: path,
            },
        )
    return _checklogin
|
switchboardOp/ansible | refs/heads/devel | test/units/modules/network/eos/__init__.py | 12133432 | |
i5o/openshot-sugar | refs/heads/master | openshot/windows/fontselector.py | 3 | # This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
########################################################
# This is a custom font selector window
# that displays each font name in it's own font
#
########################################################
import os
import gtk, pango
from windows.SimpleGtkBuilderApp import SimpleGtkBuilderApp
# init the foreign language
from language import Language_Init
class frmFontProperties(SimpleGtkBuilderApp):
    """Custom font selector dialog.

    Lists every Pango font family, previews the chosen
    family/style/weight in a preview widget, and on OK writes
    font_family/font_style/font_weight back onto the calling form.
    Widgets (treeFontList, preview, btnBold, btnItalic, ...) are loaded
    from the fontselector.ui GtkBuilder file.
    """

    def __init__(self, instance, path="fontselector.ui", root="frmFontProperties", domain="OpenShot", project=None, **kwargs):
        SimpleGtkBuilderApp.__init__(self, os.path.join(project.UI_DIR, path), root, domain, **kwargs)

        # Add language support
        _ = Language_Init.Translator(project).lang.gettext

        # Form that opened this dialog; receives the chosen font settings.
        self.calling_form = instance

        #get the list of available fonts
        fonts = gtk.ListStore(str)
        self.init_treeview(self.treeFontList)
        pc = self.frmFontProperties.get_pango_context()
        for family in pc.list_families():
            fonts.append([family.get_name()])
        self.treeFontList.set_model(fonts)

        #sort the fonts alphabetically
        fonts.set_sort_column_id(0, gtk.SORT_ASCENDING)

        #add the callbacks
        self.treeFontList.connect("cursor-changed", self.family_changed_cb)
        self.btnItalic.connect("toggled", self.style_changed_cb)
        self.btnBold.connect("toggled", self.weight_changed_cb)

        self.frmFontProperties.show_all()

    def init_treeview(self, tv):
        """Add the single text column that renders each family name."""
        cell = gtk.CellRendererText()
        column = gtk.TreeViewColumn("Font family", cell, text=0, family=0)
        tv.append_column(column)

    def family_changed_cb(self, widget):
        """Preview the newly selected family and reset the style toggles."""
        index = self.treeFontList.get_cursor()[0][0]
        font_family = self.treeFontList.get_model()[index][0]
        pc = self.treeFontList.get_pango_context()
        fd = pc.get_font_description()
        fd.set_family(font_family)
        # Fixed 30pt preview size (Pango sizes are in pango.SCALE units).
        size = int(30) * pango.SCALE
        fd.set_size(size)
        self.preview.modify_font(fd)
        # New family starts from the regular style/weight.
        self.btnBold.set_active(False)
        self.btnItalic.set_active(False)

    def style_changed_cb(self, widget):
        """Toggle italic on the preview and record it on the calling form."""
        pc = self.preview.get_pango_context()
        fd = pc.get_font_description()
        if self.btnItalic.get_active():
            fd.set_style(pango.STYLE_ITALIC)
            self.calling_form.font_style = 'italic'
        else:
            fd.set_style(pango.STYLE_NORMAL)
            self.calling_form.font_style = 'normal'
        self.preview.modify_font(fd)

    def weight_changed_cb(self, widget):
        """Toggle bold on the preview and record it on the calling form."""
        pc = self.preview.get_pango_context()
        fd = pc.get_font_description()
        if self.btnBold.get_active():
            fd.set_weight(pango.WEIGHT_BOLD)
            self.calling_form.font_weight = 'bold'
        else:
            fd.set_weight(pango.WEIGHT_NORMAL)
            self.calling_form.font_weight = 'normal'
        self.preview.modify_font(fd)

    def on_btnCancel_clicked(self, widget):
        # Discard: close without touching the calling form's family.
        self.frmFontProperties.destroy()

    def on_btnOK_clicked(self, widget):
        # Commit the selected family and tell the caller to apply it.
        index = self.treeFontList.get_cursor()[0][0]
        font_family = self.treeFontList.get_model()[index][0]
        self.calling_form.font_family = font_family
        self.calling_form.set_font_style()
        self.frmFontProperties.destroy()
def main():
    # NOTE(review): frmFontProperties.__init__ requires a positional
    # `instance` argument and dereferences `project.UI_DIR` (project
    # defaults to None), so this zero-argument call raises. Looks like a
    # stale standalone entry point — confirm before relying on it.
    frm_fontProperties = frmFontProperties()
    frm_fontProperties.run()
# Allow running the selector standalone (see the review note in main()).
if __name__ == "__main__":
    main()
|
logicchains/ParticleBench | refs/heads/master | Py.py | 1 | from OpenGL.GL import *
from OpenGL.arrays import vbo
from OpenGL.GLUT import *
from OpenGL.GLU import *
#import pygame
#from pygame.locals import *
# Window configuration.
TITLE = "ParticleBench"
WIDTH = 800
HEIGHT = 600
# World-space bounds of the simulation volume.
MIN_X = -80
MAX_X = 80
MIN_Y = -90
MAX_Y = 50
MIN_DEPTH = 50
MAX_DEPTH = 250
# Emitter: particles spawn inside a START_RANGE-sized box at the top.
START_RANGE = 15
START_X = (MIN_X + (MIN_X+MAX_X)/2)
START_Y = MAX_Y
START_DEPTH = (MIN_DEPTH + (MIN_DEPTH+MAX_DEPTH)/2)
# Particle tuning: spawn rate, initial velocity/life/size caps.
POINTS_PER_SEC = 2000
MAX_INIT_VEL = 7
MAX_LIFE = 5000          # milliseconds (divided by 1000 at spawn time)
MAX_SCALE = 4
# Wind evolves every frame, bounded by MAX_WIND.
WIND_CHANGE = 2000
MAX_WIND = 3
SPAWN_INTERVAL = 0.01
#RUNNING_TIME = ((MAX_LIFE / 1000) * 5)
RUNNING_TIME = ((5) * 5)
MAX_PTS = (RUNNING_TIME * POINTS_PER_SEC)
# Lighting (RGBA) and light position for the fixed-function pipeline.
ambient = (0.8, 0.05, 0.1, 1)
diffuse = (1.0, 1.0, 1.0, 1)
lightPos = (MIN_X + (MAX_X-MIN_X)/2, MAX_Y, MIN_DEPTH, 0)
# Per-frame timing state and the frame-duration history buffer.
initT = 0.0
endT = 0.0
frameDur = 0.0
spwnTmr = 0.0
cleanupTmr = 0.0
runTmr = 0.0
frames = [0.0] * (RUNNING_TIME * 1000)
curFrame = 0
class Pt():
    """A single particle: position, velocity, radius and remaining life."""

    def __init__(self, X, Y, Z, VX, VY, VZ, R, Life, Alive):
        # Position in world space.
        self.X, self.Y, self.Z = X, Y, Z
        # Velocity components.
        self.VX, self.VY, self.VZ = VX, VY, VZ
        self.R = R          # radius; also damps the wind response
        self.Life = Life    # seconds of life remaining
        self.Alive = Alive  # truthy while the particle is simulated
# Pre-allocated particle pool; [minPt, maxPt) is the in-use window.
Pts = [Pt(0,0,0,0,0,0,0,0,0) for _ in range(MAX_PTS)]
maxPt = 0
minPt = 0
seed = 1234569   # xorshift PRNG state
gVBO = None      # OpenGL vertex buffer object name (set in initScene)
# Current wind vector and gravity constant.
windX = 0
windY = 0
windZ = 0
grav = 0.5
# Cube mesh, interleaved as position (x,y,z) + normal (nx,ny,nz) per vertex
# — presumably matching the commented-out glVertexPointer/glNormalPointer
# strides in initScene; verify before re-enabling the VBO upload.
Vertices = [-1.0, -1.0, 1.0, 0.0, 0.0, 1.0, 1.0, -1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,-1.0, 1.0, 1.0, 0.0, 0.0, 1.0, -1.0, -1.0, -1.0, 0.0, 0.0, -1.0, -1.0, 1.0, -1.0, 0.0, 0.0, -1.0, 1.0, 1.0, -1.0, 0.0, 0.0, -1.0, 1.0, -1.0, -1.0, 0.0, 0.0, -1.0, -1.0, 1.0, -1.0, 0.0, 1.0, 0.0, -1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, -1.0, 0.0, 1.0, 0.0, -1.0, -1.0, -1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 0.0, -1.0, 0.0, 1.0, -1.0, 1.0, 0.0, -1.0, 0.0, -1.0, -1.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0, 0.0, 0.0, 1.0, 1.0, -1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 1.0, 0.0, 0.0, -1.0, -1.0, -1.0, -1.0, 0.0, 0.0, -1.0, -1.0, 1.0, -1.0, 0.0, 0.0, -1.0, 1.0, 1.0, -1.0, 0.0, 0.0, -1.0, 1.0, -1.0, -1.0, 0.0, 0.0]
def rand():
    """Xorshift-32 PRNG (Marsaglia); advances `seed` and returns it.

    The C original this benchmark ports relies on 32-bit unsigned
    wraparound. Python ints are unbounded, so without masking the state
    grows without limit (slowing every call) and the 13/17/5 shift
    constants no longer realise the xorshift-32 recurrence. Mask after
    each left shift to stay within 32 bits.
    """
    global seed
    seed ^= (seed << 13) & 0xFFFFFFFF
    seed ^= seed >> 17
    seed ^= (seed << 5) & 0xFFFFFFFF
    return seed
def movPts(secs):
    """Advance every live particle in [minPt, maxPt) by `secs` seconds."""
    for i in range(minPt, maxPt):
        p = Pts[i]
        if p.Alive == 0:
            continue
        p.X += p.VX * secs
        p.Y += p.VY * secs
        p.Z += p.VZ * secs
        # Wind accelerates smaller (lighter) particles more: a = wind / R.
        p.VX += windX * 1 / p.R
        p.VY += windY * 1 / p.R
        p.VY -= grav
        p.VZ += windZ * 1 / p.R
        p.Life -= secs
        if p.Life <= 0:
            # Fix: the original assigned the undefined name `false`
            # (NameError at runtime); Python's boolean is `False`.
            p.Alive = False
def spwnPts(secs):
    """Spawn `secs * POINTS_PER_SEC` particles at the emitter.

    Fixes two runtime errors in the original:
    * it declared/used a global `numPts` that is never defined anywhere
      in the module (NameError on first call) — the pool's high-water
      counter is `maxPt`, so that is used instead;
    * `range()` was fed a float (`secs * POINTS_PER_SEC`), a TypeError
      under Python 3 — truncate to int first.
    """
    global maxPt
    num = int(secs * POINTS_PER_SEC)
    for i in range(0, num):
        pt = Pt(0 + rand() % START_RANGE - START_RANGE / 2,
                START_Y,
                START_DEPTH + rand() % START_RANGE - START_RANGE / 2,
                rand() % MAX_INIT_VEL,
                rand() % MAX_INIT_VEL,
                rand() % MAX_INIT_VEL,
                (rand() % (MAX_SCALE * 100)) / 200,
                (rand() % MAX_LIFE) / 1000,
                1)
        Pts[maxPt] = pt
        maxPt += 1
def doWind():
    """Randomly perturb the wind vector, clamping each axis to MAX_WIND.

    Fixes two bugs in the original:
    * `rand % WIND_CHANGE` applied `%` to the function object itself
      (TypeError) — `rand()` must be called;
    * `math.fabs` was used but `math` is never imported in this module —
      the builtin `abs()` is equivalent for floats and needs no import.
    """
    global windX
    global windY
    global windZ
    windX += ((rand() % WIND_CHANGE) / WIND_CHANGE - WIND_CHANGE / 2000) * frameDur
    windY += ((rand() % WIND_CHANGE) / WIND_CHANGE - WIND_CHANGE / 2000) * frameDur
    windZ += ((rand() % WIND_CHANGE) / WIND_CHANGE - WIND_CHANGE / 2000) * frameDur
    # Reflect (and damp) the wind when it exceeds the clamp.
    if abs(windX) > MAX_WIND:
        windX *= -0.5
    if abs(windY) > MAX_WIND:
        windY *= -0.5
    if abs(windZ) > MAX_WIND:
        windZ *= -0.5
def checkColls():
    """Clamp particles to the world box, reflecting velocity on impact.

    The reflection factor is -1.1, i.e. bounces gain 10% speed.
    Fix: the original compared against the undefined name `false`
    (NameError); use a plain truthiness test instead.
    """
    for i in range(minPt, maxPt):
        p = Pts[i]
        if not p.Alive:
            continue
        if p.X < MIN_X:
            p.X = MIN_X + p.R
            p.VX *= -1.1
        if p.X > MAX_X:
            p.X = MAX_X - p.R
            p.VX *= -1.1
        if p.Y < MIN_Y:
            p.Y = MIN_Y + p.R
            p.VY *= -1.1
        if p.Y > MAX_Y:
            p.Y = MAX_Y - p.R
            p.VY *= -1.1
        if p.Z < MIN_DEPTH:
            p.Z = MIN_DEPTH + p.R
            p.VZ *= -1.1
        if p.Z > MAX_DEPTH:
            p.Z = MAX_DEPTH - p.R
            p.VZ *= -1.1
def cleanupPtPool():
    """Advance minPt past the dead particles at the front of the pool.

    Fix: the original compared against the undefined name `true`
    (NameError); a plain truthiness test is used instead.
    """
    global minPt
    for i in range(minPt, maxPt):
        if Pts[i].Alive:
            minPt = i
            break
def initScene():
    """One-time OpenGL setup: depth test, lighting, camera, and the VBO."""
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_LIGHTING)
    glClearColor(0.1, 0.1, 0.6, 1.0)
    glClearDepth(1)
    glDepthFunc(GL_LEQUAL)
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambient)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse)
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos)
    glEnable(GL_LIGHT0)
    glViewport(0, 0, WIDTH, HEIGHT)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glFrustum(-1, 1, -1, 1, 1.0, 1000.0)
    glRotatef(20, 1, 0, 0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glPushMatrix()
    global gVBO
    # Fix: glGenBuffers returns the generated buffer name; the original
    # discarded it, leaving the module-level gVBO as None (so the
    # commented-out glBindBuffer below could never work once re-enabled).
    gVBO = glGenBuffers(1)
    # glBindBuffer( GL_ARRAY_BUFFER, gVBO );
    # glBufferData( GL_ARRAY_BUFFER, len(Vertices) * sizeOfFloat, array_type(*Vertices), GL_STATIC_DRAW )
    # glEnableClientState( GL_VERTEX_ARRAY )
    # glEnableClientState( GL_NORMAL_ARRAY )
    # glVertexPointer( 3, GL_FLOAT, 24, null )
    # glNormalPointer( GL_FLOAT, 12, 0)
    # glMatrixMode(GL_MODELVIEW)
SCREEN_SIZE = (800, 600)

if __name__ == '__main__':
    # GLUT bootstrap: create a double-buffered RGBA window. Note that no
    # display callback is registered (it is commented out below), so this
    # opens the window, runs the one-time scene setup, and blocks in the
    # GLUT main loop without drawing particles.
    glutInit()
    glutInitWindowSize(WIDTH,HEIGHT)
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA)
    glutCreateWindow(TITLE)
    # glutDisplayFunc(mainLoop)
    # pygame.init()
    # screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE|OPENGL|DOUBLEBUF)
    # resize(*SCREEN_SIZE)
    initScene()
    glutMainLoop()
|
MatrixGamesHub/mtxPython | refs/heads/master | src/mtxNet/rendererService/constants.py | 112 | #
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
from .ttypes import *
|
D4wN/brickv | refs/heads/master | src/brickv/bindings/bricklet_co2.py | 1 | # -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2015-11-17. #
# #
# Bindings Version 2.1.6 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
#### __DEVICE_IS_NOT_RELEASED__ ####
try:
from collections import namedtuple
except ImportError:
try:
from .ip_connection import namedtuple
except ValueError:
from ip_connection import namedtuple
try:
from .ip_connection import Device, IPConnection, Error
except ValueError:
from ip_connection import Device, IPConnection, Error
# Result wrappers so callers get named fields instead of bare tuples.
GetCO2ConcentrationCallbackThreshold = namedtuple('CO2ConcentrationCallbackThreshold', ['option', 'min', 'max'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletCO2(Device):
    """
    Measures CO2 concentration in ppm
    """
    DEVICE_IDENTIFIER = 262
    DEVICE_DISPLAY_NAME = 'CO2 Bricklet'

    # Wire-protocol IDs for the two callbacks this Bricklet can emit.
    CALLBACK_CO2_CONCENTRATION = 8
    CALLBACK_CO2_CONCENTRATION_REACHED = 9

    # Wire-protocol function IDs (must match the Bricklet firmware).
    FUNCTION_GET_CO2_CONCENTRATION = 1
    FUNCTION_SET_CO2_CONCENTRATION_CALLBACK_PERIOD = 2
    FUNCTION_GET_CO2_CONCENTRATION_CALLBACK_PERIOD = 3
    FUNCTION_SET_CO2_CONCENTRATION_CALLBACK_THRESHOLD = 4
    FUNCTION_GET_CO2_CONCENTRATION_CALLBACK_THRESHOLD = 5
    FUNCTION_SET_DEBOUNCE_PERIOD = 6
    FUNCTION_GET_DEBOUNCE_PERIOD = 7
    FUNCTION_GET_IDENTITY = 255

    # Single-character codes for the threshold-callback comparison mode.
    THRESHOLD_OPTION_OFF = 'x'
    THRESHOLD_OPTION_OUTSIDE = 'o'
    THRESHOLD_OPTION_INSIDE = 'i'
    THRESHOLD_OPTION_SMALLER = '<'
    THRESHOLD_OPTION_GREATER = '>'

    def __init__(self, uid, ipcon):
        """
        Creates an object with the unique device ID *uid* and adds it to
        the IP Connection *ipcon*.
        """
        Device.__init__(self, uid, ipcon)

        self.api_version = (2, 0, 0)

        # Declare, per function ID, whether a response packet is expected
        # (ALWAYS_TRUE for getters, TRUE for setters, ALWAYS_FALSE for
        # callback IDs) so the IP connection can track acknowledgements.
        self.response_expected[BrickletCO2.FUNCTION_GET_CO2_CONCENTRATION] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletCO2.FUNCTION_SET_CO2_CONCENTRATION_CALLBACK_PERIOD] = BrickletCO2.RESPONSE_EXPECTED_TRUE
        self.response_expected[BrickletCO2.FUNCTION_GET_CO2_CONCENTRATION_CALLBACK_PERIOD] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletCO2.FUNCTION_SET_CO2_CONCENTRATION_CALLBACK_THRESHOLD] = BrickletCO2.RESPONSE_EXPECTED_TRUE
        self.response_expected[BrickletCO2.FUNCTION_GET_CO2_CONCENTRATION_CALLBACK_THRESHOLD] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletCO2.FUNCTION_SET_DEBOUNCE_PERIOD] = BrickletCO2.RESPONSE_EXPECTED_TRUE
        self.response_expected[BrickletCO2.FUNCTION_GET_DEBOUNCE_PERIOD] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletCO2.CALLBACK_CO2_CONCENTRATION] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_FALSE
        self.response_expected[BrickletCO2.CALLBACK_CO2_CONCENTRATION_REACHED] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_FALSE
        self.response_expected[BrickletCO2.FUNCTION_GET_IDENTITY] = BrickletCO2.RESPONSE_EXPECTED_ALWAYS_TRUE

        # struct-style format of each callback's payload ('H' = uint16).
        self.callback_formats[BrickletCO2.CALLBACK_CO2_CONCENTRATION] = 'H'
        self.callback_formats[BrickletCO2.CALLBACK_CO2_CONCENTRATION_REACHED] = 'H'

    def get_co2_concentration(self):
        """
        Returns the measured CO2 concentration. The value is in
        `ppm (parts per million) <https://en.wikipedia.org/wiki/Parts-per_notation>`__
        and between 0 to TBD.

        If you want to get the CO2 concentration periodically, it is recommended to use the
        callback :func:`CO2Concentration` and set the period with
        :func:`SetCO2ConcentrationCallbackPeriod`.
        """
        return self.ipcon.send_request(self, BrickletCO2.FUNCTION_GET_CO2_CONCENTRATION, (), '', 'H')

    def set_co2_concentration_callback_period(self, period):
        """
        Sets the period in ms with which the :func:`CO2Concentration` callback is triggered
        periodically. A value of 0 turns the callback off.

        :func:`CO2Concentration` is only triggered if the co2_concentration has changed since the
        last triggering.

        The default value is 0.
        """
        self.ipcon.send_request(self, BrickletCO2.FUNCTION_SET_CO2_CONCENTRATION_CALLBACK_PERIOD, (period,), 'I', '')

    def get_co2_concentration_callback_period(self):
        """
        Returns the period as set by :func:`SetCO2ConcentrationCallbackPeriod`.
        """
        return self.ipcon.send_request(self, BrickletCO2.FUNCTION_GET_CO2_CONCENTRATION_CALLBACK_PERIOD, (), '', 'I')

    def set_co2_concentration_callback_threshold(self, option, min, max):
        """
        Sets the thresholds for the :func:`CO2ConcentrationReached` callback.

        The following options are possible:

        .. csv-table::
         :header: "Option", "Description"
         :widths: 10, 100

         "'x'",    "Callback is turned off"
         "'o'",    "Callback is triggered when the co2_concentration is *outside* the min and max values"
         "'i'",    "Callback is triggered when the co2_concentration is *inside* the min and max values"
         "'<'",    "Callback is triggered when the co2_concentration is smaller than the min value (max is ignored)"
         "'>'",    "Callback is triggered when the co2_concentration is greater than the min value (max is ignored)"

        The default value is ('x', 0, 0).
        """
        self.ipcon.send_request(self, BrickletCO2.FUNCTION_SET_CO2_CONCENTRATION_CALLBACK_THRESHOLD, (option, min, max), 'c H H', '')

    def get_co2_concentration_callback_threshold(self):
        """
        Returns the threshold as set by :func:`SetCO2ConcentrationCallbackThreshold`.
        """
        return GetCO2ConcentrationCallbackThreshold(*self.ipcon.send_request(self, BrickletCO2.FUNCTION_GET_CO2_CONCENTRATION_CALLBACK_THRESHOLD, (), '', 'c H H'))

    def set_debounce_period(self, debounce):
        """
        Sets the period in ms with which the threshold callbacks

        * :func:`CO2ConcentrationReached`,

        are triggered, if the thresholds

        * :func:`SetCO2ConcentrationCallbackThreshold`,

        keep being reached.

        The default value is 100.
        """
        self.ipcon.send_request(self, BrickletCO2.FUNCTION_SET_DEBOUNCE_PERIOD, (debounce,), 'I', '')

    def get_debounce_period(self):
        """
        Returns the debounce period as set by :func:`SetDebouncePeriod`.
        """
        return self.ipcon.send_request(self, BrickletCO2.FUNCTION_GET_DEBOUNCE_PERIOD, (), '', 'I')

    def get_identity(self):
        """
        Returns the UID, the UID where the Bricklet is connected to,
        the position, the hardware and firmware version as well as the
        device identifier.

        The position can be 'a', 'b', 'c' or 'd'.

        The device identifier numbers can be found :ref:`here <device_identifier>`.
        |device_identifier_constant|
        """
        return GetIdentity(*self.ipcon.send_request(self, BrickletCO2.FUNCTION_GET_IDENTITY, (), '', '8s 8s c 3B 3B H'))

    def register_callback(self, id, callback):
        """
        Registers a callback with ID *id* to the function *callback*.
        """
        self.registered_callbacks[id] = callback

CO2 = BrickletCO2 # for backward compatibility
|
mkmelin/bedrock | refs/heads/master | tests/pages/contribute/events.py | 11 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.contribute.base import ContributeBasePage
class ContributeEventsPage(ContributeBasePage):
    """Page object for the /contribute/events page."""

    URL_TEMPLATE = '/{locale}/contribute/events'

    # Rows of the events table body.
    _events_table_locator = (By.CSS_SELECTOR, '.events-table > tbody > tr')

    @property
    def events_table_is_displayed(self):
        # True when at least one event row is rendered and visible.
        return self.is_element_displayed(*self._events_table_locator)
|
louistin/fullstack | refs/heads/master | Python/others/return.py | 1 | #!/usr/bin/python
def func():
    """Return the integers 0..3 as a single tuple (multiple return values)."""
    return tuple(range(4))
# Multiple return values arrive as a single tuple; index into it to unpack.
value = func()
print(value[0])
print(value[3])
|
tectronics/kochief | refs/heads/master | kochief/cataloging/__init__.py | 12133432 | |
orione7/plugin.video.streamondemand-pureita | refs/heads/master | servers/vodlocker.py | 12133432 | |
fengbaicanhe/intellij-community | refs/heads/master | python/testData/resolve/multiFile/fromQualifiedPackageImportFile/mypackage/child/testfile.py | 12133432 | |
sebastien-forestier/NIPS2017 | refs/heads/master | ros/nips2017/src/nips2017/ergo/ergo.py | 1 | import rospy
import rosnode
import json
from sensor_msgs.msg import Joy
from nips2017.srv import *
from nips2017.msg import *
from poppy_msgs.srv import ReachTarget, ReachTargetRequest, SetCompliant, SetCompliantRequest
from sensor_msgs.msg import Joy, JointState
from std_msgs.msg import Bool
from rospkg import RosPack
from os.path import join
from .button import Button
class Ergo(object):
    """ROS node that servos a Poppy "Ergo" robot from two joysticks.

    Reads joystick topics, drives the robot through the poppy controller
    services (reach / set_compliant), and publishes the arm's circular
    state and the button state. Enters a compliant standby mode after a
    period of inactivity.
    """

    def __init__(self):
        self.rospack = RosPack()
        # Static configuration (rates, bounds, speeds, robot name...)
        # loaded from the package's config/ergo.json.
        with open(join(self.rospack.get_path('nips2017'), 'config', 'ergo.json')) as f:
            self.params = json.load(f)
        self.button = Button(self.params)
        self.rate = rospy.Rate(self.params['publish_rate'])

        # Service callers
        self.robot_reach_srv_name = '{}/reach'.format(self.params['robot_name'])
        self.robot_compliant_srv_name = '{}/set_compliant'.format(self.params['robot_name'])
        rospy.loginfo("Ergo node is waiting for poppy controllers...")
        rospy.wait_for_service(self.robot_reach_srv_name)
        rospy.wait_for_service(self.robot_compliant_srv_name)
        self.reach_proxy = rospy.ServiceProxy(self.robot_reach_srv_name, ReachTarget)
        self.compliant_proxy = rospy.ServiceProxy(self.robot_compliant_srv_name, SetCompliant)
        rospy.loginfo("Controllers connected!")

        self.state_pub = rospy.Publisher('ergo/state', CircularState, queue_size=1)
        self.button_pub = rospy.Publisher('ergo/button', Bool, queue_size=1)

        # Servoing state: goals is the last full 6-motor target, goal the
        # current rotation setpoint (m1 + m4 compensation, see go_to).
        self.goals = []
        self.goal = 0.
        # Latest axes of the two 2-axis joysticks.
        self.joy1_x = 0.
        self.joy1_y = 0.
        self.joy2_x = 0.
        self.joy2_y = 0.
        # Timestamp (secs) when the current joystick motion started; 0 = idle.
        self.motion_started_joy = 0.
        self.js = JointState()
        rospy.Subscriber('sensors/joystick/1', Joy, self.cb_joy_1)
        rospy.Subscriber('sensors/joystick/2', Joy, self.cb_joy_2)
        rospy.Subscriber('{}/joint_state'.format(self.params['robot_name']), JointState, self.cb_js)
        self.t = rospy.Time.now()
        self.srv_reset = None
        self.extended = False   # arm currently in the extended pose?
        self.standby = False    # motors currently compliant (standby)?
        self.last_activity = rospy.Time.now()
        # NOTE(review): initialised to a Time here but overwritten with a
        # float (seconds between loop iterations) in run() — confirm intent.
        self.delta_t = rospy.Time.now()

    def cb_js(self, msg):
        # Cache the latest joint state from the robot.
        self.js = msg

    def reach(self, target, duration):
        """Ask the robot to reach `target` ({motor_name: position}) in `duration` s."""
        js = JointState()
        js.name = target.keys()
        js.position = target.values()
        self.reach_proxy(ReachTargetRequest(target=js,
                                            duration=rospy.Duration(duration)))

    def set_compliant(self, compliant):
        # Toggle motor torque: compliant=True releases the motors.
        self.compliant_proxy(SetCompliantRequest(compliant=compliant))

    def cb_joy_1(self, msg):
        self.joy1_x = msg.axes[0]
        self.joy1_y = msg.axes[1]

    def cb_joy_2(self, msg):
        self.joy2_x = msg.axes[0]
        self.joy2_y = msg.axes[1]

    def go_to_start(self, slow=True):
        # Move all six motors to the neutral starting pose.
        self.go_to([0.0, -15.4, 35.34, 0.0, -15.69, 71.99], 4 if slow else 1)

    def go_to_extended(self):
        # Extend the arm (m2/m3/m5/m6 only; rotation motors untouched).
        extended = {'m2': 60, 'm3': -37, 'm5': -50, 'm6': 96}
        self.reach(extended, 0.5)
        self.extended = True

    def go_to_rest(self):
        # Retract the arm back to its rest pose.
        rest = {'m2': -26, 'm3': 59, 'm5': -30, 'm6': 78}
        self.reach(rest, 0.5)
        self.extended = False

    def is_controller_running(self):
        # Heuristic: any ROS node with 'controller' in its name counts as activity.
        return len([node for node in rosnode.get_node_names() if 'controller' in node]) > 0

    def go_or_resume_standby(self):
        """Enter compliant standby after inactivity; resume on new activity."""
        recent_activity = rospy.Time.now() - self.last_activity < rospy.Duration(self.params['auto_standby_duration'])
        if recent_activity and self.standby:
            rospy.loginfo("Ergo is resuming from standby")
            self.set_compliant(False)
            self.standby = False
        elif not self.standby and not recent_activity:
            rospy.loginfo("Ergo is entering standby mode")
            self.standby = True
            self.set_compliant(True)

        # A running experiment controller also counts as activity.
        if self.is_controller_running():
            self.last_activity = rospy.Time.now()

    def go_to(self, motors, duration):
        """Blocking move of all six motors; updates the rotation setpoint."""
        self.goals = motors
        # Rotation setpoint combines base (m1) and its compensating joint (m4).
        self.goal = self.goals[0] - self.goals[3]
        self.reach(dict(zip(['m1', 'm2', 'm3', 'm4', 'm5', 'm6'], motors)), duration)
        rospy.sleep(duration)

    def run(self):
        """Main loop: servo from joystick 1 and publish state/button topics."""
        self.go_to_start()
        self.last_activity = rospy.Time.now()
        self.srv_reset = rospy.Service('ergo/reset', Reset, self._cb_reset)
        rospy.loginfo('Ergo is ready and starts joystick servoing...')
        self.t = rospy.Time.now()
        while not rospy.is_shutdown():
            now = rospy.Time.now()
            self.delta_t = (now - self.t).to_sec()
            self.t = now

            self.go_or_resume_standby()
            self.servo_robot(self.joy1_y, self.joy1_x)
            self.publish_state()
            self.publish_button()

            # Update the last activity
            if abs(self.joy1_x) > self.params['min_joy_activity'] or abs(self.joy1_y) > self.params['min_joy_activity']:
                self.last_activity = rospy.Time.now()

            self.rate.sleep()

    def servo_axis_rotation(self, x):
        """Integrate joystick axis `x` into the rotation setpoint and servo m1/m4.

        The setpoint is clamped to the combined m1+m4 range; when it
        exceeds m1's own range the overflow is absorbed by m4.
        """
        # Dead zone around the joystick centre.
        x = x if abs(x) > self.params['sensitivity_joy'] else 0
        min_x = self.params['bounds'][0][0] + self.params['bounds'][3][0]
        max_x = self.params['bounds'][0][1] + self.params['bounds'][3][1]
        self.goal = min(max(min_x, self.goal + self.params['speed']*x*self.delta_t), max_x)

        if self.goal > self.params['bounds'][0][1]:
            new_x_m3 = self.params['bounds'][0][1] - self.goal
            new_x = self.params['bounds'][0][1]
        elif self.goal < self.params['bounds'][0][0]:
            new_x_m3 = self.params['bounds'][0][0] - self.goal
            new_x = self.params['bounds'][0][0]
        else:
            new_x = self.goal
            new_x_m3 = 0
        new_x_m3 = max(min(new_x_m3, self.params['bounds'][3][1]), self.params['bounds'][3][0])
        self.reach({'m1': new_x, 'm4': new_x_m3}, 0)  # Duration = 0 means joint teleportation

    def servo_axis_elongation(self, x):
        # Extend while the axis is pushed past the threshold, else retract.
        if x > self.params['min_joy_elongation']:
            self.go_to_extended()
        else:
            self.go_to_rest()

    def servo_robot(self, x, y):
        """Map joystick axes to rotation/elongation, after a debounce delay."""
        now = rospy.Time.now().to_sec()
        max_abs = max(abs(y), abs(x))
        if max_abs > self.params['sensitivity_joy'] and self.motion_started_joy == 0.:
            self.motion_started_joy = now
        elif max_abs < self.params['sensitivity_joy'] and self.motion_started_joy > 0.:
            self.motion_started_joy = 0.
            self.servo_axis_elongation(0)
        elif self.motion_started_joy > 0. and now - self.motion_started_joy > self.params['delay_joy']:
            # Axis-to-motion mapping depends on which joystick is configured
            # as the control joystick.
            if self.params['control_joystick_id'] == 2:
                self.servo_axis_rotation(-x)
                self.servo_axis_elongation(y)
            else:
                self.servo_axis_rotation(y)
                self.servo_axis_elongation(x)

    def publish_button(self):
        self.button_pub.publish(Bool(data=self.button.pressed))

    def publish_state(self):
        # TODO We might want a better state here, get the arena center, get EEF and do the maths as in environment/get_state
        if 'm1' in self.js.name and 'm4' in self.js.name:
            # Combined base rotation = m1 + m4 (assumes those are the first
            # and fourth entries of the joint state — TODO confirm ordering).
            angle = self.js.position[0] + self.js.position[3]
            self.state_pub.publish(CircularState(angle=angle, extended=self.extended))

    def _cb_reset(self, request):
        # ergo/reset service: move back to the starting pose.
        rospy.loginfo("Resetting Ergo...")
        self.go_to_start(request.slow)
        return ResetResponse()
|
n0n0x/fabtools-python | refs/heads/master | fabtools/shorewall.py | 14 | """
Shorewall firewall
==================
"""
from socket import gethostbyname
import re
from fabric.api import hide, settings
from fabtools.utils import run_as_root
def status():
    """
    Get the firewall status.

    Runs ``shorewall status`` as root and extracts the state word
    (e.g. ``'running'`` or ``'stopped'``) from its output.
    """
    with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
        res = run_as_root('shorewall status')
    return re.search(r'\nShorewall is (\w+)', res).group(1)
def is_started():
    """
    Check if the firewall is started.
    """
    return status() == 'running'
def is_stopped():
    """
    Check if the firewall is stopped.
    """
    return status() == 'stopped'
def hosts(hostnames, zone='net'):
    """
    Build a host list suitable for use in a firewall rule.

    Each hostname is resolved to its IPv4 address; the result has the
    form ``"zone:addr1,addr2,..."``.
    """
    resolved = ','.join(gethostbyname(name) for name in hostnames)
    return '%s:%s' % (zone, resolved)
def rule(port, action='ACCEPT', source='net', dest='$FW', proto='tcp'):
    """
    Helper to build a firewall rule.

    Examples::

        from fabtools.shorewall import rule

        # Rule to accept connections from example.com on port 1234
        r1 = rule(port=1234, source=hosts(['example.com']))

        # Rule to reject outgoing SMTP connections
        r2 = rule(port=25, action='REJECT', source='$FW', dest='net')

    """
    return dict(action=action,
                source=source,
                dest=dest,
                proto=proto,
                dest_port=port)
def Ping(**kwargs):
    """
    Helper to build a firewall rule for ICMP pings.

    Extra args will be passed to :py:func:`~fabtools.shorewall.rule`.
    """
    # ICMP type 8 (echo request) goes in the port column of a shorewall rule.
    return rule(port=8, proto='icmp', **kwargs)
def SSH(port=22, **kwargs):
    """
    Helper to build a firewall rule for SSH connections.

    Extra args will be passed to :py:func:`~fabtools.shorewall.rule`.
    """
    return rule(port, **kwargs)
def HTTP(port=80, **kwargs):
    """
    Helper to build a firewall rule for HTTP connections.

    Extra args will be passed to :py:func:`~fabtools.shorewall.rule`.
    """
    return rule(port, **kwargs)
def HTTPS(port=443, **kwargs):
    """
    Helper to build a firewall rule for HTTPS connections.

    Extra args will be passed to :py:func:`~fabtools.shorewall.rule`.
    """
    return rule(port, **kwargs)
def SMTP(port=25, **kwargs):
    """
    Helper to build a firewall rule for SMTP connections.

    Extra args will be passed to :py:func:`~fabtools.shorewall.rule`.
    """
    return rule(port, **kwargs)
|
etherkit/OpenBeacon2 | refs/heads/master | client/macos/venv/lib/python3.8/site-packages/pip/_internal/self_outdated_check.py | 11 | from __future__ import absolute_import
import datetime
import hashlib
import json
import logging
import os.path
import sys
from pip._vendor.packaging import version as packaging_version
from pip._vendor.six import ensure_binary
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.utils.filesystem import (
adjacent_tmp_file,
check_path_owner,
replace,
)
from pip._internal.utils.misc import (
ensure_dir,
get_distribution,
get_installed_version,
)
from pip._internal.utils.packaging import get_installer
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
import optparse
from typing import Any, Dict, Text, Union
from pip._internal.network.session import PipSession
# Timestamp format stored in the selfcheck state file (UTC, ISO-8601-like).
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"

logger = logging.getLogger(__name__)
def _get_statefile_name(key):
    # type: (Union[str, Text]) -> str
    """Return a stable, filesystem-safe file name derived from *key*."""
    return hashlib.sha224(ensure_binary(key)).hexdigest()
class SelfCheckState(object):
    """Persisted record of the last pip self-version check.

    State is a small JSON file under ``<cache_dir>/selfcheck/``, named by a
    hash of ``sys.prefix`` so each environment gets its own record. When no
    cache dir is configured the state is kept in memory only.
    """

    def __init__(self, cache_dir):
        # type: (str) -> None
        self.state = {}  # type: Dict[str, Any]
        self.statefile_path = None

        # Try to load the existing state
        if cache_dir:
            self.statefile_path = os.path.join(
                cache_dir, "selfcheck", _get_statefile_name(self.key)
            )
            try:
                with open(self.statefile_path) as statefile:
                    self.state = json.load(statefile)
            except (IOError, ValueError, KeyError):
                # Explicitly suppressing exceptions, since we don't want to
                # error out if the cache file is invalid.
                pass

    @property
    def key(self):
        # type: () -> str
        # The environment prefix identifies this interpreter/venv uniquely.
        return sys.prefix

    def save(self, pypi_version, current_time):
        # type: (str, datetime.datetime) -> None
        """Persist the latest known PyPI version and check time to disk."""
        # If we do not have a path to cache in, don't bother saving.
        if not self.statefile_path:
            return

        # Check to make sure that we own the directory
        if not check_path_owner(os.path.dirname(self.statefile_path)):
            return

        # Now that we've ensured the directory is owned by this user, we'll go
        # ahead and make sure that all our directories are created.
        ensure_dir(os.path.dirname(self.statefile_path))

        state = {
            # Include the key so it's easy to tell which pip wrote the
            # file.
            "key": self.key,
            "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
            "pypi_version": pypi_version,
        }

        text = json.dumps(state, sort_keys=True, separators=(",", ":"))

        # Write to a temp file next to the target, then atomically replace,
        # so a concurrent pip never sees a half-written state file.
        with adjacent_tmp_file(self.statefile_path) as f:
            f.write(ensure_binary(text))

        try:
            # Since we have a prefix-specific state file, we can just
            # overwrite whatever is there, no need to check.
            replace(f.name, self.statefile_path)
        except OSError:
            # Best effort.
            pass
def was_installed_by_pip(pkg):
    # type: (str) -> bool
    """Checks whether pkg was installed by pip

    This is used not to display the upgrade message when pip is in fact
    installed by system package manager, such as dnf on Fedora.
    """
    dist = get_distribution(pkg)
    if not dist:
        # Package not installed at all -> certainly not installed by pip.
        return False
    return "pip" == get_installer(dist)
def pip_self_version_check(session, options):
    # type: (PipSession, optparse.Values) -> None
    """Check for an update for pip.

    Limit the frequency of checks to once per week. State is stored either in
    the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
    of the pip script path.

    All failures are swallowed (logged at debug level only): a version check
    must never break the user's actual pip command.
    """
    installed_version = get_installed_version("pip")
    if not installed_version:
        return

    pip_version = packaging_version.parse(installed_version)
    pypi_version = None

    try:
        state = SelfCheckState(cache_dir=options.cache_dir)

        current_time = datetime.datetime.utcnow()
        # Determine if we need to refresh the state
        if "last_check" in state.state and "pypi_version" in state.state:
            last_check = datetime.datetime.strptime(
                state.state["last_check"],
                SELFCHECK_DATE_FMT
            )
            # Reuse the cached answer if the last check was under a week ago.
            if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
                pypi_version = state.state["pypi_version"]

        # Refresh the version if we need to or just see if we need to warn
        if pypi_version is None:
            # Lets use PackageFinder to see what the latest pip version is
            link_collector = LinkCollector.create(
                session,
                options=options,
                suppress_no_index=True,
            )

            # Pass allow_yanked=False so we don't suggest upgrading to a
            # yanked version.
            selection_prefs = SelectionPreferences(
                allow_yanked=False,
                allow_all_prereleases=False,  # Explicitly set to False
            )

            finder = PackageFinder.create(
                link_collector=link_collector,
                selection_prefs=selection_prefs,
            )
            best_candidate = finder.find_best_candidate("pip").best_candidate
            if best_candidate is None:
                return
            pypi_version = str(best_candidate.version)

            # save that we've performed a check
            state.save(pypi_version, current_time)

        remote_version = packaging_version.parse(pypi_version)

        local_version_is_older = (
            pip_version < remote_version and
            pip_version.base_version != remote_version.base_version and
            was_installed_by_pip('pip')
        )

        # Determine if our pypi_version is older
        if not local_version_is_older:
            return

        # We cannot tell how the current pip is available in the current
        # command context, so be pragmatic here and suggest the command
        # that's always available. This does not accommodate spaces in
        # `sys.executable`.
        pip_cmd = "{} -m pip".format(sys.executable)
        logger.warning(
            "You are using pip version %s; however, version %s is "
            "available.\nYou should consider upgrading via the "
            "'%s install --upgrade pip' command.",
            pip_version, pypi_version, pip_cmd
        )
    except Exception:
        logger.debug(
            "There was an error checking the latest version of pip",
            exc_info=True,
        )
|
lzw120/django | refs/heads/master | build/lib/django/contrib/gis/tests/relatedapp/tests.py | 198 | from __future__ import absolute_import
from datetime import date
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point # corresponds to City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
    "Testing F() expressions on GeometryFields."
    # Constructing a dummy parcel border and getting the City instance for
    # assigning the FK.
    b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
    pcity = City.objects.get(name='Aurora')

    # First parcel has incorrect center point that is equal to the City;
    # it also has a second border that is different from the first as a
    # 100ft buffer around the City.
    c1 = pcity.location.point
    c2 = c1.transform(2276, clone=True)  # SRID 2276 uses feet as units
    b2 = c2.buffer(100)
    p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)

    # Now creating a second Parcel where the borders are the same, just
    # in different coordinate systems.  The center points are also the
    # same (but in different coordinate systems), and this time they
    # actually correspond to the centroid of the border.
    c1 = b1.centroid
    c2 = c1.transform(2276, clone=True)
    p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)

    # Should return the second Parcel, which has the center within the
    # border.
    qs = Parcel.objects.filter(center1__within=F('border1'))
    self.assertEqual(1, len(qs))
    self.assertEqual('P2', qs[0].name)

    if not mysql:
        # This time center2 is in a different coordinate system and needs
        # to be wrapped in transformation SQL.
        qs = Parcel.objects.filter(center2__within=F('border1'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P2', qs[0].name)

    # Should return the first Parcel, which has the center point equal
    # to the point in the City ForeignKey.
    qs = Parcel.objects.filter(center1=F('city__location__point'))
    self.assertEqual(1, len(qs))
    self.assertEqual('P1', qs[0].name)

    if not mysql:
        # This time the city column should be wrapped in transformation SQL.
        qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P1', qs[0].name)
def test07_values(self):
    "Testing values() and values_list() and GeoQuerySets."
    # GeoQuerySet, GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
    gqs = Location.objects.all()
    gvqs = Location.objects.values()
    gvlqs = Location.objects.values_list()

    # Incrementing through each of the models, dictionaries, and tuples
    # returned by the different types of GeoQuerySets.
    for m, d, t in zip(gqs, gvqs, gvlqs):
        # The values should be Geometry objects and not raw strings returned
        # by the spatial database.
        self.assertTrue(isinstance(d['point'], Geometry))
        self.assertTrue(isinstance(t[1], Geometry))
        self.assertEqual(m.point, d['point'])
        self.assertEqual(m.point, t[1])
def test08_defer_only(self):
    "Testing defer() and only() on Geographic models."
    qs = Location.objects.all()
    def_qs = Location.objects.defer('point')
    for loc, def_loc in zip(qs, def_qs):
        # Accessing the deferred geometry triggers a follow-up query; the
        # value must still round-trip identically to the non-deferred one.
        self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
    "Ensuring correct primary key column is selected across relations. See #10757."
    # The expected ID values -- notice the last two location IDs
    # are out of order.  Dallas and Houston have location IDs that differ
    # from their PKs -- this is done to ensure that the related location
    # ID column is selected instead of the ID column for the city.
    city_ids = (1, 2, 3, 4, 5)
    loc_ids = (1, 2, 3, 5, 4)
    ids_qs = City.objects.order_by('id').values('id', 'location__id')
    for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
        self.assertEqual(val_dict['id'], c_id)
        self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
    "Testing the combination of two GeoQuerySets. See #10807."
    # Two disjoint buffers, each containing exactly one city; the OR of the
    # two querysets must contain both cities.
    buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
    buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
    qs1 = City.objects.filter(location__point__within=buf1)
    qs2 = City.objects.filter(location__point__within=buf2)
    combined = qs1 | qs2
    names = [c.name for c in combined]
    self.assertEqual(2, len(names))
    self.assertTrue('Aurora' in names)
    self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
    "Ensuring GeoQuery objects are unpickled correctly. See #10839."
    import pickle
    from django.contrib.gis.db.models.sql import GeoQuery
    qs = City.objects.all()
    q_str = pickle.dumps(qs.query)
    q = pickle.loads(q_str)
    # The round-tripped query must keep its GeoQuery class, not degrade
    # to a plain sql.Query.
    self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- we get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
#  ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
    "Testing `Count` aggregate use with the `GeoManager` on geo-fields."
    # The City, 'Fort Worth' uses the same location as Dallas.
    dallas = City.objects.get(name='Dallas')

    # Count annotation should be 2 for the Dallas location now.
    loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
    self.assertEqual(2, loc.num_cities)
def test12b_count(self):
    "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
    # Should only be one author (Trevor Paglen) returned by this query, and
    # the annotation should have 3 for the number of books, see #11087.
    # Also testing with a `GeoValuesQuerySet`, see #11489.
    qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
    vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
    self.assertEqual(1, len(qs))
    self.assertEqual(3, qs[0].num_books)
    self.assertEqual(1, len(vqs))
    self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
    "Testing `Count` aggregate with `.values()`. See #15305."
    qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
    self.assertEqual(1, len(qs))
    self.assertEqual(2, qs[0]['num_cities'])
    # The geometry column must survive values() as a GEOSGeometry, not a
    # raw database string.
    self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
    "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
    no_author = Book.objects.create(title='Without Author')
    b = Book.objects.select_related('author').get(title='Without Author')
    # Should be `None`, and not a 'dummy' model.
    self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
    "Testing the `collect` GeoQuerySet method and `Collect` aggregate."
    # Reference query:
    # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
    #    "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
    #    WHERE "relatedapp_city"."state" = 'TX';
    ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')

    c1 = City.objects.filter(state='TX').collect(field_name='location__point')
    c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']

    for coll in (c1, c2):
        # Even though Dallas and Ft. Worth share same point, Collect doesn't
        # consolidate -- that's why 4 points in MultiPoint.
        self.assertEqual(4, len(coll))
        self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
    "Testing doing select_related on the related name manager of a unique FK. See #13934."
    qs = Article.objects.select_related('author__article')
    # This triggers TypeError when `get_default_columns` has no `local_only`
    # keyword.  The TypeError is swallowed if QuerySet is actually
    # evaluated as list generation swallows TypeError in CPython, so we
    # render the SQL directly instead of iterating the queryset.
    sql = str(qs.query)
def test16_annotated_date_queryset(self):
    "Ensure annotated date querysets work if spatial backend is used.  See #14648."
    birth_years = [dt.year for dt in
                   list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
    birth_years.sort()
    self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
fyfcauc/android_external_chromium-org | refs/heads/du44 | third_party/tlslite/tlslite/utils/Cryptlib_TripleDES.py | 359 | """Cryptlib 3DES implementation."""
from cryptomath import *
from TripleDES import *
if cryptlibpyLoaded:

    def new(key, mode, IV):
        # Factory matching the PyCrypto-style module-level interface.
        return Cryptlib_TripleDES(key, mode, IV)

    class Cryptlib_TripleDES(TripleDES):
        """3DES-CBC cipher backed by the native cryptlib library."""

        def __init__(self, key, mode, IV):
            TripleDES.__init__(self, key, mode, IV, "cryptlib")
            # Create a cryptlib 3DES context, then load mode, key size,
            # key material, and IV -- cryptlib requires this exact order.
            self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_3DES)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key))
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key)
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV)

        def __del__(self):
            # Release the native context when the Python wrapper is collected.
            cryptlib_py.cryptDestroyContext(self.context)

        def encrypt(self, plaintext):
            # Parent call performs the shared state/length checks.
            TripleDES.encrypt(self, plaintext)
            bytes = stringToBytes(plaintext)
            # cryptEncrypt transforms the byte array in place.
            cryptlib_py.cryptEncrypt(self.context, bytes)
            return bytesToString(bytes)

        def decrypt(self, ciphertext):
            TripleDES.decrypt(self, ciphertext)
            bytes = stringToBytes(ciphertext)
            # cryptDecrypt transforms the byte array in place.
            cryptlib_py.cryptDecrypt(self.context, bytes)
            return bytesToString(bytes)
schleichdi2/OpenNfr_E2_Gui-6.0 | refs/heads/master | lib/python/Screens/TextBox.py | 13 | from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
class TextBox(Screen):
    """Simple screen that shows a scrollable, read-only block of text.

    OK and cancel both dismiss the screen; up/down scroll the text.
    """

    def __init__(self, session, text = ""):
        Screen.__init__(self, session)
        self.text = text
        self["text"] = ScrollLabel(self.text)

        # Map remote-control keys to their handlers; -1 gives the map
        # the same (high) priority the original code used.
        key_handlers = {
            "cancel": self.cancel,
            "ok": self.ok,
            "up": self["text"].pageUp,
            "down": self["text"].pageDown,
        }
        self["actions"] = ActionMap(["OkCancelActions", "DirectionActions"], key_handlers, -1)

    def ok(self):
        # OK behaves exactly like cancel: close the screen.
        self.close()

    def cancel(self):
        self.close()
|
michalliu/OpenWrt-Firefly-Libraries | refs/heads/master | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/idle_test/test_grep.py | 50 | """ !Changing this line will break Test_findfile.test_found!
Non-gui unit tests for idlelib.GrepDialog methods.
dummy_command calls grep_it calls findfiles.
An exception raised in one method will fail callers.
Otherwise, tests are mostly independent.
*** Currently only test grep_it.
"""
import unittest
from test.test_support import captured_stdout, findfile
from idlelib.idle_test.mock_tk import Var
from idlelib.GrepDialog import GrepDialog
import re
# grep_it is pointed at this very file; resolve its real path because
# __file__ may be unset/relative depending on how the tests are launched.
__file__ = findfile('idlelib/idle_test') + '/test_grep.py'
class Dummy_searchengine:
    '''GrepDialog.__init__ calls parent SearchDialogBase which attaches the
    passed in SearchEngine instance as attribute 'engine'.  Only a few of the
    many possible self.engine.x attributes are needed here.
    '''
    def getpat(self):
        # Return whatever pattern the test stashed on the instance.
        return self._pat

searchengine = Dummy_searchengine()
class Dummy_grep:
    # Methods tested, borrowed unbound from GrepDialog (Python 2 im_func).
    #default_command = GrepDialog.default_command
    grep_it = GrepDialog.grep_it.im_func
    findfiles = GrepDialog.findfiles.im_func
    # Other stuff needed by the borrowed methods.
    recvar = Var(False)
    engine = searchengine
    def close(self):  # gui method stub; grep_it calls it when done
        pass

grep = Dummy_grep()
class FindfilesTest(unittest.TestCase):
    # findfiles is really a function, not a method, could be iterator.
    # Planned (unwritten) assertions:
    # - a plain filename argument yields that filename
    # - idlelib contains many .py files
    # - the recursive flag adds the idle_test .py files
    pass
class Grep_itTest(unittest.TestCase):
    # Test captured reports with 0 and some hits.
    # Should test file names, but Windows reports have mixed / and \ separators
    # from incomplete replacement, so 'later'.
    def report(self, pat):
        # Run grep_it over this very file and return the captured stdout
        # split into lines (minus the empty trailer).
        grep.engine._pat = pat
        with captured_stdout() as s:
            grep.grep_it(re.compile(pat), __file__)
        lines = s.getvalue().split('\n')
        lines.pop()  # remove bogus '' after last \n
        return lines

    def test_unfound(self):
        pat = 'xyz*'*7
        lines = self.report(pat)
        self.assertEqual(len(lines), 2)
        self.assertIn(pat, lines[0])
        self.assertEqual(lines[1], 'No hits.')

    def test_found(self):
        # The pattern matches both the module docstring's first line and
        # the literal below -- hence 2 hits are expected.
        pat = '""" !Changing this line will break Test_findfile.test_found!'
        lines = self.report(pat)
        self.assertEqual(len(lines), 5)
        self.assertIn(pat, lines[0])
        self.assertIn('py: 1:', lines[1])  # line number 1
        self.assertIn('2', lines[3])  # hits found 2
        self.assertTrue(lines[4].startswith('(Hint:'))
class Default_commandTest(unittest.TestCase):
    # To write this, move the OutputWindow import to the top of GrepDialog
    # so it can be replaced by captured_stdout in class setup/teardown.
    pass
if __name__ == '__main__':
    # exit=False so the interpreter (e.g. IDLE shell) stays alive.
    unittest.main(verbosity=2, exit=False)
|
jonboiser/kolibri | refs/heads/develop | kolibri/core/deviceadmin/utils.py | 4 | import io
import logging
import os
import re
import sys
from datetime import datetime
from django import db
from django.conf import settings
import kolibri
from kolibri.utils.conf import KOLIBRI_HOME
# Import db instead of db.connections because we want to use an instance of
# connections that might be updated from outside.
logger = logging.getLogger(__name__)
# Use encoded text for Python 3 (doesn't work in Python 2!)
KWARGS_IO_READ = {'mode': 'r', 'encoding': 'utf-8'}
KWARGS_IO_WRITE = {'mode': 'w', 'encoding': 'utf-8'}

# Use binary file mode for Python 2 (doesn't work in Python 3!)
# Python 2's built-in open() has no 'encoding' argument, so on that
# interpreter the dump files are handled as raw bytes instead.
if sys.version_info < (3,):
    KWARGS_IO_READ = {'mode': 'rb'}
    KWARGS_IO_WRITE = {'mode': 'wb'}
class IncompatibleDatabase(Exception):
    """Raised when the configured database backend is not SQLite3."""
    pass
def default_backup_folder():
    """Return the default directory for database backups: <KOLIBRI_HOME>/backups."""
    return os.path.join(KOLIBRI_HOME, 'backups')
def get_dtm_from_backup_name(fname):
    """
    Return the date-time component embedded in an automated backup file
    name, e.g. 'db-v0.4.0_2017-01-02_03-04-05.dump' -> '2017-01-02_03-04-05'.

    Raises ValueError when the name does not follow the backup convention.
    """
    match = re.match(r"^db\-v[^_]+_(?P<dtm>[\d\-_]+).*\.dump$", fname)
    if match:
        return match.group("dtm")
    raise ValueError(
        "Tried to get date component of unparsed filename: {}".format(fname)
    )
def is_full_version(fname):
    """
    Return True when the backup file name was produced by exactly the
    currently running Kolibri version.

    Assumes version strings never contain underscores '_'.
    """
    # kolibri.__version__ can contain suffixes denoting alpha, beta,
    # post, dev etc., so the comparison is against the full string.
    return fname.startswith("db-v{}_".format(kolibri.__version__))
def dbbackup(old_version, dest_folder=None):
    """
    Sqlite3 only

    Backup database to dest_folder. Uses SQLite's built in iterdump():
    https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.iterdump

    Notice that it's important to add at least version and date to the path
    of the backup, otherwise you risk that upgrade activities carried out on
    the same date overwrite each other. It's also quite important for the user
    to know which version of Kolibri that a certain database should match.

    :param old_version: Kolibri version string embedded in the backup name
    :param dest_folder: Default is ~/.kolibri/backups/db-[version]-[date].dump
    :returns: Path of new backup file
    :raises IncompatibleDatabase: when the backend is not SQLite3
    """
    if 'sqlite3' not in settings.DATABASES['default']['ENGINE']:
        raise IncompatibleDatabase()

    if not dest_folder:
        dest_folder = default_backup_folder()

    # This file name is a convention, used to figure out the latest backup
    # that was made (by the dbrestore command)
    fname = "db-v{version}_{dtm}.dump".format(
        version=old_version,
        dtm=datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    )

    if not os.path.exists(dest_folder):
        os.makedirs(dest_folder)

    backup_path = os.path.join(dest_folder, fname)

    # Setting encoding=utf-8: io.open() is Python 2 compatible
    # See: https://github.com/learningequality/kolibri/issues/2875
    with io.open(backup_path, **KWARGS_IO_WRITE) as f:
        # If the connection hasn't been opened yet, then open it
        if not db.connections['default'].connection:
            db.connections['default'].connect()
        # iterdump() yields the whole database as SQL statements.
        for line in db.connections['default'].connection.iterdump():
            f.write(line)

    return backup_path
def dbrestore(from_file):
    """
    Sqlite3 only

    Restores the database given a special database dump file containing SQL
    statements.

    :param from_file: path of a dump file produced by ``dbbackup``
    :raises IncompatibleDatabase: when the backend is not SQLite3
    """
    if 'sqlite3' not in settings.DATABASES['default']['ENGINE']:
        raise IncompatibleDatabase()

    dst_file = settings.DATABASES['default']['NAME']

    # Close connections
    db.connections.close_all()

    # Wipe current database file
    if not db.connections['default'].is_in_memory_db():
        # Opening with "w" already truncates; the explicit truncate() is
        # redundant but harmless.
        with open(dst_file, "w") as f:
            f.truncate()
    else:
        logger.info("In memory database, not truncating: {}".format(dst_file))

    # Setting encoding=utf-8: io.open() is Python 2 compatible
    # See: https://github.com/learningequality/kolibri/issues/2875
    # NOTE(review): this uses the built-in open(), not io.open() as the
    # comment suggests; on Python 2 KWARGS_IO_READ is binary-mode only so
    # no encoding kwarg is passed there -- confirm intent.
    with open(from_file, **KWARGS_IO_READ) as f:
        db.connections['default'].connect()
        db.connections['default'].connection.executescript(
            f.read()
        )

    # Finally, it's okay to import models and open database connections.
    # We need this to avoid generating records with identical 'Instance ID'
    # and conflicting counters, in case the database we're overwriting had
    # already been synced with other devices.
    from morango.models import DatabaseIDModel
    DatabaseIDModel.objects.create()
def search_latest(search_root, fallback_version):
    """
    Return the path of the most recent usable backup file found in
    ``search_root``, or None when no matching file exists.

    Only files named ``db-v<fallback_version>_<datetime>.dump`` are
    considered; recency is decided by the lexicographically largest
    embedded datetime string (the names sort chronologically).
    """
    logger.info("Searching latest backup in {}...".format(search_root))

    newest = None  # file name (not path) of the best candidate so far
    newest_dtm = None

    # All file names have to be according to the fall back version.
    prefix = "db-v{}".format(fallback_version)

    backups = os.listdir(search_root)
    backups = filter(lambda f: f.endswith(".dump"), backups)
    backups = filter(lambda f: f.startswith(prefix), backups)

    # Everything is sorted alphanumerically, and since dates in the
    # filenames behave accordingly, we can now traverse the list
    # without having to access meta data, just use the file name.
    backups = list(backups)
    backups.sort()

    for backup in backups:
        try:
            dtm = get_dtm_from_backup_name(backup)
        except ValueError:
            continue
        # Always pick the newest version.  The explicit None check is
        # required: on Python 3, comparing str > None raises TypeError,
        # which would crash on the first non-full-version candidate.
        if is_full_version(backup) or newest_dtm is None or dtm > newest_dtm:
            newest_dtm = dtm
            newest = backup

    if newest:
        return os.path.join(search_root, newest)
    return None
|
zeyuanxy/fast-rcnn | refs/heads/master | lib/datasets/inria.py | 3 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import datasets
import datasets.inria
import os
import datasets.imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
class inria(datasets.imdb):
    """imdb subclass for the INRIA Person detection dataset.

    Single foreground class ('person') plus '__background__' at index 0.
    Region proposals come from precomputed selective-search .mat files.
    """

    def __init__(self, image_set, devkit_path):
        datasets.imdb.__init__(self, image_set)
        self._image_set = image_set
        self._devkit_path = devkit_path
        self._data_path = os.path.join(self._devkit_path, 'data')
        self._classes = ('__background__', # always index 0
                         'person')
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = ['.jpg', '.png']
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self._roidb_handler = self.selective_search_roidb

        # Specific config options
        self.config = {'cleanup' : True,
                       'use_salt' : True,
                       'top_k' : 2000}

        assert os.path.exists(self._devkit_path), \
                'Devkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)

    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self._image_index[i])

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        Tries each known extension (.jpg, .png) and keeps the first match.
        """
        for ext in self._image_ext:
            image_path = os.path.join(self._data_path, 'Images',
                                      index + ext)
            if os.path.exists(image_path):
                break
        assert os.path.exists(image_path), \
                'Path does not exist: {}'.format(image_path)
        return image_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        # Example path to image set file:
        # self._data_path + /ImageSets/val.txt
        image_set_file = os.path.join(self._data_path, 'ImageSets',
                                      self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        return image_index

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_inria_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb

    def selective_search_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included (except for the test set).

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                                  self.name + '_selective_search_roidb.pkl')

        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} ss roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        if self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            ss_roidb = self._load_selective_search_roidb(gt_roidb)
            roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self._load_selective_search_roidb(None)
        print len(roidb)  # debug: number of roidb entries
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote ss roidb to {}'.format(cache_file)

        return roidb

    def _load_selective_search_roidb(self, gt_roidb):
        # Loads precomputed selective-search boxes from a MATLAB .mat file.
        filename = os.path.abspath(os.path.join(self._devkit_path,
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['all_boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            # Reorder MATLAB (y1,x1,y2,x2) to (x1,y1,x2,y2) and make 0-based.
            box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def selective_search_IJCV_roidb(self):
        """
        Return the database of selective search regions of interest.
        Ground-truth ROIs are also included.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path,
                '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.
                format(self.name, self.config['top_k']))

        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} ss roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = self.gt_roidb()
        ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)
        roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote ss roidb to {}'.format(cache_file)

        return roidb

    def _load_selective_search_IJCV_roidb(self, gt_roidb):
        # Per-image .mat files, keeping only the top_k highest-ranked boxes.
        IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
                                                 'selective_search_IJCV_data',
                                                 self.name))
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in xrange(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def _load_inria_annotation(self, index):
        """
        Load image and bounding boxes info from txt files of INRIAPerson.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.txt')
        # print 'Loading: {}'.format(filename)
        with open(filename) as f:
            data = f.read()
        import re
        # Annotation lines look like: "(x1, y1) - (x2, y2)"
        objs = re.findall('\(\d+, \d+\)[\s\-]+\(\d+, \d+\)', data)

        num_objs = len(objs)

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)

        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            # Make pixel indexes 0-based
            coor = re.findall('\d+', obj)
            x1 = float(coor[0])
            y1 = float(coor[1])
            x2 = float(coor[2])
            y2 = float(coor[3])
            cls = self._class_to_ind['person']
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0

        overlaps = scipy.sparse.csr_matrix(overlaps)

        return {'boxes' : boxes,
                'gt_classes': gt_classes,
                'gt_overlaps' : overlaps,
                'flipped' : False}

    def _write_inria_results_file(self, all_boxes):
        # Write one VOC-style detection file per class; the PID "salt"
        # avoids clashes between concurrent evaluation runs.
        use_salt = self.config['use_salt']
        comp_id = 'comp4'
        if use_salt:
            comp_id += '-{}'.format(os.getpid())

        # VOCdevkit/results/comp4-44503_det_test_aeroplane.txt
        path = os.path.join(self._devkit_path, 'results', self.name, comp_id + '_')
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print 'Writing {} results file'.format(cls)
            filename = path + 'det_' + self._image_set + '_' + cls + '.txt'
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    # NOTE(review): `dets == []` on a numpy array yields an
                    # elementwise result; this relies on empty entries being
                    # actual lists -- confirm upstream contract.
                    if dets == []:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in xrange(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))
        return comp_id

    def _do_matlab_eval(self, comp_id, output_dir='output'):
        # Shell out to MATLAB to run the official VOC evaluation code.
        rm_results = self.config['cleanup']

        path = os.path.join(os.path.dirname(__file__),
                            'VOCdevkit-matlab-wrapper')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(datasets.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'setenv(\'LC_ALL\',\'C\'); voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\',{:d}); quit;"' \
               .format(self._devkit_path, comp_id,
                       self._image_set, output_dir, int(rm_results))
        print('Running:\n{}'.format(cmd))
        status = subprocess.call(cmd, shell=True)

    def evaluate_detections(self, all_boxes, output_dir):
        # Write result files, then evaluate them with the MATLAB wrapper.
        comp_id = self._write_inria_results_file(all_boxes)
        self._do_matlab_eval(comp_id, output_dir)

    def competition_mode(self, on):
        # Competition mode: deterministic file names and no cleanup, so
        # result files can be submitted as-is.
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True
if __name__ == '__main__':
    # Ad-hoc smoke test: build the roidb and drop into an IPython shell.
    d = datasets.inria('train', '')
    res = d.roidb
    from IPython import embed; embed()
|
EUDAT-B2SHARE/invenio-old | refs/heads/next | modules/bibindex/lib/bibindex_regression_tests.py | 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibIndex Regression Test Suite."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
import os
import re
from datetime import timedelta
from invenio.testutils import make_test_suite, run_test_suite, nottest, InvenioTestCase
from invenio.importutils import lazy_import
WordTable = lazy_import('invenio.bibindex_engine:WordTable')
get_word_tables = lazy_import('invenio.bibindex_engine:get_word_tables')
find_affected_records_for_index = lazy_import('invenio.bibindex_engine:find_affected_records_for_index')
get_recIDs_by_date_authority = lazy_import('invenio.bibindex_engine:get_recIDs_by_date_authority')
get_recIDs_by_date_bibliographic = lazy_import('invenio.bibindex_engine:get_recIDs_by_date_bibliographic')
create_range_list = lazy_import('invenio.bibindex_engine:create_range_list')
beautify_range_list = lazy_import('invenio.bibindex_engine:beautify_range_list')
get_last_updated_all_indexes = lazy_import('invenio.bibindex_engine:get_last_updated_all_indexes')
get_index_id_from_index_name = lazy_import('invenio.bibindex_engine_utils:get_index_id_from_index_name')
get_index_tags = lazy_import('invenio.bibindex_engine_utils:get_index_tags')
get_tag_indexes = lazy_import('invenio.bibindex_engine_utils:get_tag_indexes')
get_all_indexes = lazy_import('invenio.bibindex_engine_utils:get_all_indexes')
from invenio.bibindex_engine_config import CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \
CFG_BIBINDEX_INDEX_TABLE_TYPE, \
CFG_BIBINDEX_UPDATE_MESSAGE
task_low_level_submission = lazy_import('invenio.bibtask:task_low_level_submission')
from invenio.config import CFG_BINDIR, CFG_LOGDIR
run_sql = lazy_import('invenio.dbquery:run_sql')
deserialize_via_marshal = lazy_import('invenio.dbquery:deserialize_via_marshal')
from invenio.intbitset import intbitset
get_record = lazy_import('invenio.search_engine:get_record')
get_fieldvalues = lazy_import('invenio.search_engine_utils:get_fieldvalues')
get_index_strings_by_control_no = lazy_import('invenio.bibauthority_engine:get_index_strings_by_control_no')
get_control_nos_from_recID = lazy_import('invenio.bibauthority_engine:get_control_nos_from_recID')
run_sql_drop_silently = lazy_import('invenio.bibindex_engine_utils:run_sql_drop_silently')
bibupload = lazy_import('invenio.bibupload:bibupload')
xml_marc_to_records = lazy_import('invenio.bibupload:xml_marc_to_records')
wipe_out_record_from_all_tables = lazy_import('invenio.bibupload_regression_tests:wipe_out_record_from_all_tables')
record_get_field_value = lazy_import('invenio.bibrecord:record_get_field_value')
get_max_recid = lazy_import('invenio.bibsort_engine:get_max_recid')
def reindex_for_type_with_bibsched(index_name, force_all=False, *other_options):
    """Runs bibindex for the specified index and returns the task_id.
       @param index_name: name of the index to reindex
       @param force_all: if it's True function will reindex all records
       not just affected ones
       @param other_options: extra command-line arguments forwarded to the
       bibindex task submission
    """
    program = os.path.join(CFG_BINDIR, 'bibindex')
    args = ['bibindex', 'bibindex_regression_tests', '-w', index_name, '-u', 'admin']
    args.extend(other_options)
    if force_all:
        args.append("--force")
    # Submit the task, then execute it synchronously, discarding output.
    task_id = task_low_level_submission(*args)
    # task_id comes from task_low_level_submission (trusted), but note this
    # goes through a shell via os.system.
    COMMAND = "%s %s > /dev/null 2> /dev/null" % (program, str(task_id))
    os.system(COMMAND)
    return task_id
def prepare_for_index_update(index_id, parameters=None):
    """ Prepares SQL query for an update of an index in the idxINDEX table.
        Takes into account remove_stopwords, remove_html_markup, remove_latex_markup,
        tokenizer and last_updated as parameters to change.

        remove_html_markup and remove_latex_markup accepts these values:
                                        '' to leave it unchanged
                                        'Yes' to change it to 'Yes'
                                        'No' to change it to 'No'.
        For remove_stopwords instead of 'Yes' one must give the name of the file (for example: 'stopwords.kb')
        from CFG_ETCDIR/bibrank/ directory pointing at stopwords knowledge base.
        For tokenizer please specify the name of the tokenizer.
        For last_updated provide a date in format: '2013-01-31 00:00:00'

        Returns '' when there is nothing to update.

        @param index_id: id of the index to change
        @param parameters: dict with names of parameters and their new values
    """
    # Fix: the original used a mutable default argument ({}); use None as
    # the sentinel instead.  Behavior for all callers is unchanged.
    if not parameters:
        return ''
    # Only parameters with truthy values are written; '' means "leave as is".
    # NOTE: values are interpolated, not bound -- callers pass only trusted,
    # internally generated test values here.
    assignments = ", ".join(["%s='%s'" % (name, value)
                             for name, value in parameters.items()
                             if value])
    return "UPDATE idxINDEX SET " + assignments + " WHERE id=%s" % index_id
@nottest
def reindex_word_tables_into_testtables(index_name, recids = None, prefix = 'test', parameters={}, turn_off_virtual_indexes=True):
    """Function for setting up a test enviroment. Reindexes an index with a given name to a
       new temporary table with a given prefix. During the reindexing it changes some parameters
       of chosen index. It's useful for conducting tests concerning the reindexing process.
       Reindexes only idxWORDxxx tables.
       @param index_name: name of the index we want to reindex
       @param recids: None means reindexing all records, set ids of the records to update only part of them
       @param prefix: prefix for the new tabels, if it's set to boolean False function will reindex to original table
       @param parameters: dict with parameters and their new values; for more specific
       description take a look at 'prepare_for_index_update' function.
       @param turn_off_virtual_indexes: if True only specific index will be reindexed
       without connected virtual indexes
       @return: the index's previous last_updated timestamp so the caller can
       restore it when tearing the test environment down
    """
    index_id = get_index_id_from_index_name(index_name)
    query_update = prepare_for_index_update(index_id, parameters)
    # Remember the old timestamp before any change so it can be reverted later.
    last_updated = run_sql("""SELECT last_updated FROM idxINDEX WHERE id=%s""" % index_id)[0][0]
    test_tablename = "%s_idxWORD%02d" % (prefix, index_id)
    query_drop_forward_index_table = """DROP TABLE IF EXISTS %sF""" % test_tablename
    query_drop_reversed_index_table = """DROP TABLE IF EXISTS %sR""" % test_tablename
    # Fresh forward (term -> hitlist) and reversed (recid -> termlist) tables
    # mirroring the production idxWORDxxF/idxWORDxxR schema.
    query_create_forward_index_table = """CREATE TABLE %sF (
                                          id mediumint(9) unsigned NOT NULL auto_increment,
                                          term varchar(50) default NULL,
                                          hitlist longblob,
                                          PRIMARY KEY (id),
                                          UNIQUE KEY term (term)
                                          ) ENGINE=MyISAM""" % test_tablename
    query_create_reversed_index_table = """CREATE TABLE %sR (
                                           id_bibrec mediumint(9) unsigned NOT NULL,
                                           termlist longblob,
                                           type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
                                           PRIMARY KEY (id_bibrec,type)
                                           ) ENGINE=MyISAM""" % test_tablename
    run_sql_drop_silently(query_drop_forward_index_table)
    run_sql_drop_silently(query_drop_reversed_index_table)
    run_sql(query_create_forward_index_table)
    run_sql(query_create_reversed_index_table)
    if query_update:
        run_sql(query_update)
    # With a falsy prefix we write straight into the production idxWORD tables.
    pattern = 'idxWORD'
    if prefix:
        pattern = '%s_idxWORD' % prefix
    wordTable = WordTable(index_name=index_name,
                          index_id=index_id,
                          fields_to_index=get_index_tags(index_name),
                          table_name_pattern= pattern + '%02dF',
                          wordtable_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                          tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                          wash_index_terms=50)
    if turn_off_virtual_indexes:
        wordTable.turn_off_virtual_indexes()
    if recids:
        wordTable.add_recIDs(recids, 10000)
    else:
        # No explicit recids: index every record affected since last_updated
        # (bibliographic plus authority-linked ones), over the whole id range.
        recIDs_for_index = find_affected_records_for_index([index_name],
                                                           [[1, get_max_recid()]],
                                                           True)
        bib_recIDs = get_recIDs_by_date_bibliographic([], index_name)
        auth_recIDs = get_recIDs_by_date_authority([], index_name)
        final_recIDs = bib_recIDs | auth_recIDs
        final_recIDs = set(final_recIDs) & set(recIDs_for_index[index_name])
        final_recIDs = beautify_range_list(create_range_list(list(final_recIDs)))
        wordTable.add_recIDs(final_recIDs, 10000)
    return last_updated
@nottest
def remove_reindexed_word_testtables(index_name, prefix = 'test'):
    """
    Drops the prefix_idxWORDxxx tables that were created during tests.
    @param index_name: name of the index
    @param prefix: prefix for the tables
    """
    table_stem = "%s_idxWORD%02d" % (prefix, get_index_id_from_index_name(index_name))
    # Drop the forward (F) table first, then the reversed (R) one.
    for table_type in ("F", "R"):
        run_sql("""DROP TABLE IF EXISTS %s%s""" % (table_stem, table_type))
class BibIndexRemoveStopwordsTest(InvenioTestCase):
    """Tests remove_stopwords parameter of an index. Changes it in the database
       and reindexes from scratch into a new table to see the difference which is brought
       by change. Uses 'title' index.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod (with 'self' actually naming the
    # class) so test_counter/reindexed/last_updated are shared by all tests.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'title',
                parameters = {'remove_stopwords':'stopwords.kb',
                              'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the index only after the last (4th) test of this class
        if self.test_counter == 4:
            remove_reindexed_word_testtables('title')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('title'),
                parameters = {'remove_stopwords':'No',
                              'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_check_occurrences_of_stopwords_in_testable_word_of(self):
        """Tests if term 'of' is in the new reindexed table"""
        # NOTE(review): '08' is the hard-coded id of the 'title' index in the
        # demo database -- confirm against get_index_id_from_index_name('title')
        query = "SELECT hitlist FROM test_idxWORD08F WHERE term='of'"
        res = run_sql(query)
        self.assertEqual(0, len(res))

    def test_check_occurrences_of_stopwords_in_testable_word_everything(self):
        """Tests if term 'everything' is in the new reindexed table"""
        query = "SELECT hitlist FROM test_idxWORD08F WHERE term='everything'"
        res = run_sql(query)
        self.assertEqual(0, len(res))

    def test_compare_non_stopwords_occurrences_in_original_and_test_tables_word_theory(self):
        """Checks if stopwords removing has no influence on indexation of word 'theory' """
        word = "theori" #theori not theory, because of default stemming for title index
        query = "SELECT hitlist FROM test_idxWORD08F WHERE term='%s'" % word
        # string sentinels: if a query yields nothing, lengths still compare
        iset_removed = "iset_removed"
        iset_original = "iset_original"
        res = run_sql(query)
        if res:
            iset_removed = intbitset(res[0][0])
        query = "SELECT hitlist FROM idxWORD08F WHERE term='%s'" % word
        res = run_sql(query)
        if res:
            iset_original = intbitset(res[0][0])
        self.assertEqual(len(iset_removed), len(iset_original))

    def test_compare_non_stopwords_occurrences_in_original_and_test_tables_word_on(self):
        """Checks if stopwords removing has no influence on indexation of word 'o(n)' """
        word = "o(n)"
        query = "SELECT hitlist FROM test_idxWORD08F WHERE term='%s'" % word
        iset_removed = "iset_removed"
        iset_original = "iset_original"
        res = run_sql(query)
        if res:
            iset_removed = intbitset(res[0][0])
        query = "SELECT hitlist FROM idxWORD08F WHERE term='%s'" % word
        res = run_sql(query)
        if res:
            iset_original = intbitset(res[0][0])
        self.assertEqual(len(iset_removed), len(iset_original))
class BibIndexRemoveLatexTest(InvenioTestCase):
    """Tests remove_latex_markup parameter of an index. Changes it in the database
       and reindexes from scratch into a new table to see the difference which is brought
       by change. Uses 'abstract' index.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter/flag/timestamp are shared across all tests of the class.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'abstract',
                parameters = {'remove_latex_markup':'Yes',
                              'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the index only after the last (4th) test of this class
        if self.test_counter == 4:
            remove_reindexed_word_testtables('abstract')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('abstract'),
                parameters = {'remove_latex_markup':'No',
                              'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_check_occurrences_after_latex_removal_word_u1(self):
        """Tests how many times expression 'u(1)' occurs"""
        word = "u(1)"
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        # string sentinel: len() of it still works if the query yields nothing
        iset = "iset_change"
        if res:
            iset = intbitset(res[0][0])
        self.assertEqual(3, len(iset))

    def test_check_exact_occurrences_after_latex_removal_word_theta(self):
        """Tests where expression 'theta' occurs"""
        word = "theta"
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual([12], ilist)

    def test_compare_occurrences_after_and_before_latex_removal_math_expression(self):
        """Checks if latex removal has no influence on indexation of expression 's(u(n_1)*u(n_2))' """
        word = 's(u(n_1)*u(n_2))'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        ilist_test = []
        if res:
            iset = intbitset(res[0][0])
            ilist_test = iset.tolist()
        word = 's(u(n_1)*u(n_2))'
        query = "SELECT hitlist FROM idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        # sentinel default differs from [] so "both queries empty" still fails
        ilist = ["default_not_equal"]
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual(ilist, ilist_test)

    def test_check_occurrences_latex_expression_with_u1(self):
        """Tests influence of latex removal on record 80"""
        # LIKE pattern: any term containing 'over u(1)'
        word = '%over u(1)%'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term LIKE '%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual([80], ilist)
class BibIndexRemoveHtmlTest(InvenioTestCase):
    """Tests remove_html_markup parameter of an index. Changes it in the database
       and reindexes from scratch into a new table to see the difference which is brought
       by change. Uses 'abstract' index.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter/flag/timestamp are shared across all tests of the class.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'abstract',
                parameters = {'remove_html_markup':'Yes',
                              'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the index only after the last (2nd) test of this class
        if self.test_counter == 2:
            remove_reindexed_word_testtables('abstract')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('abstract'),
                parameters = {'remove_html_markup':'No',
                              'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_check_occurrences_after_html_removal_tag_p(self):
        """Tests if expression 'water-hog</p>' is not indexed after html markup removal"""
        word = 'water-hog</p>'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual(0, len(ilist))

    def test_check_occurrences_after_and_before_html_removal_word_style(self):
        """Tests html markup removal influence on expression 'style="width' """
        word = 'style="width'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        ilist_test = []
        if res:
            iset = intbitset(res[0][0])
            ilist_test = iset.tolist()
        query = "SELECT hitlist FROM idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('abstract'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        # the hit lists must differ: the term survives only without removal
        self.assertNotEqual(ilist, ilist_test)
class BibIndexYearIndexTest(InvenioTestCase):
    """
    Checks year index. Tests are different than those inside WebSearch module because
    they only test content and reindexation and not the search itself.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter/flag/timestamp are shared across all tests of the class.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'year',
                parameters = {'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the index only after the last (3rd) test of this class
        if self.test_counter == 3:
            remove_reindexed_word_testtables('year')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('year'),
                parameters = {'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_occurrences_in_year_index_1973(self):
        """checks content of year index for year 1973"""
        word = '1973'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('year'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual([34], ilist)

    def test_occurrences_in_year_index_2001(self):
        """checks content of year index for year 2001"""
        word = '2001'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('year'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual([2, 11, 12, 15], ilist)

    def test_comparison_for_number_of_items(self):
        """checks the reindexation of year index"""
        query_test = "SELECT count(*) FROM test_idxWORD%02dF" % get_index_id_from_index_name('year')
        query_orig = "SELECT count(*) FROM idxWORD%02dF" % get_index_id_from_index_name('year')
        # defaults differ (0 vs 1) so "both queries empty" cannot pass
        num_orig = 0
        num_test = 1
        res = run_sql(query_test)
        if res:
            num_test = res[0][0]
        res = run_sql(query_orig)
        if res:
            num_orig = res[0][0]
        self.assertEqual(num_orig, num_test)
class BibIndexAuthorCountIndexTest(InvenioTestCase):
    """
    Checks author count index. Tests are different than those inside WebSearch module because
    they only test content and reindexation and not the search itself.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter/flag/timestamp are shared across all tests of the class.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'authorcount',
                parameters = {'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the index only after the last (2nd) test of this class
        if self.test_counter == 2:
            remove_reindexed_word_testtables('authorcount')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('authorcount'),
                parameters = {'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_occurrences_in_authorcount_index(self):
        """checks content of authorcount index for papers with 4 authors"""
        word = '4'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('authorcount'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual([51, 54, 59, 66, 92, 96], ilist)

    def test_comparison_for_number_of_items(self):
        """checks the reindexation of authorcount index"""
        query_test = "SELECT count(*) FROM test_idxWORD%02dF" % get_index_id_from_index_name('authorcount')
        query_orig = "SELECT count(*) FROM idxWORD%02dF" % get_index_id_from_index_name('authorcount')
        # defaults differ (0 vs 1) so "both queries empty" cannot pass
        num_orig = 0
        num_test = 1
        res = run_sql(query_test)
        if res:
            num_test = res[0][0]
        res = run_sql(query_orig)
        if res:
            num_orig = res[0][0]
        self.assertEqual(num_orig, num_test)
class BibIndexItemCountIndexTest(InvenioTestCase):
    """
    Checks the itemcount index: how many copies of a book individual records
    hold, and which records match a particular number of copies.
    """

    def test_occurrences_in_itemcount_index_two_copies(self):
        """checks content of itemcount index for records with two copies of a book"""
        index_id = get_index_id_from_index_name('itemcount')
        rows = run_sql("SELECT hitlist FROM idxWORD%02dF WHERE term='%s'" % (index_id, '2'))
        matching_recids = intbitset(rows[0][0]).tolist() if rows else []
        self.assertEqual([31, 34], matching_recids)

    def test_records_for_number_of_copies_record1(self):
        """checks content of itemcount index for record: 1"""
        index_id = get_index_id_from_index_name('itemcount')
        rows = run_sql("SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=1" % index_id)
        self.assertEqual(deserialize_via_marshal(rows[0][0]), ['0'])

    def test_records_for_number_of_copies_record30(self):
        """checks content of itemcount index for record: 30"""
        index_id = get_index_id_from_index_name('itemcount')
        rows = run_sql("SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=30" % index_id)
        self.assertEqual(deserialize_via_marshal(rows[0][0]), ['1'])

    def test_records_for_number_of_copies_record32(self):
        """checks content of itemcount index for record: 32"""
        index_id = get_index_id_from_index_name('itemcount')
        rows = run_sql("SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=32" % index_id)
        self.assertEqual(deserialize_via_marshal(rows[0][0]), ['3'])
class BibIndexFiletypeIndexTest(InvenioTestCase):
    """
    Checks the filetype index. Unlike the WebSearch tests, these verify only
    the indexed content itself, not the search machinery built on top of it.
    """

    def test_occurances_of_tif_filetype(self):
        """tests which records has file with 'tif' extension"""
        rows = run_sql("SELECT hitlist FROM idxWORD%02dF where term='tif'"
                       % get_index_id_from_index_name('filetype'))
        recids = intbitset(rows[0][0]).tolist() if rows else []
        self.assertEqual(sorted(recids), [66, 71])

    def test_filetypes_of_records(self):
        """tests files extensions of record 1 and 77"""
        index_id = get_index_id_from_index_name('filetype')
        extensions = {}
        for recid in (1, 77):
            rows = run_sql("SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=%s"
                           % (index_id, recid))
            extensions[recid] = deserialize_via_marshal(rows[0][0])
        self.assertEqual(extensions[1], ['gif', 'jpg'])
        self.assertEqual(extensions[77], ['pdf', 'ps.gz'])
class BibIndexJournalIndexTest(InvenioTestCase):
    """
    Checks journal index. Tests are different than those inside WebSearch module because
    they only test content and reindexation and not the search itself.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter/flag/timestamp are shared across all tests of the class.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'journal',
                parameters = {'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the index only after the last (2nd) test of this class
        if self.test_counter == 2:
            remove_reindexed_word_testtables('journal')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('journal'),
                parameters = {'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_occurrences_in_journal_index(self):
        """checks content of journal index for phrase: 'prog. theor. phys.' """
        word = 'prog. theor. phys.'
        query = "SELECT hitlist FROM test_idxWORD%02dF WHERE term='%s'" % (get_index_id_from_index_name('journal'), word)
        res = run_sql(query)
        ilist = []
        if res:
            iset = intbitset(res[0][0])
            ilist = iset.tolist()
        self.assertEqual([86], ilist)

    def test_comparison_for_number_of_items(self):
        """checks the reindexation of journal index"""
        query_test = "SELECT count(*) FROM test_idxWORD%02dF" % get_index_id_from_index_name('journal')
        query_orig = "SELECT count(*) FROM idxWORD%02dF" % get_index_id_from_index_name('journal')
        # defaults differ (0 vs 1) so "both queries empty" cannot pass
        num_orig = 0
        num_test = 1
        res = run_sql(query_test)
        if res:
            num_test = res[0][0]
        res = run_sql(query_orig)
        if res:
            num_orig = res[0][0]
        self.assertEqual(num_orig, num_test)
class BibIndexCJKTokenizerTitleIndexTest(InvenioTestCase):
    """
    Checks CJK tokenization on title index.
    """
    # number of test methods already torn down; cleanup runs after the last one
    test_counter = 0
    # one-shot flag: the expensive reindexation runs only on the first setUp
    reindexed = False

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter/flag/timestamp are shared across all tests of the class.
    @classmethod
    def setUp(self):
        """reindexation to new table"""
        if not self.reindexed:
            self.last_updated = reindex_word_tables_into_testtables(
                'title',
                parameters = {'tokenizer':'BibIndexCJKTokenizer',
                              'last_updated':'0000-00-00 00:00:00'})
            self.reindexed = True

    @classmethod
    def tearDown(self):
        """cleaning up"""
        self.test_counter += 1
        # restore the default tokenizer only after the last (2nd) test
        if self.test_counter == 2:
            remove_reindexed_word_testtables('title')
            reverse_changes = prepare_for_index_update(
                get_index_id_from_index_name('title'),
                parameters = {'tokenizer':'BibIndexDefaultTokenizer',
                              'last_updated':self.last_updated})
            run_sql(reverse_changes)

    def test_splliting_and_indexing_CJK_characters_forward_table(self):
        """CJK Tokenizer - searching for a CJK term in title index, forward table"""
        # the term is a single CJK character in UTF-8 bytes
        query = "SELECT * from test_idxWORD%02dF where term='\xe6\x95\xac'" % get_index_id_from_index_name('title')
        res = run_sql(query)
        iset = []
        if res:
            iset = intbitset(res[0][2])
            iset = iset.tolist()
        self.assertEqual(iset, [104])

    def test_splliting_and_indexing_CJK_characters_reversed_table(self):
        """CJK Tokenizer - comparing terms for record with chinese poetry in title index, reverse table"""
        query = "SELECT * from test_idxWORD%02dR where id_bibrec='104'" % get_index_id_from_index_name('title')
        res = run_sql(query)
        iset = []
        if res:
            iset = deserialize_via_marshal(res[0][1])
        # expected: the four single CJK characters of the title, UTF-8 encoded
        self.assertEqual(iset, ['\xe6\x95\xac', '\xe7\x8d\xa8', '\xe4\xba\xad', '\xe5\x9d\x90'])
class BibIndexAuthorityRecordTest(InvenioTestCase):
    """Test if BibIndex correctly knows when to update the index for a
       bibliographic record if it is dependent upon an authority record changed
       within the given date range"""

    def test_authority_record_recently_updated(self):
        """bibindex - reindexing after recently changed authority record"""
        authRecID = 118
        index_name = 'author'
        table = "idxWORD%02dF" % get_index_id_from_index_name(index_name)
        reindex_for_type_with_bibsched(index_name)
        # touch the authority record so it counts as recently modified
        run_sql("UPDATE bibrec SET modification_date = now() WHERE id = %s", (authRecID,))
        # run bibindex again and inspect the bibsched task log for evidence
        # that the whole record range was scheduled for (re)indexing
        task_id = reindex_for_type_with_bibsched(index_name, force_all=True)
        filename = os.path.join(CFG_LOGDIR, 'bibsched_task_' + str(task_id) + '.log')
        _file = open(filename)
        text = _file.read() # small file
        _file.close()
        self.assertTrue(text.find(CFG_BIBINDEX_UPDATE_MESSAGE) >= 0)
        self.assertTrue(text.find(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % (table, 1, get_max_recid())) >= 0)

    def test_authority_record_enriched_index(self):
        """bibindex - test whether reverse index for bibliographic record
           contains words from referenced authority records"""
        bibRecID = 9
        authority_string = 'jonathan'
        index_name = 'author'
        table = "idxWORD%02dR" % get_index_id_from_index_name(index_name)
        reindex_for_type_with_bibsched(index_name, force_all=True)
        self.assertTrue(
            authority_string in deserialize_via_marshal(
                run_sql("SELECT termlist FROM %s WHERE id_bibrec = %s" % (table, bibRecID))[0][0]
            )
        )

    def test_indexing_of_deleted_authority_record(self):
        """bibindex - no info for indexing from deleted authority record"""
        recID = 119 # deleted record
        control_nos = get_control_nos_from_recID(recID)
        info = get_index_strings_by_control_no(control_nos[0])
        self.assertEqual([], info)

    def test_authority_record_get_values_by_bibrecID_from_tag(self):
        """bibindex - find authors in authority records for given bibrecID"""
        tags = ['100__a']
        bibRecID = 9
        values = []
        for tag in tags:
            # e.g. '100__a' -> '100__0': the subfield holding the control no.
            authority_tag = tag[0:3] + "__0"
            control_nos = get_fieldvalues(bibRecID, authority_tag)
            for control_no in control_nos:
                new_strings = get_index_strings_by_control_no(control_no)
                values.extend(new_strings)
        self.assertTrue('Ellis, Jonathan Richard' in values)
def insert_record_one_and_second_revision():
    """Inserts test record no. 1 and a second revision for that record.

    The first revision is inserted and indexed into all non-virtual word
    tables; the second revision (different author and title) is uploaded as a
    correction but deliberately left unindexed, so tests can observe the
    difference between indexed and pending revisions.
    @return: the bibrec id assigned to the inserted record
    """
    rev1 = """<record>
              <controlfield tag="001">123456789</controlfield>
              <controlfield tag="005">20110101000000.0</controlfield>
              <datafield tag ="100" ind1=" " ind2=" ">
                <subfield code="a">Close, John</subfield>
                <subfield code="u">DESY</subfield>
              </datafield>
              <datafield tag="245" ind1=" " ind2=" ">
                <subfield code="a">Particles world</subfield>
              </datafield>
              </record>"""
    # strip 001/005 so bibupload assigns a fresh recid and revision timestamp
    rev1_final = rev1.replace('<controlfield tag="001">123456789</controlfield>','')
    rev1_final = rev1_final.replace('<controlfield tag="005">20110101000000.0</controlfield>','')
    rev2 = rev1.replace('<subfield code="a">Close, John</subfield>', '<subfield code="a">Dawkins, Richard</subfield>')
    rev2 = rev2.replace('Particles world', 'Particles universe')
    rec1 = xml_marc_to_records(rev1_final)
    res = bibupload(rec1[0], opt_mode='insert')
    _id = res[1]
    rec = get_record(_id)
    _rev = record_get_field_value(rec, '005', '', '')
    #need to index for the first time
    indexes = get_all_indexes(virtual=False)
    wtabs = get_word_tables(indexes)
    for index_id, index_name, index_tags in wtabs:
        wordTable = WordTable(index_name=index_name,
                              index_id=index_id,
                              fields_to_index=index_tags,
                              table_name_pattern='idxWORD%02dF',
                              wordtable_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                              tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                              wash_index_terms=50)
        wordTable.add_recIDs([[_id, _id]], 10000)
    #upload the second revision, but don't index
    # the correction must carry the real recid and the current 005 revision id
    rev2_final = rev2.replace('123456789', str(_id))
    rev2_final = rev2_final.replace('20110101000000.0', _rev)
    rec2 = xml_marc_to_records(rev2_final)
    res = bibupload(rec2[0], opt_mode='correct')
    return _id
def insert_record_two_and_second_revision():
    """Inserts test record no. 2 and a revision for that record.

    Same scheme as insert_record_one_and_second_revision(): first revision is
    inserted and indexed, second revision (changed title) is uploaded as a
    correction but left unindexed.
    @return: the bibrec id assigned to the inserted record
    """
    rev1 = """<record>
              <controlfield tag="001">123456789</controlfield>
              <controlfield tag="005">20110101000000.0</controlfield>
              <datafield tag ="100" ind1=" " ind2=" ">
                <subfield code="a">Locke, John</subfield>
                <subfield code="u">UNITRA</subfield>
              </datafield>
              <datafield tag="245" ind1=" " ind2=" ">
                <subfield code="a">Collision course</subfield>
              </datafield>
              </record>"""
    # strip 001/005 so bibupload assigns a fresh recid and revision timestamp
    rev1_final = rev1.replace('<controlfield tag="001">123456789</controlfield>','')
    rev1_final = rev1_final.replace('<controlfield tag="005">20110101000000.0</controlfield>','')
    rev2 = rev1.replace('Collision course', 'Course of collision')
    rec1 = xml_marc_to_records(rev1_final)
    res = bibupload(rec1[0], opt_mode='insert')
    id_bibrec = res[1]
    rec = get_record(id_bibrec)
    _rev = record_get_field_value(rec, '005', '', '')
    #need to index for the first time
    indexes = get_all_indexes(virtual=False)
    wtabs = get_word_tables(indexes)
    for index_id, index_name, index_tags in wtabs:
        wordTable = WordTable(index_name=index_name,
                              index_id=index_id,
                              fields_to_index=index_tags,
                              table_name_pattern='idxWORD%02dF',
                              wordtable_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                              tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                              wash_index_terms=50)
        wordTable.add_recIDs([[id_bibrec, id_bibrec]], 10000)
    #upload the second revision, but don't index
    # the correction must carry the real recid and the current 005 revision id
    rev2_final = rev2.replace('123456789', str(id_bibrec))
    rev2_final = rev2_final.replace('20110101000000.0', _rev)
    rec2 = xml_marc_to_records(rev2_final)
    res = bibupload(rec2[0], opt_mode='correct')
    return id_bibrec
def create_index_tables(index_id):
    """Create the forward (...F) and reversed (...R) word tables for the
    index with the given id, unless they already exist."""
    forward_table_sql = """CREATE TABLE IF NOT EXISTS idxWORD%02dF (
                            id mediumint(9) unsigned NOT NULL auto_increment,
                            term varchar(50) default NULL,
                            hitlist longblob,
                            PRIMARY KEY (id),
                            UNIQUE KEY term (term)
                          ) ENGINE=MyISAM"""
    reversed_table_sql = """CREATE TABLE IF NOT EXISTS idxWORD%02dR (
                            id_bibrec mediumint(9) unsigned NOT NULL,
                            termlist longblob,
                            type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
                            PRIMARY KEY (id_bibrec,type)
                          ) ENGINE=MyISAM"""
    for statement in (forward_table_sql, reversed_table_sql):
        run_sql(statement % index_id)
def drop_index_tables(index_id):
    """Drop the forward (F) and reversed (R) word tables of the given index."""
    for table_type in ("F", "R"):
        run_sql("""DROP TABLE IF EXISTS idxWORD%02d%s""" % (index_id, table_type))
def create_virtual_index(index_id, dependent_indexes):
    """Create a new virtual index named 'testindex', bind it to the given
    dependent indexes and create its word tables."""
    run_sql("""INSERT INTO idxINDEX (id, name, tokenizer) VALUES (%s, 'testindex', 'BibIndexDefaultTokenizer')""" % index_id)
    binding_query = """INSERT INTO idxINDEX_idxINDEX VALUES (%s, %s)"""
    for dependent_name in dependent_indexes:
        run_sql(binding_query % (index_id, get_index_id_from_index_name(dependent_name)))
    create_index_tables(index_id)
def remove_virtual_index(index_id):
    """Remove all traces of a virtual index: its word tables, its idxINDEX
    row and its bindings to dependent indexes."""
    drop_index_tables(index_id)
    run_sql("""DELETE FROM idxINDEX WHERE id=%s""" % index_id)
    run_sql("""DELETE FROM idxINDEX_idxINDEX WHERE id_virtual=%s""" % index_id)
class BibIndexFindingAffectedIndexes(InvenioTestCase):
    """
    Checks if function 'find_affected_records_for_index'
    works correctly.
    """
    # number of test methods already torn down; restore runs after the last one
    counter = 0
    # indexes whose last_updated timestamps are rewound in setUp
    indexes = ['global', 'fulltext', 'caption', 'journal', 'miscellaneous', 'reportnumber', 'year']

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # counter and saved timestamps are shared across all tests of the class.
    @classmethod
    def setUp(self):
        if self.counter == 0:
            self.last_updated = dict(get_last_updated_all_indexes())
            res = run_sql("SELECT job_date FROM hstRECORD WHERE id_bibrec=10 AND affected_fields<>''")
            self.hst_date = res[0][0]
            # rewind each index to just before record 10's modification so
            # that the modification is seen as "not yet indexed"
            date_to_set = self.hst_date - timedelta(seconds=1)
            for index in self.indexes:
                run_sql("""UPDATE idxINDEX SET last_updated=%s
                           WHERE name=%s""", (str(date_to_set), index))

    @classmethod
    def tearDown(self):
        self.counter += 1
        # restore the original timestamps after all 8 tests have run
        if self.counter >= 8:
            for index in self.indexes:
                run_sql("""UPDATE idxINDEX SET last_updated=%s
                           WHERE name=%s""", (self.last_updated[index], index))

    def test_find_proper_indexes(self):
        """bibindex - checks if affected indexes are found correctly"""
        records_for_indexes = find_affected_records_for_index(get_all_indexes(virtual=False),
                                                              [[1,20]])
        self.assertEqual(sorted(['miscellaneous', 'fulltext', 'caption', 'journal', 'reportnumber', 'year']),
                         sorted(records_for_indexes.keys()))

    def test_find_proper_recrods_for_miscellaneous_index(self):
        """bibindex - checks if affected recids are found correctly for miscellaneous index"""
        records_for_indexes = find_affected_records_for_index(get_all_indexes(virtual=False),
                                                              [[1,20]])
        self.assertEqual(records_for_indexes['miscellaneous'], [10,12])

    def test_find_proper_records_for_year_index(self):
        """bibindex - checks if affected recids are found correctly for year index"""
        records_for_indexes = find_affected_records_for_index(get_all_indexes(virtual=False),
                                                              [[1,20]])
        self.assertEqual(records_for_indexes['year'], [10,12])

    def test_find_proper_records_for_caption_index(self):
        """bibindex - checks if affected recids are found correctly for caption index"""
        records_for_indexes = find_affected_records_for_index(get_all_indexes(virtual=False),
                                                              [[1,100]])
        self.assertEqual(records_for_indexes['caption'], [10,12, 55, 98])

    def test_find_proper_records_for_journal_index(self):
        """bibindex - checks if affected recids are found correctly for journal index"""
        records_for_indexes = find_affected_records_for_index(get_all_indexes(virtual=False),
                                                              [[1,100]])
        self.assertEqual(records_for_indexes['journal'], [10])

    def test_find_proper_records_specified_only_year(self):
        """bibindex - checks if affected recids are found correctly for year index if we specify only year index as input"""
        records_for_indexes = find_affected_records_for_index(["year"], [[1, 100]])
        self.assertEqual(records_for_indexes["year"], [10, 12, 55])

    def test_find_proper_records_force_all(self):
        """bibindex - checks if all recids will be assigned to all specified indexes"""
        # force_all=True ignores modification dates entirely
        records_for_indexes = find_affected_records_for_index(["year", "title"], [[10, 15]], True)
        self.assertEqual(records_for_indexes["year"], records_for_indexes["title"])
        self.assertEqual(records_for_indexes["year"], [10, 11, 12, 13, 14, 15])

    def test_find_proper_records_nothing_for_title_index(self):
        """bibindex - checks if nothing was found for title index in range of records: 1 - 20"""
        records_for_indexes = find_affected_records_for_index(["title"], [[1, 20]])
        # an index with no affected records must be absent from the result dict
        self.assertRaises(KeyError, lambda :records_for_indexes["title"])
class BibIndexIndexingAffectedIndexes(InvenioTestCase):
    """Inserts two test records with second revisions, indexes them in all
    affected non-virtual indexes and checks the resulting index content."""
    # one-shot flag: record insertion + indexing runs only on the first setUp
    started = False
    # bibrec ids of the two test records inserted by setUp
    records = []
    # number of setUp calls; cleanup happens after the 3rd (last) test
    counter = 0

    # NOTE: setUp/tearDown are @classmethod ('self' names the class) so the
    # records list, flag and counter are shared across all tests of the class.
    @classmethod
    def setUp(self):
        self.counter += 1
        if not self.started:
            self.records.append(insert_record_one_and_second_revision())
            self.records.append(insert_record_two_and_second_revision())
            records_for_indexes = find_affected_records_for_index(get_all_indexes(virtual=False),
                                                                  [self.records])
            wtabs = get_word_tables(records_for_indexes.keys())
            for index_id, index_name, index_tags in wtabs:
                wordTable = WordTable(index_name=index_name,
                                      index_id=index_id,
                                      fields_to_index=index_tags,
                                      table_name_pattern='idxWORD%02dF',
                                      wordtable_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                                      tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                                      wash_index_terms=50)
                wordTable.add_recIDs([self.records], 10000)
            self.started = True

    @classmethod
    def tearDown(self):
        # wipe the test records and de-index them after the last (3rd) test
        if self.counter == 3:
            for rec in self.records:
                wipe_out_record_from_all_tables(rec)
            indexes = get_all_indexes(virtual=False)
            wtabs = get_word_tables(indexes)
            for index_id, index_name, index_tags in wtabs:
                wordTable = WordTable(index_name=index_name,
                                      index_id=index_id,
                                      fields_to_index=index_tags,
                                      table_name_pattern='idxWORD%02dF',
                                      wordtable_type = CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                                      tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                                      wash_index_terms=50)
                wordTable.del_recIDs([self.records])

    def test_proper_content_in_title_index(self):
        """bibindex - checks reindexation of title index for test records.."""
        index_id = get_index_id_from_index_name('title')
        query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec IN (""" % (index_id,)
        query = query + ", ".join(map(str, self.records)) + ")"
        resp = run_sql(query)
        # terms come from the second (corrected) revision, stemmed
        affiliation_rec1 = deserialize_via_marshal(resp[0][0])
        affiliation_rec2 = deserialize_via_marshal(resp[1][0])
        self.assertEqual(['univers', 'particl'], affiliation_rec1)
        self.assertEqual(['of', 'cours', 'collis'], affiliation_rec2)

    def test_proper_content_in_author_index(self):
        """bibindex - checks reindexation of author index for test records.."""
        index_id = get_index_id_from_index_name('author')
        query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec IN (""" % (index_id,)
        query = query + ", ".join(map(str, self.records)) + ")"
        resp = run_sql(query)
        author_rec1 = deserialize_via_marshal(resp[0][0])
        author_rec2 = deserialize_via_marshal(resp[1][0])
        self.assertEqual(['dawkins', 'richard', ], author_rec1)
        self.assertEqual(['john', 'locke'], author_rec2)

    def test_proper_content_in_global_index(self):
        """bibindex - checks reindexation of global index for test records.."""
        index_id = get_index_id_from_index_name('global')
        query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec IN (""" % (index_id,)
        query = query + ", ".join(map(str, self.records)) + ")"
        resp = run_sql(query)
        global_rec1 = deserialize_via_marshal(resp[0][0])
        global_rec2 = deserialize_via_marshal(resp[1][0])
        # record 1 reflects revision 2 ('Dawkins', 'universe'), not revision 1
        self.assertEqual(True, 'dawkin' in global_rec1)
        self.assertEqual(False, 'close' in global_rec1)
        self.assertEqual(True, 'univers' in global_rec1)
        self.assertEqual(True, 'john' in global_rec2)
        self.assertEqual(False, 'john' in global_rec1)
class BibIndexFindingIndexesForTags(InvenioTestCase):
    """Tests for the 'get_tag_indexes' function."""

    def _index_names(self, tag, **kwargs):
        """Return the tuple of index names 'get_tag_indexes' yields for *tag*."""
        # get_tag_indexes returns (index_id, index_name) rows; transposing
        # with zip and taking column 1 gives the names as a tuple
        return zip(*get_tag_indexes(tag, **kwargs))[1]

    def test_fulltext_tag_virtual_indexes_on(self):
        """bibindex - checks if 'get_tag_indexes' for tag 8564_u will find only 'fulltext' index"""
        self.assertEqual(('fulltext',), self._index_names('8564_u'))

    def test_title_tag_virtual_indexes_on(self):
        """bibindex - checks if 'get_tag_indexes' for tag 245__% will find also 'global' index"""
        self.assertEqual(('title', 'exacttitle', 'global'), self._index_names('245__%'))

    def test_title_tag_virtual_indexes_off(self):
        """bibindex - checks if 'get_tag_indexes' for tag 245__% wont find 'global' index (with virtual=False)"""
        self.assertEqual(('title', 'exacttitle'), self._index_names('245__%', virtual=False))

    def test_author_tag_virtual_indexes_on(self):
        """bibindex - checks 'get_tag_indexes' for tag '100'"""
        expected = ('author', 'affiliation', 'exactauthor', 'firstauthor',
                    'exactfirstauthor', 'authorcount', 'authorityauthor',
                    'miscellaneous', 'global')
        self.assertEqual(expected, self._index_names('100'))

    def test_author_exact_tag_virtual_indexes_off(self):
        """bibindex - checks 'get_tag_indexes' for tag '100__a'"""
        expected = ('author', 'exactauthor', 'firstauthor',
                    'exactfirstauthor', 'authorcount',
                    'authorityauthor', 'miscellaneous')
        self.assertEqual(expected, self._index_names('100__a', virtual=False))

    def test_wide_tag_virtual_indexes_off(self):
        """bibindex - checks 'get_tag_indexes' for tag like '86%'"""
        self.assertEqual(('miscellaneous',), self._index_names('86%', virtual=False))

    def test_909_tags_in_misc_index(self):
        """bibindex - checks connection between misc index and tags: 909C1%, 909C4%"""
        self.assertEqual(('miscellaneous',), self._index_names('909C1%', virtual=False))
        self.assertEqual(False, 'miscellaneous' in self._index_names('909C4%', virtual=False))

    def test_year_tag_virtual_indexes_on(self):
        """bibindex - checks 'get_tag_indexes' for tag 909C0y"""
        self.assertEqual(('year', 'global'), self._index_names('909C0y'))

    def test_wide_tag_authority_index_virtual_indexes_off(self):
        """bibindex - checks 'get_tag_indexes' for tag like '15%'"""
        self.assertEqual(('authoritysubject', 'miscellaneous'), self._index_names('15%', virtual=False))
class BibIndexFindingTagsForIndexes(InvenioTestCase):
    """Tests for the 'get_index_tags' function."""

    def test_tags_for_author_index(self):
        """bibindex - checks if 'get_index_tags' find proper tags for 'author' index """
        expected = ['100__a', '700__a']
        self.assertEqual(get_index_tags('author'), expected)

    def test_tags_for_global_index_virtual_indexes_off(self):
        """bibindex - checks if 'get_index_tags' find proper tags for 'global' index """
        # with virtual indexes excluded the virtual 'global' index has no tags
        self.assertEqual(get_index_tags('global', virtual=False), [])

    def test_tags_for_global_index_virtual_indexes_on(self):
        """bibindex - checks if 'get_index_tags' find proper tags for 'global' index """
        tags = get_index_tags('global')
        for tag in ('86%', '100__a', '245__%'):
            self.assertEqual(True, tag in tags)
class BibIndexGlobalIndexContentTest(InvenioTestCase):
    """ Tests if virtual global index is correctly indexed"""

    def is_part_of(self, container, content):
        """checks if content is a part of container"""
        return set(content).issubset(set(container))

    def _assert_reversed_tables_compatible(self, index_name, recids):
        # Helper: for every record in `recids`, every term indexed in
        # `index_name`'s reversed table must also be present in the global
        # index's reversed table for the same record.
        global_id = get_index_id_from_index_name('global')
        index_id = get_index_id_from_index_name(index_name)
        for rec in recids:
            query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=%s""" % (index_id, rec)
            termlist_index = deserialize_via_marshal(run_sql(query)[0][0])
            query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=%s""" % (global_id, rec)
            termlist_global = deserialize_via_marshal(run_sql(query)[0][0])
            self.assertEqual(self.is_part_of(termlist_global, termlist_index), True)

    def _assert_forward_tables_compatible(self, index_name, table_pattern):
        # Helper: every term in `index_name`'s forward table (named by
        # `table_pattern` % index_id, e.g. 'idxPAIR%02dF') must also be
        # present in the global index's forward table of the same kind.
        global_id = get_index_id_from_index_name('global')
        index_id = get_index_id_from_index_name(index_name)
        query = """SELECT term FROM %s""" % (table_pattern % index_id)
        res = zip(*run_sql(query))[0]
        query = """SELECT term FROM %s""" % (table_pattern % global_id)
        glob = zip(*run_sql(query))[0]
        self.assertEqual(self.is_part_of(glob, res), True)

    def test_title_index_compatibility_reversed_table(self):
        """bibindex - checks if the same words are in title and global index, reversed table"""
        self._assert_reversed_tables_compatible('title', range(1, 4))

    def test_abstract_index_compatibility_reversed_table(self):
        """bibindex - checks if the same words are in abstract and global index, reversed table"""
        self._assert_reversed_tables_compatible('abstract', range(6, 9))

    def test_misc_index_compatibility_reversed_table(self):
        """bibindex - checks if the same words are in misc and global index, reversed table"""
        self._assert_reversed_tables_compatible('miscellaneous', range(10, 14))

    def test_journal_index_compatibility_forward_table(self):
        """bibindex - checks if the same words are in journal and global index, forward table"""
        self._assert_forward_tables_compatible('journal', 'idxWORD%02dF')

    def test_keyword_index_compatibility_forward_table(self):
        """bibindex - checks if the same pairs are in keyword and global index, forward table"""
        self._assert_forward_tables_compatible('keyword', 'idxPAIR%02dF')

    def test_affiliation_index_compatibility_forward_table(self):
        """bibindex - checks if the same phrases are in affiliation and global index, forward table"""
        self._assert_forward_tables_compatible('affiliation', 'idxPHRASE%02dF')
class BibIndexVirtualIndexAlsoChangesTest(InvenioTestCase):
    """ Tests if virtual index changes after changes in dependent index"""
    # NOTE(review): the tests in this class are ORDER DEPENDENT. `counter` is
    # a class attribute bumped once per test in setUp; the virtual index is
    # created before test 1, reindexed with a special tokenizer before test 2,
    # reindexed back after test 3 and removed after test 4.
    counter = 0
    indexes = ["title"]
    _id = 39          # id of the temporary virtual index under test

    @classmethod
    def prepare_virtual_index(self):
        """creates new virtual index and binds it to specific normal index"""
        create_virtual_index(self._id, self.indexes)
        wtabs = get_word_tables(self.indexes)
        for index_id, index_name, index_tags in wtabs:
            wordTable = WordTable(index_name=index_name,
                                  index_id=index_id,
                                  fields_to_index=index_tags,
                                  table_name_pattern='idxWORD%02dF',
                                  wordtable_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                                  tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                                  wash_index_terms=50)
            # index records 1..10 only; chunk size 1000
            wordTable.add_recIDs([[1, 10]], 1000)

    @classmethod
    def reindex_virtual_index(self, special_tokenizer=False):
        """reindexes virtual and dependent indexes with different tokenizer"""
        # trivial replacement tokenizer: split on single spaces, no stemming
        def tokenize_for_words(phrase):
            return phrase.split(" ")
        wtabs = get_word_tables(self.indexes)
        for index_id, index_name, index_tags in wtabs:
            wordTable = WordTable(index_name=index_name,
                                  index_id=index_id,
                                  fields_to_index=index_tags,
                                  table_name_pattern='idxWORD%02dF',
                                  wordtable_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                                  tag_to_tokenizer_map={'8564_u': "BibIndexEmptyTokenizer"},
                                  wash_index_terms=50)
            if special_tokenizer == True:
                wordTable.default_tokenizer_function = tokenize_for_words
            wordTable.add_recIDs([[1, 10]], 1000)

    @classmethod
    def setUp(self):
        # counter drives the once-per-class setup steps (see class NOTE)
        self.counter += 1
        if self.counter == 1:
            self.prepare_virtual_index()
        elif self.counter == 2:
            self.reindex_virtual_index(special_tokenizer=True)

    @classmethod
    def tearDown(self):
        if self.counter == 3:
            # restore the normal tokenizer before the final sanity test
            self.reindex_virtual_index()
        elif self.counter == 4:
            remove_virtual_index(self._id)

    def test_virtual_index_1_has_10_records(self):
        """bibindex - checks if virtual index was filled with only ten records from title index"""
        query = "SELECT count(*) FROM idxWORD%02dR" % self._id
        self.assertEqual(10, run_sql(query)[0][0])

    def test_virtual_index_2_correct_content_record_1(self):
        """bibindex - after reindexing with different tokenizer virtual index also changes - record 1"""
        # special tokenizer does no lowercasing/stemming, hence 'Higgs'
        query = "SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=%s" % (self._id, 1)
        self.assertEqual('Higgs' in deserialize_via_marshal(run_sql(query)[0][0]), True)

    def test_virtual_index_3_correct_content_record_3(self):
        """bibindex - after reindexing with different tokenizer virtual index also changes - record 3"""
        query = "SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=%s" % (self._id, 3)
        self.assertEqual(['Conference', 'Biology', 'Molecular', 'European'],
                         deserialize_via_marshal(run_sql(query)[0][0]))

    def test_virtual_index_4_cleaned_up(self):
        """bibindex - after reindexing with normal title tokenizer everything is back to normal"""
        #this is version of test for installation with PyStemmer package
        #without this package word 'biology' is stemmed differently
        query = "SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=%s" % (self._id, 3)
        self.assertEqual(['biolog', 'molecular', 'confer', 'european'],
                         deserialize_via_marshal(run_sql(query)[0][0]))
class BibIndexVirtualIndexRemovalTest(InvenioTestCase):
    # Tests removal of a dependent index from a virtual index.
    # NOTE(review): ORDER DEPENDENT -- `counter` is bumped once per test in
    # setUp; the expensive index build plus the 'authorcount' removal happen
    # only before the first test, and cleanup only after the ninth (last).

    counter = 0
    indexes = ["authorcount", "journal", "year"]
    _id = 40          # id of the temporary virtual index under test

    @classmethod
    def setUp(self):
        self.counter += 1
        if self.counter == 1:
            create_virtual_index(self._id, self.indexes)
            wtabs = get_word_tables(self.indexes)
            for index_id, index_name, index_tags in wtabs:
                wordTable = WordTable(index_name=index_name,
                                      index_id=index_id,
                                      fields_to_index=index_tags,
                                      table_name_pattern='idxWORD%02dF',
                                      wordtable_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
                                      tag_to_tokenizer_map={'8564_u': "BibIndexFulltextTokenizer"},
                                      wash_index_terms=50)
                # index records 1..113; chunk size 1000
                wordTable.add_recIDs([[1, 113]], 1000)
            #removal part
            # detach 'authorcount' from the virtual index; the remaining
            # tests then verify what survived the removal
            w = WordTable("testindex", self._id, [], "idxWORD%02dF", CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"], {}, 50)
            w.remove_dependent_index(int(get_index_id_from_index_name("authorcount")))

    @classmethod
    def tearDown(self):
        # 9 == number of tests in this class; remove the index after the last
        if self.counter == 9:
            remove_virtual_index(self._id)

    def test_authorcount_removal_number_of_items(self):
        """bibindex - checks virtual index after authorcount index removal - number of items"""
        query = """SELECT count(*) FROM idxWORD%02dF"""
        res = run_sql(query % self._id)
        self.assertEqual(157, res[0][0])

    def test_authorcount_removal_common_terms_intact(self):
        """bibindex - checks virtual index after authorcount index removal - common terms"""
        # these terms also occur in 'journal'/'year', so they must survive
        query = """SELECT term FROM idxWORD%02dF WHERE term IN ('10', '2', '4', '7')"""
        res = run_sql(query % self._id)
        self.assertEqual(4, len(res))

    def test_authorcount_removal_no_315_term(self):
        """bibindex - checks virtual index after authorcount index removal - no '315' term in virtual index"""
        query = """SELECT term FROM idxWORD%02dF WHERE term='315'"""
        res = run_sql(query % self._id)
        self.assertEqual(0, len(res))

    def test_authorcount_removal_term_10_hitlist(self):
        """bibindex - checks virtual index after authorcount index removal - hitlist for '10' term"""
        query = """SELECT hitlist FROM idxWORD%02dF WHERE term='10'"""
        res = run_sql(query % self._id)
        self.assertEqual([80, 92], intbitset(res[0][0]).tolist())

    def test_authorcount_removal_term_1985_hitlist(self):
        """bibindex - checks virtual index after authorcount index removal - hitlist for '1985' term"""
        query = """SELECT hitlist FROM idxWORD%02dF WHERE term='1985'"""
        res = run_sql(query % self._id)
        self.assertEqual([16, 18], intbitset(res[0][0]).tolist())

    def test_authorcount_removal_record_16_hitlist(self):
        """bibindex - checks virtual index after authorcount index removal - termlist for record 16"""
        query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=16"""
        res = run_sql(query % self._id)
        self.assertEqual(['1985'], deserialize_via_marshal(res[0][0]))

    def test_authorcount_removal_record_10_hitlist(self):
        """bibindex - checks virtual index after authorcount index removal - termlist for record 10"""
        query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=10"""
        res = run_sql(query % self._id)
        self.assertEqual(['2002', 'Eur. Phys. J., C'], deserialize_via_marshal(res[0][0]))

    def test_year_removal_number_of_items(self):
        """bibindex - checks virtual index after year removal - number of items"""
        #must be run after: tearDown
        # this test itself performs the 'year' removal that the next test checks
        w = WordTable("testindex", self._id, [], "idxWORD%02dF", CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"], {}, 50)
        w.remove_dependent_index(int(get_index_id_from_index_name("year")))
        query = """SELECT count(*) FROM idxWORD%02dF"""
        res = run_sql(query % self._id)
        self.assertEqual(134, res[0][0])

    def test_year_removal_record_18_hitlist(self):
        """bibindex - checks virtual index after year removal - termlist for record 18"""
        #must be run after: tearDown, test_year_removal_number_of_items
        query = """SELECT termlist FROM idxWORD%02dR WHERE id_bibrec=18"""
        res = run_sql(query % self._id)
        self.assertEqual(['151', '357','1985', 'Phys. Lett., B 151 (1985) 357', 'Phys. Lett., B'],
                         deserialize_via_marshal(res[0][0]))
class BibIndexCLICallTest(InvenioTestCase):
    """Tests if calls to bibindex from CLI (bibsched daemon) are run correctly."""

    def _task_log_text(self, task_id):
        """Return the full content of the bibsched log file for *task_id*."""
        log_path = os.path.join(CFG_LOGDIR, 'bibsched_task_' + str(task_id) + '.log')
        with open(log_path) as log_file:
            return log_file.read()  # small file

    def test_correct_message_for_wrong_index_names(self):
        """bibindex - checks if correct message for wrong index appears"""
        task_id = reindex_for_type_with_bibsched("titlexrg", force_all=True)
        text = self._task_log_text(task_id)
        self.assertTrue(text.find("Specified indexes can't be found.") >= 0)

    def test_correct_message_for_up_to_date_indexes(self):
        """bibindex - checks if correct message for index up to date appears"""
        task_id = reindex_for_type_with_bibsched("abstract")
        text = self._task_log_text(task_id)
        self.assertTrue(text.find("Selected indexes/recIDs are up to date.") >= 0)
# Aggregate every bibindex regression test class into one suite so the
# Invenio test runner executes them together (order matters for the
# counter-driven classes above).
TEST_SUITE = make_test_suite(BibIndexRemoveStopwordsTest,
                             BibIndexRemoveLatexTest,
                             BibIndexRemoveHtmlTest,
                             BibIndexYearIndexTest,
                             BibIndexAuthorCountIndexTest,
                             BibIndexItemCountIndexTest,
                             BibIndexFiletypeIndexTest,
                             BibIndexJournalIndexTest,
                             BibIndexCJKTokenizerTitleIndexTest,
                             BibIndexAuthorityRecordTest,
                             BibIndexFindingAffectedIndexes,
                             BibIndexIndexingAffectedIndexes,
                             BibIndexFindingIndexesForTags,
                             BibIndexFindingTagsForIndexes,
                             BibIndexGlobalIndexContentTest,
                             BibIndexVirtualIndexAlsoChangesTest,
                             BibIndexVirtualIndexRemovalTest,
                             BibIndexCLICallTest)

if __name__ == "__main__":
    # warn_user=True: presumably because these tests modify live index
    # tables -- verify against run_test_suite's contract
    run_test_suite(TEST_SUITE, warn_user=True)
|
Acehaidrey/incubator-airflow | refs/heads/master | airflow/sensors/external_task_sensor.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
from typing import FrozenSet, Optional, Union
from sqlalchemy import func
from airflow.exceptions import AirflowException
from airflow.models import BaseOperatorLink, DagBag, DagModel, DagRun, TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.helpers import build_airflow_url_with_query
from airflow.utils.session import provide_session
from airflow.utils.state import State
class ExternalTaskSensorLink(BaseOperatorLink):
    """
    Operator link for ExternalTaskSensor. It allows users to access
    DAG waited with ExternalTaskSensor.
    """

    name = 'External DAG'

    def get_link(self, operator, dttm):
        # Link to the waited-for DAG at the execution date being poked.
        return build_airflow_url_with_query(
            {"dag_id": operator.external_dag_id, "execution_date": dttm.isoformat()}
        )
class ExternalTaskSensor(BaseSensorOperator):
    """
    Waits for a different DAG or a task in a different DAG to complete for a
    specific execution_date

    :param external_dag_id: The dag_id that contains the task you want to
        wait for
    :type external_dag_id: str
    :param external_task_id: The task_id that contains the task you want to
        wait for. If ``None`` (default value) the sensor waits for the DAG
    :type external_task_id: str or None
    :param allowed_states: list of allowed states, default is ``['success']``
    :type allowed_states: list
    :param failed_states: list of failed or dis-allowed states, default is ``None``
    :type failed_states: list
    :param execution_delta: time difference with the previous execution to
        look at, the default is the same execution_date as the current task or DAG.
        For yesterday, use [positive!] datetime.timedelta(days=1). Either
        execution_delta or execution_date_fn can be passed to
        ExternalTaskSensor, but not both.
    :type execution_delta: Optional[datetime.timedelta]
    :param execution_date_fn: function that receives the current execution date as the first
        positional argument and optionally any number of keyword arguments available in the
        context dictionary, and returns the desired execution dates to query.
        Either execution_delta or execution_date_fn can be passed to ExternalTaskSensor,
        but not both.
    :type execution_date_fn: Optional[Callable]
    :param check_existence: Set to `True` to check if the external task exists (when
        external_task_id is not None) or check if the DAG to wait for exists (when
        external_task_id is None), and immediately cease waiting if the external task
        or DAG does not exist (default value: False).
    :type check_existence: bool
    """

    template_fields = ['external_dag_id', 'external_task_id']
    ui_color = '#19647e'

    @property
    def operator_extra_links(self):
        """Return operator extra links"""
        return [ExternalTaskSensorLink()]

    @apply_defaults
    def __init__(
        self,
        *,
        external_dag_id,
        external_task_id=None,
        allowed_states=None,
        failed_states=None,
        execution_delta=None,
        execution_date_fn=None,
        check_existence=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.allowed_states = allowed_states or [State.SUCCESS]
        self.failed_states = failed_states or []

        total_states = set(self.allowed_states + self.failed_states)

        if set(self.failed_states).intersection(set(self.allowed_states)):
            raise AirflowException(
                "Duplicate values provided as allowed "
                "`{}` and failed states `{}`".format(self.allowed_states, self.failed_states)
            )

        # Task-level sensing validates against task states; DAG-level against dag-run states.
        if external_task_id:
            if not total_states <= set(State.task_states):
                raise ValueError(
                    f'Valid values for `allowed_states` and `failed_states` '
                    f'when `external_task_id` is not `None`: {State.task_states}'
                )
        elif not total_states <= set(State.dag_states):
            raise ValueError(
                f'Valid values for `allowed_states` and `failed_states` '
                f'when `external_task_id` is `None`: {State.dag_states}'
            )

        if execution_delta is not None and execution_date_fn is not None:
            raise ValueError(
                'Only one of `execution_delta` or `execution_date_fn` may '
                'be provided to ExternalTaskSensor; not both.'
            )

        self.execution_delta = execution_delta
        self.execution_date_fn = execution_date_fn
        self.external_dag_id = external_dag_id
        self.external_task_id = external_task_id
        self.check_existence = check_existence
        # we only check the existence for the first time.
        self.has_checked_existence = False

    @provide_session
    def poke(self, context, session=None):
        # Resolve the execution date(s) of the external DAG/task to look at.
        if self.execution_delta:
            dttm = context['execution_date'] - self.execution_delta
        elif self.execution_date_fn:
            dttm = self._handle_execution_date_fn(context=context)
        else:
            dttm = context['execution_date']

        dttm_filter = dttm if isinstance(dttm, list) else [dttm]
        # FIX: the comprehension variable was named `datetime`, shadowing the
        # imported `datetime` module inside the comprehension scope.
        serialized_dttm_filter = ','.join([dt.isoformat() for dt in dttm_filter])

        self.log.info(
            'Poking for %s.%s on %s ... ', self.external_dag_id, self.external_task_id, serialized_dttm_filter
        )

        DM = DagModel
        # we only do the check for 1st time, no need for subsequent poke
        if self.check_existence and not self.has_checked_existence:
            dag_to_wait = session.query(DM).filter(DM.dag_id == self.external_dag_id).first()

            if not dag_to_wait:
                raise AirflowException(f'The external DAG {self.external_dag_id} does not exist.')

            elif not os.path.exists(dag_to_wait.fileloc):
                raise AirflowException(f'The external DAG {self.external_dag_id} was deleted.')

            if self.external_task_id:
                refreshed_dag_info = DagBag(dag_to_wait.fileloc).get_dag(self.external_dag_id)
                if not refreshed_dag_info.has_task(self.external_task_id):
                    raise AirflowException(
                        f'The external task {self.external_task_id} in '
                        f'DAG {self.external_dag_id} does not exist.'
                    )
            self.has_checked_existence = True

        count_allowed = self.get_count(dttm_filter, session, self.allowed_states)

        # -1 is a sentinel meaning "failed states not checked"
        count_failed = -1
        if len(self.failed_states) > 0:
            count_failed = self.get_count(dttm_filter, session, self.failed_states)

        session.commit()
        if count_failed == len(dttm_filter):
            if self.external_task_id:
                raise AirflowException(
                    f'The external task {self.external_task_id} in DAG {self.external_dag_id} failed.'
                )
            else:
                raise AirflowException(f'The external DAG {self.external_dag_id} failed.')

        # Succeed only when every requested execution date is in an allowed state.
        return count_allowed == len(dttm_filter)

    def get_count(self, dttm_filter, session, states):
        """
        Get the count of records against dttm filter and states

        :param dttm_filter: date time filter for execution date
        :type dttm_filter: list
        :param session: airflow session object
        :type session: SASession
        :param states: task or dag states
        :type states: list
        :return: count of record against the filters
        """
        TI = TaskInstance
        DR = DagRun
        if self.external_task_id:
            # .count() is inefficient
            count = (
                session.query(func.count())
                .filter(
                    TI.dag_id == self.external_dag_id,
                    TI.task_id == self.external_task_id,
                    TI.state.in_(states),  # pylint: disable=no-member
                    TI.execution_date.in_(dttm_filter),
                )
                .scalar()
            )
        else:
            # .count() is inefficient
            count = (
                session.query(func.count())
                .filter(
                    DR.dag_id == self.external_dag_id,
                    DR.state.in_(states),  # pylint: disable=no-member
                    DR.execution_date.in_(dttm_filter),
                )
                .scalar()
            )
        return count

    def _handle_execution_date_fn(self, context):
        """
        This function is to handle backwards compatibility with how this operator was
        previously where it only passes the execution date, but also allow for the newer
        implementation to pass all context variables as keyword arguments, to allow
        for more sophisticated returns of dates to return.
        """
        from airflow.utils.operator_helpers import make_kwargs_callable

        # Remove "execution_date" because it is already a mandatory positional argument
        execution_date = context["execution_date"]
        kwargs = {k: v for k, v in context.items() if k != "execution_date"}
        # Add "context" in the kwargs for backward compatibility (because context used to be
        # an acceptable argument of execution_date_fn)
        kwargs["context"] = context
        kwargs_callable = make_kwargs_callable(self.execution_date_fn)
        return kwargs_callable(execution_date, **kwargs)
class ExternalTaskMarker(DummyOperator):
    """
    Use this operator to indicate that a task on a different DAG depends on this task.
    When this task is cleared with "Recursive" selected, Airflow will clear the task on
    the other DAG and its downstream tasks recursively. Transitive dependencies are followed
    until the recursion_depth is reached.

    :param external_dag_id: The dag_id that contains the dependent task that needs to be cleared.
    :type external_dag_id: str
    :param external_task_id: The task_id of the dependent task that needs to be cleared.
    :type external_task_id: str
    :param execution_date: The execution_date of the dependent task that needs to be cleared.
    :type execution_date: str or datetime.datetime
    :param recursion_depth: The maximum level of transitive dependencies allowed. Default is 10.
        This is mostly used for preventing cyclic dependencies. It is fine to increase
        this number if necessary. However, too many levels of transitive dependencies will make
        it slower to clear tasks in the web UI.
    """

    template_fields = ['external_dag_id', 'external_task_id', 'execution_date']
    ui_color = '#19647e'

    # The _serialized_fields are lazily loaded when get_serialized_fields() method is called
    __serialized_fields: Optional[FrozenSet[str]] = None

    @apply_defaults
    def __init__(
        self,
        *,
        external_dag_id,
        external_task_id,
        # default is a Jinja template resolving to the current execution date at runtime
        execution_date: Optional[Union[str, datetime.datetime]] = "{{ execution_date.isoformat() }}",
        recursion_depth: int = 10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.external_dag_id = external_dag_id
        self.external_task_id = external_task_id
        # normalize to a string: datetimes are serialized via isoformat,
        # strings (including Jinja templates) are kept as-is
        if isinstance(execution_date, datetime.datetime):
            self.execution_date = execution_date.isoformat()
        elif isinstance(execution_date, str):
            self.execution_date = execution_date
        else:
            raise TypeError(
                'Expected str or datetime.datetime type for execution_date. Got {}'.format(
                    type(execution_date)
                )
            )

        if recursion_depth <= 0:
            raise ValueError("recursion_depth should be a positive integer")
        self.recursion_depth = recursion_depth

    @classmethod
    def get_serialized_fields(cls):
        """Serialized ExternalTaskMarker contain exactly these fields + templated_fields ."""
        # lazily computed once and cached on the class (name-mangled attribute)
        if not cls.__serialized_fields:
            cls.__serialized_fields = frozenset(super().get_serialized_fields() | {"recursion_depth"})
        return cls.__serialized_fields
|
nilbody/h2o-3 | refs/heads/master | py/h2o.py | 2 | import sys, os, getpass, logging, time, inspect, requests, json, pprint
import h2o_test_utils
from h2o_test_utils import log, log_rest
import h2o_print as h2p
class H2O(object):
# static (class) variables
ipaddr_from_cmd_line = None
debugger = False
json_url_history = []
python_test_name = inspect.stack()[1][1]
verbose = False
experimental_algos = ["pca", "svd", "glrm"]
## TODO: support api_version parameter for all api calls!
# Also a global in the H2O object set at creation time.
# TODO: ensure that all of this is really necessary:
    def __init__(self,
                 use_this_ip_addr=None, port=54321, capture_output=True,
                 use_debugger=None, classpath=None,
                 use_hdfs=False, use_maprfs=False,
                 # hdfs_version="cdh4", hdfs_name_node="192.168.1.151",
                 # hdfs_version="cdh3", hdfs_name_node="192.168.1.176",
                 hdfs_version=None, hdfs_name_node=None, hdfs_config=None,
                 aws_credentials=None,
                 use_flatfile=False, java_heap_GB=None, java_heap_MB=None, java_extra_args=None,
                 use_home_for_ice=False, node_id=None, username=None,
                 random_udp_drop=False,
                 redirect_import_folder_to_s3_path=None,
                 redirect_import_folder_to_s3n_path=None,
                 disable_h2o_log=False,
                 enable_benchmark_log=False,
                 h2o_remote_buckets_root=None,
                 delete_keys_at_teardown=False,
                 cloud_name=None,
                 ):
        """Configure (but do not launch) one H2O node.

        Stores the connection/launch configuration on the instance; HDFS
        defaults are auto-detected by probing a known internal host.
        NOTE(review): this constructor performs a network request when
        use_hdfs is True -- side effect at construction time.
        """
        if use_hdfs:
            # see if we can touch a 0xdata machine
            try:
                # long timeout in ec2...bad
                a = requests.get('http://192.168.1.176:80', timeout=1)
                hdfs_0xdata_visible = True
            except:
                # any failure (timeout, DNS, connection refused) means we
                # assume we are running outside the internal network (ec2)
                hdfs_0xdata_visible = False

            # different defaults, depending on where we're running
            if hdfs_name_node is None:
                if hdfs_0xdata_visible:
                    hdfs_name_node = "192.168.1.176"
                else: # ec2
                    hdfs_name_node = "10.78.14.235:9000"

            if hdfs_version is None:
                if hdfs_0xdata_visible:
                    hdfs_version = "cdh3"
                else: # ec2
                    hdfs_version = "0.20.2"

        self.redirect_import_folder_to_s3_path = redirect_import_folder_to_s3_path
        self.redirect_import_folder_to_s3n_path = redirect_import_folder_to_s3n_path

        self.aws_credentials = aws_credentials
        self.port = port
        # None is legal for self.addr.
        # means we won't give an ip to the jar when we start.
        # Or we can say use use_this_ip_addr=127.0.0.1, or the known address
        # if use_this_addr is None, use 127.0.0.1 for urls and json
        # Command line arg 'ipaddr_from_cmd_line' dominates:
        if H2O.ipaddr_from_cmd_line:
            self.addr = H2O.ipaddr_from_cmd_line
        else:
            self.addr = use_this_ip_addr

        if self.addr is not None:
            self.http_addr = self.addr
        else:
            # fall back to auto-detecting this machine's IP address
            self.http_addr = get_ip_address()

        # command line should always dominate for enabling
        if H2O.debugger: use_debugger = True
        self.use_debugger = use_debugger

        self.classpath = classpath
        self.capture_output = capture_output

        self.use_hdfs = use_hdfs
        self.use_maprfs = use_maprfs
        self.hdfs_name_node = hdfs_name_node
        self.hdfs_version = hdfs_version
        self.hdfs_config = hdfs_config

        self.use_flatfile = use_flatfile

        self.java_heap_GB = java_heap_GB
        self.java_heap_MB = java_heap_MB
        self.java_extra_args = java_extra_args

        self.use_home_for_ice = use_home_for_ice
        self.node_id = node_id

        if username:
            self.username = username
        else:
            # default to the user running the tests
            self.username = getpass.getuser()

        # don't want multiple reports from tearDown and tearDownClass
        # have nodes[0] remember (0 always exists)
        self.sandbox_error_was_reported = False
        self.sandbox_ignore_errors = False

        self.random_udp_drop = random_udp_drop
        self.disable_h2o_log = disable_h2o_log

        # this dumps stats from tests, and perf stats while polling to benchmark.log
        self.enable_benchmark_log = enable_benchmark_log
        self.h2o_remote_buckets_root = h2o_remote_buckets_root
        self.delete_keys_at_teardown = delete_keys_at_teardown

        if cloud_name:
            self.cloud_name = cloud_name
        else:
            # unique-enough default: user name plus this process's pid
            self.cloud_name = 'pytest-%s-%s' % (getpass.getuser(), os.getpid())
'''
Printable string representation of an H2O node object.
'''
def __str__(self):
return '%s - http://%s:%d/' % (type(self), self.http_addr, self.port)
    # TODO: UGH, move this.
    @staticmethod
    def verboseprint(*args, **kwargs):
        # Print all positional and keyword arguments on one line, but only
        # when the class-level H2O.verbose flag is set (Python 2 print
        # statement syntax).
        # NOTE(review): iterating kwargs prints only the KEY names, not the
        # values -- presumably intentional shorthand; confirm with callers.
        if H2O.verbose:
            for x in args: # so you don't have to create a single string
                print x,
            for x in kwargs: # so you don't have to create a single string
                print x,
            print
            # flush so interleaved subprocess output stays ordered
            sys.stdout.flush()
def __url(self, loc, port=None):
# always use the new api port
if port is None: port = self.port
if loc.startswith('/'):
delim = ''
else:
delim = '/'
u = 'http://%s:%d%s%s' % (self.http_addr, port, delim, loc)
return u
'''
Make a REST request to the h2o server and if succesful return a dict containing the JSON result.
'''
# @profile
    def __do_json_request(self, jsonRequest=None, fullUrl=None, timeout=10, params=None, postData=None, returnFast=False,
                          cmd='get', extraComment=None, ignoreH2oError=False, noExtraErrorCheck=False, raiseIfNon200=True, suppressErrorMsg=False, **kwargs):
        '''
        Make a REST request to the h2o server and, if successful, return a dict of the JSON result.

        jsonRequest: URL path, resolved against this node unless fullUrl is given.
        cmd: one of 'get', 'post', 'delete'.
        params: query parameters; entries whose value is None are removed
                (NOTE: mutates the caller's dict in place).
        postData: form data for POST; list/dict values are stringified into
                  JSON-style array/map literals because the POST is form-encoded.
        returnFast: skip response decoding and return None.
        raiseIfNon200 / ignoreH2oError / suppressErrorMsg: error-handling knobs.

        Raises on connection failure, and on h2o 'error'-style fields in the
        response unless ignoreH2oError. The decoded dict gets an extra
        '__http_response' entry carrying url/status_code/text.
        '''
        H2O.verboseprint("__do_json_request, timeout: " + str(timeout))
        # if url param is used, use it as full url. otherwise crate from the jsonRequest
        if fullUrl:
            url = fullUrl
        else:
            url = self.__url(jsonRequest)
        # remove any params that are 'None'
        # need to copy dictionary, since can't delete while iterating
        # NOTE(review): this deletes from the caller's 'params' dict in place
        if params is not None:
            params_serialized = params.copy()
            for k in params_serialized:
                if params_serialized[k] is None:
                    del params[k]
            paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
        else:
            paramsStr = ''
        # The requests package takes array parameters and explodes them: ['f00', 'b4r'] becomes "f00,b4r".
        # NOTE: this handles 1D arrays only; if we need ND this needs to be recursive.
        # NOTE: we currently don't need to do this for GET, so that's not implemented.
        if postData is not None:
            munged_postData = {}
            for k, v in postData.iteritems():
                if type(v) is list:
                    # stringify a list into a JSON-style array literal
                    if len(v) == 0:
                        munged_postData[k] = '[]'
                    else:
                        first = True
                        array_str = '['
                        for val in v:
                            if not first: array_str += ', '
                            if val is None:
                                array_str += 'null'
                            elif isinstance(val, basestring):
                                array_str += "\"" + str(val) + "\""
                            else:
                                array_str += str(val)
                            first = False
                        array_str += ']'
                        munged_postData[k] = array_str
                elif type(v) is dict:
                    # stringify a dict into a JSON-style map literal
                    if len(v) == 0:
                        munged_postData[k] = '{}'
                    else:
                        first = True
                        map_str = '{'
                        for key, val in v.iteritems():
                            if not first: map_str += ', '
                            if val is None:
                                map_str += "\"" + key + "\"" + ': null'
                            elif isinstance(val, basestring):
                                map_str += "\"" + str(key) + "\"" + ":" + "\"" + str(val) + "\""
                            else:
                                map_str += "\"" + key + "\"" + ':' + str(val)
                            first = False
                        map_str += '}'
                        munged_postData[k] = map_str
                else:
                    # not list:
                    munged_postData[k] = v
        else:
            # None
            munged_postData = postData
        # print("munged_postData: " + repr(munged_postData))
        if extraComment:
            log('Start ' + url + paramsStr, comment=extraComment)
        else:
            log('Start ' + url + paramsStr)
        log_rest("")
        log_rest("----------------------------------------------------------------------\n")
        if extraComment:
            log_rest("# Extra comment info about this request: " + extraComment)
        if cmd == 'get':
            log_rest("GET")
        else:
            log_rest("POST")
        log_rest(url + paramsStr)
        # file get passed thru kwargs here
        try:
            if 'post' == cmd:
                # NOTE == cmd: for now, since we don't have deserialization from JSON in h2o-dev, we use form-encoded POST.
                # This is temporary.
                #
                # This following does application/json (aka, posting JSON in the body):
                # r = requests.post(url, timeout=timeout, params=params, data=json.dumps(munged_postData), **kwargs)
                #
                # This does form-encoded, which doesn't allow POST of nested structures
                r = requests.post(url, timeout=timeout, params=params, data=munged_postData, **kwargs)
            elif 'delete' == cmd:
                r = requests.delete(url, timeout=timeout, params=params, **kwargs)
            elif 'get' == cmd:
                r = requests.get(url, timeout=timeout, params=params, **kwargs)
            else:
                raise ValueError("Unknown HTTP command (expected 'get', 'post' or 'delete'): " + cmd)
        except Exception, e:
            # rethrow the exception after we've checked for stack trace from h2o
            # out of memory errors maybe don't show up right away? so we should wait for h2o
            # to get it out to h2o stdout. We don't want to rely on cloud teardown to check
            # because there's no delay, and we don't want to delay all cloud teardowns by waiting.
            # (this is new/experimental)
            exc_info = sys.exc_info()
            # use this to ignore the initial connection errors during build cloud when h2o is coming up
            if not noExtraErrorCheck:
                h2p.red_print(
                    "ERROR: got exception on %s to h2o. \nGoing to check sandbox, then rethrow.." % (url + paramsStr))
                time.sleep(2)
                H2O.check_sandbox_for_errors(python_test_name=H2O.python_test_name);
            log_rest("")
            log_rest("EXCEPTION CAUGHT DOING REQUEST: " + str(e.message))
            # re-raise the original exception with its original traceback (Python 2 form)
            raise exc_info[1], None, exc_info[2]
        H2O.verboseprint("r: " + repr(r))
        if 200 != r.status_code:
            # build a diagnostic message; also consumed below if we end up raising
            pp = pprint.PrettyPrinter(indent=4)
            msg = "JSON call returned non-200 status: " + url
            # NOTE(review): local 'json' shadows the json module within this method
            json = r.json()
            if None != json and 'dev_msg' in json:
                msg += "\ndev_msg: "
                msg += str(json['dev_msg'])
            msg += "\nr.status_code: " + str(r.status_code)
            msg += "\nr.headers: " + repr(r.headers)
            if None == json:
                msg += '\nERROR: the error output from H2O is not JSON!'
                msg += "\nr.text: " + r.text
            else:
                msg += "\nr.json: "
                msg += pp.pformat(json)
            if raiseIfNon200:
                pass # we'll pass msg up with the exception
            elif not suppressErrorMsg:
                print(msg)
            log_rest(msg)
        log_rest("")
        try:
            if r is None:
                log_rest("r is None")
            else:
                log_rest("HTTP status code: " + str(r.status_code))
                # The following accesses to r.text were taking most of the runtime:
                log_text = False
                if log_text:
                    if hasattr(r, 'text'):
                        if r.text is None:
                            log_rest("r.text is None")
                        else:
                            log_rest(r.text)
                    else:
                        log_rest("r does not have attr text")
        except Exception, e:
            # Paranoid exception catch.
            # Ignore logging exceptions in the case that the above error checking isn't sufficient.
            print "Caught exception from result logging: ", e, "; result: ", repr(r)
        # fatal if no response
        # NOTE(review): 'msg' is only defined in the non-200 branch above; requests'
        # Response truthiness makes 'not r' True for 4xx/5xx, so that branch ran — confirm
        if raiseIfNon200 and not r:
            raise Exception("Maybe bad url? no r in __do_json_request in %s:" % inspect.stack()[1][3] + "\n\n" + msg)
        # this is used to open a browser on results, or to redo the operation in the browser
        # we don't' have that may urls flying around, so let's keep them all
        H2O.json_url_history.append(r.url)
        # if r.json():
        #     raise Exception("Maybe bad url? no r.json in __do_json_request in %s:" % inspect.stack()[1][3])
        rjson = None
        if returnFast:
            return
        try:
            rjson = r.json()
        except:
            print h2o_test_utils.dump_json(r.text)
            if not isinstance(r, (list, dict)):
                raise Exception("h2o json responses should always be lists or dicts, see previous for text")
            raise Exception("Could not decode any json from the request.")
        # TODO
        # TODO
        # TODO
        # TODO: we should really only look in the response object. This check
        # prevents us from having a field called "error" (e.g., for a scoring result).
        for e in ['error', 'Error', 'errors', 'Errors']:
            # error can be null (python None). This happens in exec2
            if e in rjson and rjson[e]:
                H2O.verboseprint("rjson:" + h2o_test_utils.dump_json(rjson))
                emsg = 'rjson %s in %s: %s' % (e, inspect.stack()[1][3], rjson[e])
                if ignoreH2oError:
                    # well, we print it..so not totally ignore. test can look at rjson returned
                    print emsg
                else:
                    print emsg
                    raise Exception(emsg)
        for w in ['warning', 'Warning', 'warnings', 'Warnings']:
            # warning can be null (python None).
            if w in rjson and rjson[w]:
                H2O.verboseprint(dump_json(rjson))
                print 'rjson %s in %s: %s' % (w, inspect.stack()[1][3], rjson[w])
        # Allow the caller to check things like __http_request.status_code.
        # The response object is not JSON-serializable, so we capture the fields we want here:
        response = {}
        # response['headers'] = r.headers
        response['url'] = r.url
        response['status_code'] = r.status_code
        response['text'] = r.text
        rjson['__http_response'] = response
        return rjson
# end of __do_json_request
'''
Check the output for errors. Note: false positives are possible; a whitelist is available.
'''
    @staticmethod
    def check_sandbox_for_errors(cloudShutdownIsError=False, sandboxIgnoreErrors=False, python_test_name=''):
        '''
        Check the sandbox output for errors. Note: false positives are possible;
        a whitelist is available.

        Currently disabled: the early return below makes this a no-op.
        '''
        # TODO: nothing right now
        return
        # NOTE(review): everything below is unreachable while the early return above stands.
        # dont' have both tearDown and tearDownClass report the same found error
        # only need the first
        if nodes and nodes[0].sandbox_error_report(): # gets current state
            return
        # Can build a cloud that ignores all sandbox things that normally fatal the test
        # Kludge, test will set this directly if it wants, rather than thru build_cloud parameter.
        # we need the sandbox_ignore_errors, for the test teardown_cloud..the state disappears!
        ignore = sandboxIgnoreErrors or (nodes and nodes[0].sandbox_ignore_errors)
        errorFound = h2o_sandbox.check_sandbox_for_errors(
            LOG_DIR=LOG_DIR,
            sandboxIgnoreErrors=ignore,
            cloudShutdownIsError=cloudShutdownIsError,
            python_test_name=python_test_name)
        if errorFound and nodes:
            nodes[0].sandbox_error_report(True) # sets
###################
# REST API ACCESSORS
'''
Fetch all the cluster status from the /Cloud endpoint.
'''
def cloud(self, timeoutSecs=10, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'cloud', H2O.verbose)
result = self.__do_json_request('/3/Cloud', timeout=timeoutSecs, params=params_dict)
return result
'''
Determine if the cluster status is not good. Returns a message (which evaluates as True)
if cloud status is bad; else returns None (which evluates as False);
'''
def cloud_is_bad(self, timeoutSecs=10, **kwargs):
try:
cloud = self.cloud()
except Exception as e:
return str(e)
if cloud is None:
return '/3/Cloud returned None'
if 'cloud_size' not in cloud:
return '/3/Cloud return value does not contain cloud_size'
if 'nodes' not in cloud:
return '/3/Cloud return value does not contain nodes'
if type(cloud['nodes']) is not list:
return '/3/Cloud nodes element is not a list'
if cloud['cloud_size'] < 1:
return 'cloud_size < 1: ' + cloud['cloud_size']
size = cloud['cloud_size']
if cloud['cloud_size'] != len(cloud['nodes']):
return '/3/Cloud nodes list length != cloud_size'
node_num = 0
for node in cloud['nodes']:
if 'healthy' not in node:
return '/3/Cloud node return value does not contain healthy'
if not node['healthy']:
return 'node ' + str(node_num) + ' is not healthy'
return None
'''
Fetch all the jobs or a single job from the /Jobs endpoint.
'''
def jobs(self, job_key=None, timeoutSecs=10, **kwargs):
params_dict = {
'job_key': job_key
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'jobs', H2O.verbose)
result = self.__do_json_request('/3/Jobs', timeout=timeoutSecs, params=params_dict)
return result
'''
Poll a single job from the /Jobs endpoint until it is "status": "DONE" or "CANCELLED" or "FAILED" or we time out.
'''
# TODO: add delays, etc.
def poll_job(self, job_key, timeoutSecs=10, retryDelaySecs=0.5, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'poll_job', H2O.verbose)
start_time = time.time()
while True:
H2O.verboseprint('Polling for job: ' + job_key + '. . .')
result = self.__do_json_request('/3/Jobs/' + job_key, timeout=timeoutSecs, params=params_dict)
status = result['jobs'][0]['status']
if status == 'DONE' or status == 'CANCELLED' or status == 'FAILED':
H2O.verboseprint('Job ' + status + ': ' + job_key + '.')
return result
if time.time() - start_time > timeoutSecs:
print 'Job: ' + job_key + ' timed out in: ' + str(timeoutSecs) + '.'
# downstream checkers should tolerate None. Print msg in case it's overlooked.
return None
time.sleep(retryDelaySecs)
'''
Create a Frame.
'''
def create_frame(self, timeoutSecs=180, **kwargs):
a = self.__do_json_request('3/CreateFrame', cmd="post",
timeout=timeoutSecs,
params=kwargs
)
H2O.verboseprint("\ncreate_frame result:", h2o_test_utils.dump_json(a))
return a
'''
Split a Frame.
'''
def split_frame(self, timeoutSecs=180, **kwargs):
a = self.__do_json_request('/3/SplitFrame', cmd="post",
timeout=timeoutSecs,
postData=kwargs
)
job_json = self.poll_job(a["key"]["name"], timeoutSecs=timeoutSecs)
H2O.verboseprint("\nsplit_frame result:", h2o_test_utils.dump_json(a))
return a
'''
Create interactions.
'''
def interaction(self, timeoutSecs=180, **kwargs):
a = self.__do_json_request('/3/Interaction', cmd="post",
timeout=timeoutSecs,
postData=kwargs
)
H2O.verboseprint("\ninteraction result:", h2o_test_utils.dump_json(a))
return a
'''
Import a file or files into h2o. The 'file' parameter accepts a directory or a single file.
192.168.0.37:54323/ImportFiles.html?file=%2Fhome%2F0xdiag%2Fdatasets
'''
def import_files(self, path, timeoutSecs=180):
a = self.__do_json_request('/3/ImportFiles',
timeout=timeoutSecs,
params={"path": path}
)
H2O.verboseprint("\nimport_files result:", h2o_test_utils.dump_json(a))
return a
'''
Parse an imported raw file or files into a Frame.
'''
def parse(self, key, dest_key=None,
timeoutSecs=300, retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, **kwargs):
#
# Call ParseSetup?source_frames=[keys] . . .
#
if benchmarkLogging:
cloudPerfH2O.get_log_save(initOnly=True)
# TODO: multiple keys
parse_setup_params = {
'source_frames': '["' + key + '"]' # NOTE: quote key names
}
# h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'parse_setup', print_params=H2O.verbose)
setup_result = self.__do_json_request(jsonRequest="/3/ParseSetup", cmd='post', timeout=timeoutSecs, postData=parse_setup_params)
H2O.verboseprint("ParseSetup result:", h2o_test_utils.dump_json(setup_result))
#
# and then Parse?source_frames=<keys list> and params from the ParseSetup result
# Parse?source_frames=[nfs://Users/rpeck/Source/h2o2/smalldata/logreg/prostate.csv]&destination_frame=prostate.hex&parse_type=CSV&separator=44&number_columns=9&check_header=0&single_quotes=false&column_names=['ID',CAPSULE','AGE','RACE','DPROS','DCAPS','PSA','VOL','GLEASON]
#
parse_params = {
'source_frames': '["' + setup_result['source_frames'][0]['name'] + '"]', # TODO: cons up the whole list
'destination_frame': dest_key if dest_key else setup_result['destination_frame'],
'parse_type': setup_result['parse_type'],
'separator': setup_result['separator'],
'single_quotes': setup_result['single_quotes'],
'check_header': setup_result['check_header'],
'number_columns': setup_result['number_columns'],
'column_names': setup_result['column_names'], # gets stringified inside __do_json_request()
'column_types': setup_result['column_types'], # gets stringified inside __do_json_request()
'na_strings': setup_result['na_strings'],
'chunk_size': setup_result['chunk_size'],
}
H2O.verboseprint("parse_params: " + repr(parse_params))
h2o_test_utils.check_params_update_kwargs(parse_params, kwargs, 'parse', print_params=H2O.verbose)
parse_result = self.__do_json_request(jsonRequest="/3/Parse", cmd='post', timeout=timeoutSecs, postData=parse_params, **kwargs)
H2O.verboseprint("Parse result:", h2o_test_utils.dump_json(parse_result))
# print("Parse result:", repr(parse_result))
job_key = parse_result['job']['key']['name']
# TODO: dislike having different shapes for noPoll and poll
if noPoll:
return this.jobs(job_key)
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
if job_json:
dest_key = job_json['jobs'][0]['dest']['name']
return self.frames(dest_key)
return None
'''
Return a single Frame or all of the Frames in the h2o cluster. The
frames are contained in a list called "frames" at the top level of the
result. Currently the list is unordered.
TODO:
When find_compatible_models is implemented then the top level
dict will also contain a "models" list.
'''
def frames(self, key=None, timeoutSecs=10, **kwargs):
params_dict = {
'find_compatible_models': 0,
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'frames', H2O.verbose)
if key:
result = self.__do_json_request('/3/Frames/' + key, timeout=timeoutSecs, params=params_dict)
else:
result = self.__do_json_request('/3/Frames', timeout=timeoutSecs, params=params_dict)
return result
'''
Return the columns for a single Frame in the h2o cluster.
'''
def columns(self, key, timeoutSecs=10, **kwargs):
params_dict = {
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'columns', H2O.verbose)
result = self.__do_json_request('/3/Frames/' + key + '/columns', timeout=timeoutSecs, params=params_dict)
return result
'''
Return a single column for a single Frame in the h2o cluster.
'''
def column(self, key, column, timeoutSecs=10, **kwargs):
params_dict = {
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'column', H2O.verbose)
result = self.__do_json_request('/3/Frames/' + key + '/columns/' + column, timeout=timeoutSecs, params=params_dict)
return result
'''
Return the summary for a single column for a single Frame in the h2o cluster.
'''
def summary(self, key, column, timeoutSecs=10, **kwargs):
params_dict = {
'row_offset': 0,
'row_count': 100
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'summary', H2O.verbose)
result = self.__do_json_request('/3/Frames/' + key + '/columns/' + column + '/summary', timeout=timeoutSecs, params=params_dict)
return result
'''
Use Rapids to execute as.factor on the column of a Frame.
'''
def as_factor(self, key, column, timeoutSecs=60):
assert key is not None, 'FAIL: "key" parameter is null'
assert column is not None, 'FAIL: "column" parameter is null'
# quote column names; leave integer column indexes alone
if isinstance(column, basestring):
column = '"' + column + '"'
params_dict = {
'ast': "(assign {0} (:= {0} (as.factor (cols_py {0} {1})) {1} []))".format(key, column)
}
result = self.__do_json_request('/99/Rapids', cmd='post', timeout=timeoutSecs, postData=params_dict)
return result
'''
Delete a frame on the h2o cluster, given its key.
'''
def delete_frame(self, key, ignoreMissingKey=True, timeoutSecs=60, **kwargs):
assert key is not None, 'FAIL: "key" parameter is null'
result = self.__do_json_request('/3/Frames/' + key, cmd='delete', timeout=timeoutSecs)
# TODO: look for what?
if not ignoreMissingKey and 'f00b4r' in result:
raise ValueError('Frame key not found: ' + key)
return result
'''
Delete all frames on the h2o cluster.
'''
def delete_frames(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Frames', cmd='delete', timeout=timeoutSecs)
return result
'''
Return a model builder or all of the model builders known to the
h2o cluster. The model builders are contained in a dictionary
called "model_builders" at the top level of the result. The
dictionary maps algorithm names to parameters lists. Each of the
parameters contains all the metdata required by a client to
present a model building interface to the user.
'''
def model_builders(self, algo=None, timeoutSecs=10, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'model_builders', H2O.verbose)
if algo:
if algo in H2O.experimental_algos:
_rest_version = 99
else:
_rest_version = 3
result = self.__do_json_request(str(_rest_version)+'/ModelBuilders/' + algo, timeout=timeoutSecs, params=params_dict)
else:
result = self.__do_json_request('3/ModelBuilders', timeout=timeoutSecs, params=params_dict)
return result
'''
Check a dictionary of model builder parameters on the h2o cluster using the given algorithm and model parameters.
'''
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs):
assert algo is not None, 'FAIL: "algo" parameter is null'
# Allow this now: assert training_frame is not None, '"training_frame" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "FAIL: /ModelBuilders REST call failed"
assert algo in model_builders['model_builders'], "FAIL: algo " + algo + " not found in model_builders list: " + repr(model_builders)
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
if training_frame is not None:
frames = self.frames(key=training_frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(training_frame)
assert frames['frames'][0]['frame_id']['name'] == training_frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, frames['frames'][0]['frame_id']['name'], training_frame)
parameters['training_frame'] = training_frame
# TODO: add parameter existence checks
# TODO: add parameter value validation
if algo in H2O.experimental_algos:
_rest_version = 99
else:
_rest_version = 3
result = self.__do_json_request('/' + str(_rest_version) + '/ModelBuilders/' + algo + "/parameters", cmd='post', timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True, raiseIfNon200=False) # NOTE: DO NOT die if validation errors
H2O.verboseprint("model parameters validation: " + repr(result))
return result
'''
Build a model on the h2o cluster using the given algorithm, training
Frame and model parameters.
'''
def build_model(self, algo, training_frame, parameters, model_id = None, timeoutSecs=60, asynchronous=False, **kwargs):
# basic parameter checking
assert algo is not None, 'FAIL: "algo" parameter is null'
assert training_frame is not None, 'FAIL: "training_frame" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
# check that algo is known (TODO: remove after testing that error from POST is good enough)
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "FAIL: /ModelBuilders REST call failed"
assert algo in model_builders['model_builders'], "FAIL: failed to find algo " + algo + " in model_builders list: " + repr(model_builders)
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
# Check for frame:
frames = self.frames(key=training_frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(training_frame)
assert frames['frames'][0]['frame_id']['name'] == training_frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, frames['frames'][0]['frame_id']['name'], training_frame)
parameters['training_frame'] = training_frame
if model_id is not None:
parameters['model_id'] = model_id
result = self.__do_json_request('/3/ModelBuilders/' + algo, cmd='post', timeout=timeoutSecs, postData=parameters, raiseIfNon200=False, suppressErrorMsg=True) # NOTE: DO NOT die if validation errors
if asynchronous:
return result
elif 'error_count' in result and result['error_count'] > 0:
# parameters validation failure
return result
elif result['__http_response']['status_code'] != 200:
return result
else:
assert 'job' in result, "FAIL: did not find job key in model build result: " + repr(result)
job = result['job']
job_key = job['key']['name']
H2O.verboseprint("model building job_key: " + repr(job_key))
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
return result
'''
Build a Cartesian grid of models on the h2o cluster using the given algorithm, training
Frame, model parameters and grid parameters.
'''
def build_model_grid(self, algo, training_frame, parameters, grid_parameters, grid_id = None, timeoutSecs=60, asynchronous=False, **kwargs):
# basic parameter checking
assert algo is not None, 'FAIL: "algo" parameter is null'
assert training_frame is not None, 'FAIL: "training_frame" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
assert grid_parameters is not None, 'FAIL: "grid_parameters" parameter is null'
# check that algo is known (TODO: remove after testing that error from POST is good enough)
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "FAIL: /ModelBuilders REST call failed"
assert algo in model_builders['model_builders'], "FAIL: failed to find algo " + algo + " in model_builders list: " + repr(model_builders)
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
# Check for frame:
frames = self.frames(key=training_frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(training_frame)
assert frames['frames'][0]['frame_id']['name'] == training_frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, frames['frames'][0]['frame_id']['name'], training_frame)
parameters['training_frame'] = training_frame
# UGH: grid parameters are totally non-standard; the model parameters are mixed with grid_id and hyper_parameters. See GridSearchSchema.fillFromParms().
post_parameters = {}
post_parameters.update(parameters)
post_parameters['hyper_parameters'] = grid_parameters
# gridParams['grid_parameters'] = json.dumps(hyperParameters)
# print("post_parameters: " + repr(post_parameters))
if grid_id is not None:
post_parameters['grid_id'] = grid_id
result = self.__do_json_request('/99/Grid/' + algo, cmd='post', timeout=timeoutSecs, postData=post_parameters, raiseIfNon200=False) # NOTE: DO NOT die if validation errors
if result['__meta']['schema_type'] == 'H2OError':
print("ERROR: building model grid: " + grid_id)
print(" reason: " + result['dev_msg'])
print(" stacktrace: " + "\n ".join(result['stacktrace']))
raise ValueError("ERROR: building model grid: " + grid_id + "; reason: " + result['dev_msg'])
if asynchronous:
return result
elif 'error_count' in result and result['error_count'] > 0:
# parameters validation failure
return result
elif result['__http_response']['status_code'] != 200:
return result
else:
assert 'job' in result, "FAIL: did not find job key in model build result: " + repr(result)
job = result['job']
job_key = job['key']['name']
H2O.verboseprint("model building job_key: " + repr(job_key))
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
return result
'''
Score a model on the h2o cluster on the given Frame and return only the model metrics.
'''
def compute_model_metrics(self, model, frame, timeoutSecs=60, **kwargs):
assert model is not None, 'FAIL: "model" parameter is null'
assert frame is not None, 'FAIL: "frame" parameter is null'
models = self.models(key=model, timeoutSecs=timeoutSecs)
assert models is not None, "FAIL: /Models REST call failed"
assert models['models'][0]['model_id']['name'] == model, "FAIL: /Models/{0} returned Model {1} rather than Model {2}".format(model, models['models'][0]['model_id']['name'], model)
# TODO: test this assert, I don't think this is working. . .
frames = self.frames(key=frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(frame)
assert frames['frames'][0]['frame_id']['name'] == frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(frame, frames['frames'][0]['frame_id']['name'], frame)
result = self.__do_json_request('/3/ModelMetrics/models/' + model + '/frames/' + frame, cmd='post', timeout=timeoutSecs)
mm = result['model_metrics'][0]
H2O.verboseprint("model metrics: " + repr(mm))
return mm
def predict(self, model, frame, predictions_frame = None, timeoutSecs=60, **kwargs):
assert model is not None, 'FAIL: "model" parameter is null'
assert frame is not None, 'FAIL: "frame" parameter is null'
models = self.models(key=model, timeoutSecs=timeoutSecs)
# print("models (key={0}): ".format(model))
# pprint.PrettyPrinter(indent=4).pprint(models)
assert models is not None, "FAIL: /Models REST call failed"
assert models['models'][0]['model_id']['name'] == model, "FAIL: /Models/{0} returned Model {1} rather than Model {2}".format(model, models['models'][0]['model_id']['name'], model)
# TODO: test this assert, I don't think this is working. . .
frames = self.frames(key=frame)
assert frames is not None, "FAIL: /Frames/{0} REST call failed".format(frame)
assert frames['frames'][0]['frame_id']['name'] == frame, "FAIL: /Frames/{0} returned Frame {1} rather than Frame {2}".format(frame, frames['frames'][0]['frame_id']['name'], frame)
postData = { 'predictions_frame': predictions_frame }
result = self.__do_json_request('/3/Predictions/models/' + model + '/frames/' + frame, cmd='post', postData=postData, timeout=timeoutSecs)
return result
'''
ModelMetrics list.
'''
def model_metrics(self, model=None, frame=None, timeoutSecs=60, **kwargs):
if model is None and frame is None:
result = self.__do_json_request('/3/ModelMetrics', cmd='get', timeout=timeoutSecs)
elif model is not None and frame is not None:
result = self.__do_json_request('/3/ModelMetrics/models/' + model + '/frames/' + frame, cmd='get', timeout=timeoutSecs)
else:
raise ValueError("model_metrics can't yet handle the filter case")
return result
'''
Delete ModelMetrics.
'''
def delete_model_metrics(self, model, frame, timeoutSecs=60, **kwargs):
assert model is not None, 'FAIL: "model" parameter is null'
assert frame is not None, 'FAIL: "frame" parameter is null'
result = self.__do_json_request('/3/ModelMetrics/models/' + model + '/frames/' + frame, cmd='delete', timeout=timeoutSecs)
return result
'''
Return all of the models in the h2o cluster, or a single model given its key.
The models are contained in a list called "models" at the top level of the
result. Currently the list is unordered.
TODO:
When find_compatible_frames is implemented then the top level
dict will also contain a "frames" list.
'''
def models(self, api_version=3, key=None, timeoutSecs=20, **kwargs):
params_dict = {
'find_compatible_frames': False
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'models', H2O.verbose)
if key:
result = self.__do_json_request(str(api_version) + '/Models/' + key, timeout=timeoutSecs, params=params_dict)
else:
result = self.__do_json_request(str(api_version) + '/Models', timeout=timeoutSecs, params=params_dict)
return result
'''
Delete a model on the h2o cluster, given its key.
'''
def delete_model(self, key, ignoreMissingKey=True, timeoutSecs=60, **kwargs):
assert key is not None, 'FAIL: "key" parameter is null'
result = self.__do_json_request('/3/Models/' + key, cmd='delete', timeout=timeoutSecs)
# TODO: look for what?
if not ignoreMissingKey and 'f00b4r' in result:
raise ValueError('Model key not found: ' + key)
return result
'''
Delete all models on the h2o cluster.
'''
def delete_models(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Models', cmd='delete', timeout=timeoutSecs)
return result
'''
Return all of the grid search results in the h2o cluster.
The grid IDs are contained in a list called "grids" at the top level of the
result. Currently the list is unordered.
'''
def grids(self, api_version=99, timeoutSecs=20, **kwargs):
params_dict = {
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'grids', H2O.verbose)
result = self.__do_json_request(str(api_version) + '/Grids', timeout=timeoutSecs, params=params_dict)
return result
'''
Return a grid search result from the h2o cluster given its key.
The models IDs are contained in a list called "model_ids" at the top level of the
result. Currently the list is unordered.
'''
def grid(self, api_version=99, key=None, timeoutSecs=20, **kwargs):
params_dict = {
'sort_by': None,
'sort_order': None
}
h2o_test_utils.check_params_update_kwargs(params_dict, kwargs, 'grids', H2O.verbose)
if key:
result = self.__do_json_request(str(api_version) + '/Grids/' + key, timeout=timeoutSecs, params=params_dict)
else:
raise ValueError('Grid key not given: ' + key)
return result
'''
Fetch the list of REST API endpoints.
'''
def endpoints(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/endpoints', cmd='get', timeout=timeoutSecs)
return result
'''
Fetch the metadata for the given numbered REST API endpoint.
'''
def endpoint_by_number(self, num, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/endpoints/' + str(num), cmd='get', timeout=timeoutSecs)
return result
'''
Fetch the list of REST API schemas.
'''
def schemas(self, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/schemas', cmd='get', timeout=timeoutSecs)
return result
'''
Fetch the metadata for the given named REST API schema (e.g., FrameV3).
'''
def schema(self, schemaname, timeoutSecs=60, **kwargs):
parameters = { }
result = self.__do_json_request('/3/Metadata/schemas/' + schemaname, cmd='get', timeout=timeoutSecs)
return result
'''
def grid(self, algo, parameters, hyperParameters, timeoutSecs=60, asynchronous=False, **kwargs):
assert algo is not None, 'FAIL: "algo" parameter is null'
assert parameters is not None, 'FAIL: "parameters" parameter is null'
gridParams = parameters
gridParams['grid_parameters'] = json.dumps(hyperParameters)
result = self.__do_json_request('/99/Grid/' + algo, cmd='post', postData=gridParams, raiseIfNon200=False)
if asynchronous:
return result
elif result['__http_response']['status_code'] != 200:
return result
else:
assert 'job' in result, "FAIL: did not find job key in model build result: " + repr(result)
job = result['job']
job_key = job['key']['name']
H2O.verboseprint("grid search job_key: " + repr(job_key))
job_json = self.poll_job(job_key, timeoutSecs=timeoutSecs)
return result
'''
|
anthonydillon/horizon | refs/heads/master | openstack_dashboard/enabled/_1010_compute_panel_group.py | 43 | from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'compute'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Compute')
# The slug of the dashboard the PANEL_GROUP is associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
|
sassoftware/conary-policy | refs/heads/master | policy/metadata.py | 2 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from conary.build import policy
class _BaseMetadata(policy.PackagePolicy):
    """
    Shared base class for the metadata policies (C{Description}, C{Licenses}).

    Tracks the target trove names, an optional language tag, and whether
    recipe macros should be interpolated into the metadata strings.
    """
    keywords = {
        'language' : None,
        'troveNames' : None,
        'macros' : True,
    }

    def __init__(self, recipe, *args, **keywords):
        policy.PackagePolicy.__init__(self, recipe, *args, **keywords)
        # Snapshot the macros flag; updateArgs refreshes it on every call.
        self.applymacros = self.macros

    def updateArgs(self, *args, **keywords):
        policy.PackagePolicy.updateArgs(self, *args, **keywords)
        self.applymacros = self.macros

    def _getTroveNames(self):
        """Resolve the configured trove names against what this build produces.

        Returns only names that actually exist in the build; ':component'
        entries expand to every package that has that component.
        """
        # Build a map of troves that we have available
        availTroveNames = set(x.name
                                for x in self.recipe.autopkg.getComponents())
        availTroveNames.update(set(self.recipe.packages))
        # If no trove names were supplied, apply the metadata to all packages.
        # NOTE: if troveNames was set to an empty list, the `or` keeps it
        # empty rather than falling back to all packages.
        troveNamesArg = ((self.troveNames is None and self.recipe.packages) or
                         self.troveNames)
        troveNames = []
        for troveName in troveNamesArg:
            # Check to see if the :component syntax was used
            if not troveName.startswith(':'):
                if troveName not in availTroveNames:
                    # We don't know about this trove name, just move on
                    continue
                troveNames.append(troveName)
                continue
            # The trove spec starts with :. Extract all troves that have that
            # component.
            for pkgName in self.recipe.packages:
                if pkgName + troveName in availTroveNames:
                    troveNames.append(pkgName + troveName)
        return troveNames
class Description(_BaseMetadata):
    """
    NAME
    ====
    B{C{r.Description()}} - Set the description for the troves built from the
    recipe.
    SYNOPSIS
    ========
    C{r.Description([I{shortDesc}=,] [I{longDesc}=,] [I{language}=,] [I{troveNames}=,] [I{macros=}])}
    DESCRIPTION
    ===========
    The C{r.Description()} class adds description strings to troves.
    If the keyword argument I{troveNames} is not specified, all packages built
    out of the source component will be assigned the specified short
    description and/or long description.
    If I{troveNames} is specified, it should be a list of strings.
    Normally, descriptions should not be attached to individual components of
    a trove. However, it is possible to specify components in the I{troveNames}
    list. It is also possible to designate just the component by prefixing it
    with a colon (:) character, in which case all components with that name
    from all packages will have the description.
    The I{shortDesc} and I{longDesc} keyword arguments can be used to specify
    the short description and the long description, respectively.
    If a language is specified with keyword argument I{language}, the strings
    will be associated to that language, otherwise the default language will
    be used.
    The C{macros} keyword accepts a boolean value, and defaults
    to True. If the value of C{macros} is False, recipe macros in the
    description strings will not be interpolated.
    EXAMPLES
    ========
    Assuming that the source trove will build two packages, I{prk-client} and
    I{prk-server}, each with I{:runtime} and I{:lib} components:
    C{r.Description(shortDesc = "PRK implementation for Linux", longDesc = "This is the implementation of PRK for Linux")}
    will set the descriptions for the I{prk-client} and I{prk-server} troves.
    C{r.Description(shortDesc = "Runtime component for PRK", longDesc = "This is the runtime component for prk", troveNames = [ ':runtime' ])}
    will set the descriptions for the I{prk-client:runtime} and
    I{prk-server:runtime} troves.
    """
    keywords = _BaseMetadata.keywords.copy()
    keywords.update({
        'shortDesc' : None,
        'longDesc' : None,
    })

    def do(self):
        # Attach the configured descriptions to the resolved troves.
        if not hasattr(self.recipe, '_addMetadataItem'):
            # Old Conary
            return

        troveNames = self._getTroveNames()
        # Generator of (keyword, value) pairs; consumed exactly once below.
        itemTups = ((x, getattr(self, x)) for x in
                        ['shortDesc', 'longDesc', 'language'])
        if self.applymacros:
            itemDict = dict((x, y % self.recipe.macros) for (x, y) in itemTups
                                if y is not None)
        else:
            # NOTE(review): unlike the macro branch, this branch keeps items
            # whose value is None — confirm whether the asymmetry is intended.
            itemDict = dict(itemTups)
        self.recipe._addMetadataItem(troveNames, itemDict)
class Licenses(_BaseMetadata):
    """
    NAME
    ====
    B{C{r.Licenses()}} - Set the licenses for the troves built from the
    recipe.
    SYNOPSIS
    ========
    C{r.Licenses(I{license}, [I{license}, ...] [I{language}=,] [I{troveNames}=,] [I{macros=}])}
    DESCRIPTION
    ===========
    The C{r.Licenses()} class adds license information to troves.
    If the keyword argument I{troveNames} is not specified, all packages built
    out of the source component will be assigned the specified license
    information.
    If I{troveNames} is specified, it should be a list of strings.
    It is possible to specify both packages and components in the I{troveNames}
    list. It is also possible to designate just the component by prefixing it
    with a colon (:) character, in which case all components with that name
    from all packages will have the license.
    If a language is specified with keyword argument I{language}, the strings
    will be associated to that language, otherwise the default language will
    be used.
    The C{macros} keyword accepts a boolean value, and defaults
    to True. If the value of C{macros} is False, recipe macros in the
    license strings will not be interpolated.
    EXAMPLES
    ========
    Assuming that the source trove will build two packages, I{prk-client} and
    I{prk-server}, each with I{:runtime} and I{:lib} components:
    C{r.Licenses('GPL', 'LGPL')}
    will set the licenses for the I{prk-client} and I{prk-server} troves.
    C{r.Licenses('GPL', 'LGPL', troveNames = [ ':runtime' ])}
    will set the licenses for the I{prk-client:runtime} and
    I{prk-server:runtime} troves.
    """
    def __init__(self, recipe, *args, **keywords):
        _BaseMetadata.__init__(self, recipe, *args, **keywords)
        # License strings accumulated via updateArgs; starts empty.
        self.licenses = []

    def updateArgs(self, *args, **keywords):
        # Positional args ARE the license strings, so they are captured here
        # and deliberately not forwarded to the base class.
        self.licenses = args
        _BaseMetadata.updateArgs(self, **keywords)

    def do(self):
        # Attach the configured licenses to the resolved troves.
        if not hasattr(self.recipe, '_addMetadataItem'):
            # Old Conary
            return

        troveNames = self._getTroveNames()
        if self.applymacros:
            licenses = [x % self.recipe.macros for x in self.licenses]
        else:
            licenses = self.licenses
        itemDict = dict(licenses = licenses, language = self.language)
        self.recipe._addMetadataItem(troveNames, itemDict)
class ResetKeyValueMetadata(policy.PackagePolicy):
    """
    NAME
    ====
    B{C{r.ResetKeyValueMetadata()}} - Delete selected key-value metadata
    entries.
    SYNOPSIS
    ========
    C{r.ResetKeyValueMetadata(I{key}, [I{key}, ...]}
    DESCRIPTION
    ===========
    Use the C{r.ResetKeyValueMetadata()} class to reset (i.e. delete)
    individual keys from the key-value metadata attached to the built troves.
    EXAMPLES
    ========
    C{r.ResetKeyValueMetadata('target-platform', 'target-architecture')}
    removes the two (hypothetical) keys I{target-platform} and
    I{target-architecture}.
    """
    def updateArgs(self, *args, **keywords):
        # Every positional argument is a metadata key scheduled for removal.
        self._filteredKeyValues = set(args)
        policy.PackagePolicy.updateArgs(self, **keywords)

    def do(self):
        # Guard clause: a Conary without key-value metadata support has
        # nothing we can filter.
        if not hasattr(self.recipe, '_filteredKeyValueMetadata'):
            return
        # Guard clause: the policy was never invoked with any keys.
        if not hasattr(self, '_filteredKeyValues'):
            return
        self.recipe._filteredKeyValueMetadata.update(self._filteredKeyValues)
|
nallath/PostProcessingPlugin | refs/heads/master | scripts/ColorChange.py | 2 | # This PostProcessing Plugin script is released
# under the terms of the AGPLv3 or higher
from ..Script import Script
#from UM.Logger import Logger
# from cura.Settings.ExtruderManager import ExtruderManager
class ColorChange(Script):
    """Insert an M600 filament-change command before each selected layer."""

    def __init__(self):
        super().__init__()

    def getSettingDataString(self):
        # JSON settings definition consumed by the PostProcessingPlugin UI.
        return """{
            "name":"Color Change",
            "key": "ColorChange",
            "metadata": {},
            "version": 2,
            "settings":
            {
                "layer_number":
                {
                    "label": "Layer",
                    "description": "At what layer should color change occur. This will be before the layer starts printing. Specify multiple color changes with a comma.",
                    "unit": "",
                    "type": "str",
                    "default_value": "1"
                },
                "initial_retract":
                {
                    "label": "Initial Retraction",
                    "description": "Initial filament retraction distance",
                    "unit": "mm",
                    "type": "float",
                    "default_value": 300.0
                },
                "later_retract":
                {
                    "label": "Later Retraction Distance",
                    "description": "Later filament retraction distance for removal",
                    "unit": "mm",
                    "type": "float",
                    "default_value": 30.0
                }
            }
        }"""

    def execute(self, data: list):
        """data is a list. Each index contains a layer"""
        layer_nums = self.getSettingValueByKey("layer_number")
        initial_retract = self.getSettingValueByKey("initial_retract")
        later_retract = self.getSettingValueByKey("later_retract")

        # Build the M600 command once; E/L arguments are optional.
        color_change = "M600"
        if initial_retract is not None and initial_retract > 0.:
            color_change = color_change + (" E%.2f" % initial_retract)
        if later_retract is not None and later_retract > 0.:
            color_change = color_change + (" L%.2f" % later_retract)
        color_change = color_change + " ; Generated by ColorChange plugin"

        for layer_target in layer_nums.split(','):
            layer_target = layer_target.strip()
            if not layer_target:
                # Blank entries (e.g. a trailing comma) used to crash int().
                continue
            try:
                layer_num = int(layer_target)
            except ValueError:
                # Skip non-numeric entries instead of aborting the whole job.
                continue
            # Require 0 < layer_num: negative values previously indexed
            # silently from the END of the layer list via data[layer_num - 1].
            if 0 < layer_num < len(data):
                layer = data[layer_num - 1]
                lines = layer.split("\n")
                # Insert after the layer's leading comment lines.
                lines.insert(2, color_change)
                data[layer_num - 1] = "\n".join(lines)
        return data
|
ihatevim/spotbot | refs/heads/master | plugins/coin.py | 4 | from util import hook
import random
@hook.command(autohelp=False)
def coin(inp, me=None):
"""coin [amount] -- Flips [amount] of coins."""
if inp:
try:
amount = int(inp)
except (ValueError, TypeError):
return "Invalid input!"
else:
amount = 1
if amount == 1:
me("flips a coin and gets {}.".format(random.choice(["heads", "tails"])))
elif amount == 0:
me("makes a coin flipping motion with its hands.")
else:
heads = int(random.normalvariate(.5 * amount, (.75 * amount) ** .5))
tails = amount - heads
me("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails))
|
r0e/servo | refs/heads/master | tests/wpt/web-platform-tests/fetch/nosniff/resources/css.py | 207 | def main(request, response):
outcome = request.GET.first("outcome", "f")
type = request.GET.first("type", None)
content = "/* nothing to see here */"
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("x-content-type-options", "nosniff")
response.writer.write_header("content-length", len(content))
if(type != None):
response.writer.write_header("content-type", type)
response.writer.end_headers()
response.writer.write(content)
|
aethaniel/Espruino | refs/heads/master | scripts/serial_monitor_bytes.py | 8 | #!/usr/bin/python
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Utility function - return the actual bytes which come from the serial port
# ----------------------------------------------------------------------------------------
import time
import serial
import sys
import json
ser = serial.Serial(
port='/dev/ttyUSB0', # or /dev/ttyAMA0 for serial on the PI
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
xonxoff=0, rtscts=0, dsrdtr=0,
)
ser.open()
ser.isOpen()
endtime = time.time()+0.2 # wait 0.2 sec
while True:
while ser.inWaiting() > 0:
print str(ord(ser.read(1)))
ser.close()
|
Mlieou/leetcode_python | refs/heads/master | leetcode/python/ex_376.py | 3 | class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2: return len(nums)
prev_diff = nums[1] - nums[0]
if prev_diff != 0:
longest = 2
else:
longest = 1
for i in range(2, len(nums)):
curr_diff = (nums[i] - nums[i-1])
if (curr_diff > 0 and prev_diff <= 0) or (curr_diff < 0 and prev_diff >= 0):
longest += 1
prev_diff = curr_diff
return longest |
AzamYahya/shogun | refs/heads/develop | applications/easysvm/esvm/parse.py | 31 | """
This module contains code to parse the input arguments to the command line:
- easysvm.py
- datagen.py
"""
#############################################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#############################################################################################
import datafuncs
import sys
################################################################################
# basic types
def parse_range(str):
    """Parse 'a-b' (or a single integer 'a') into an inclusive (low, high) tuple.

    Exits the program with an error message for any other format.
    (The parameter is named `str` for backward compatibility with callers.)
    """
    # Renamed the local from `list` so the builtin is not shadowed.
    parts = str.split("-")
    if len(parts) == 1:
        return (int(parts[0]), int(parts[0]))
    if len(parts) == 2:
        return (int(parts[0]), int(parts[1]))
    sys.stderr.write("Cannot parse range '%s'\n" % str)
    sys.exit(-1)
def parse_float_list(str):
    """Parse a comma-separated string into a list of floats.

    (The parameter is named `str` for backward compatibility with callers.)
    """
    # Comprehension replaces the manual append loop; also stops shadowing
    # the builtin `list`.
    return [float(elem) for elem in str.split(",")]
def parse_int_list(str):
    """Parse a comma-separated string into a list of ints.

    (The parameter is named `str` for backward compatibility with callers.)
    """
    # Comprehension replaces the manual append loop; also stops shadowing
    # the builtin `list`.
    return [int(elem) for elem in str.split(",")]
################################################################################
# input files
def parse_input_file_train(kernelname, argv):
    """Parse the input file names and load the training examples/labels.

    argv starts with the format ('fasta' or 'arff') followed by the file
    name(s); returns (examples, labels, remaining_argv). Exits the program
    with a usage message on malformed arguments.
    """
    if len(argv)<2 or (argv[0]=="fasta" and len(argv)<3) or (argv[0]!='fasta' and argv[0]!='arff'):
        sys.stderr.write("data usage: arff <train.arff>\n or: fasta <train_pos.fa> <train_neg.fa>\n")
        sys.exit(-1)

    if argv[0] == 'fasta':
        datafilenamepos = argv[1]
        datafilenameneg = argv[2]
        (examples, labels) = datafuncs.fastaread(datafilenamepos, datafilenameneg)
        argv_rest=argv[3:]
    elif argv[0] == 'arff':
        datafilename = argv[1]
        (examples, labels) = datafuncs.arffread(kernelname, datafilename)
        argv_rest=argv[2:]
    else:
        # Unreachable (the guard above exits for unknown formats); the
        # Python-2-only `print` statement was replaced with stderr output
        # for consistency with the other error paths.
        sys.stderr.write('Error in parse_input_file\n')
    return (examples,labels,argv_rest)
def parse_input_file_train_test(kernelname, argv):
    """Parse the input file names and load train examples/labels and test examples.

    argv starts with the format ('fasta' or 'arff') followed by the file
    names; returns (trainex, trainlab, testex, remaining_argv). Exits the
    program with a usage message on malformed arguments.
    """
    if len(argv)<3 or (argv[0]=="fasta" and len(argv)<4) or (argv[0]!='fasta' and argv[0]!='arff'):
        sys.stderr.write("data usage: arff <train.arff> <test.arff>\n or: fasta <train_pos.fa> <train_neg.fa> <test.fa>\n")
        sys.exit(-1)

    if argv[0] == 'fasta':
        datafilenamepos = argv[1]
        datafilenameneg = argv[2]
        datafilenametest = argv[3]
        (trainex, trainlab) = datafuncs.fastaread(datafilenamepos, datafilenameneg)
        # testlab is discarded: test labels are unknown at prediction time.
        (testex, testlab) = datafuncs.fastaread(datafilenametest)
        argv_rest=argv[4:]
    elif argv[0] == 'arff':
        datafilename = argv[1]
        datafilenametest = argv[2]
        (trainex, trainlab) = datafuncs.arffread(kernelname, datafilename)
        (testex, testlab) = datafuncs.arffread(kernelname, datafilenametest)
        argv_rest=argv[3:]
    else:
        # Unreachable (the guard above exits for unknown formats); the
        # Python-2-only `print` statement was replaced with stderr output
        # for consistency with the other error paths.
        sys.stderr.write('Error in parse_input_file\n')
    return (trainex,trainlab,testex,argv_rest)
################################################################################
# prediction file
def parse_prediction_file(fname):
    """Parse a prediction file.

    Each non-comment line is '<index>\\t<output>[\\t<splitassignment>]' with
    0-based consecutive indices. Returns (outputs, splitassignments), where
    splitassignments is None when the third column is absent.
    """
    outputs=[]
    splitassignments=[]

    # `with` guarantees the file is closed even if a malformed line trips one
    # of the asserts below (the original leaked the handle in that case).
    with open(fname) as f:
        str=f.read()
    lines = str.split('\n')
    num=0
    for line in lines:
        if len(line)>0 and line[0] != '#':
            elems=line.split('\t')
            assert(len(elems)>1)
            assert(int(elems[0]) == num)
            num+=1
            if len(elems)==2:
                outputs.append(float(elems[1]))
            else:
                assert(len(elems)==3)
                outputs.append(float(elems[1]))
                splitassignments.append(float(elems[2]))

    if len(splitassignments)==0:
        splitassignments = None

    return (outputs, splitassignments)
################################################################################
# kernel parameters
def parse_kernel_param(argv, allow_modelsel_params):
    """Parse the arguments for a particular kernel.

    Returns (kernelname, kparam, argv_rest), where kparam maps parameter
    names to values and argv_rest is the list of unconsumed arguments.
    When allow_modelsel_params is true, the tunable parameter is left as
    None and the candidate values are recorded under 'modelsel_params'
    with its name under 'modelsel_name'. Exits the program with a usage
    message for unknown kernels or missing arguments.
    """
    if len(argv)<1:
        sys.stderr.write("kernel usage: <kernelname> [<parameters>]\n")
        sys.exit(-1)
    kernelname = argv[0]
    kparam = {}
    kparam["name"]=kernelname
    kparam["modelsel_name"]=None
    kparam["modelsel_params"]=None

    if kernelname == 'gauss':
        if len(argv)<2:
            sys.stderr.write("kernel usage: gauss <width>\n")
            sys.exit(-1)
        if allow_modelsel_params:
            kparam['width'] = None
            kparam["modelsel_name"]="width"
            kparam["modelsel_params"]=parse_float_list(argv[1])
        else:
            kparam['width'] = float(argv[1])
        argv_rest=argv[2:]
    elif kernelname == 'linear':
        kparam['scale']=1
        # no parameters
        argv_rest=argv[1:]
    elif kernelname == 'poly':
        if len(argv)<4:
            sys.stderr.write("kernel usage: poly <degree> <true|false> <true|false>\n")
            sys.exit(-1)
        if allow_modelsel_params:
            kparam['degree'] = None
            kparam["modelsel_name"]="degree"
            kparam["modelsel_params"]=parse_int_list(argv[1])
        else:
            kparam['degree'] = int(argv[1])
        kparam['inhomogene'] = (argv[2] == 'true')
        kparam['normal'] = (argv[3] == 'true')
        argv_rest=argv[4:]
    elif kernelname == 'wd':
        if len(argv)<3:
            sys.stderr.write("kernel usage: wd <degree> <shift>\n")
            sys.exit(-1)
        if allow_modelsel_params:
            kparam['degree'] = None
            kparam["modelsel_name"]="degree"
            kparam["modelsel_params"]=parse_int_list(argv[1])
        else:
            kparam['degree'] = int(argv[1])
        # A single model-selection candidate for degree frees the slot for
        # tuning the shift instead.
        if allow_modelsel_params and len(kparam["modelsel_params"])==1:
            kparam['degree'] = kparam["modelsel_params"][0]
            kparam['shift'] = None
            kparam["modelsel_name"] = "shift"
            kparam["modelsel_params"]=parse_int_list(argv[2])
        else:
            kparam['shift'] = int(argv[2])
        argv_rest=argv[3:]
    elif kernelname == 'spec':
        if len(argv)<2:
            sys.stderr.write("kernel usage: spec <degree>\n")
            sys.exit(-1)
        if allow_modelsel_params:
            kparam['degree'] = None
            kparam["modelsel_name"]="degree"
            kparam["modelsel_params"]=parse_int_list(argv[1])
        else:
            kparam['degree'] = int(argv[1])
        argv_rest=argv[2:]
    elif kernelname == 'localalign':
        # no parameters
        argv_rest=argv[1:]
    elif kernelname == 'localimprove':
        if len(argv)<4:
            sys.stderr.write("kernel usage: localimprove <length> <indegree> <outdegree>\n")
            sys.exit(-1)
        kparam['length'] = int(argv[1])
        if allow_modelsel_params:
            # BUGFIX: the original set kparam['width'] = None here (a stale
            # copy-paste from the gauss branch); the tuned parameter in this
            # branch is 'indeg', matching modelsel_name below.
            kparam['indeg'] = None
            kparam["modelsel_name"]="indeg"
            kparam["modelsel_params"]=parse_int_list(argv[2])
        else:
            kparam['indeg'] = int(argv[2])
        kparam['outdeg'] = int(argv[3])
        argv_rest=argv[4:]
    else:
        sys.stderr.write( 'Unknown kernel name %s in parse_kernel_param\n' % kernelname )
        sys.exit(-1)

    return kernelname,kparam,argv_rest
|
larrybradley/astropy | refs/heads/remote-tests | astropy/io/fits/scripts/fitsdiff.py | 8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import argparse
import glob
import logging
import os
import sys
from astropy.io import fits
from astropy.io.fits.util import fill
from astropy import __version__
log = logging.getLogger('fitsdiff')
DESCRIPTION = """
Compare two FITS image files and report the differences in header keywords and
data.
fitsdiff [options] filename1 filename2
where filename1 filename2 are the two files to be compared. They may also be
wild cards, in such cases, they must be enclosed by double or single quotes, or
they may be directory names. If both are directory names, all files in each of
the directories will be included; if only one is a directory name, then the
directory name will be prefixed to the file name(s) specified by the other
argument. for example::
fitsdiff "*.fits" "/machine/data1"
will compare all FITS files in the current directory to the corresponding files
in the directory /machine/data1.
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#fitsdiff
for further documentation.
""".strip()
EPILOG = fill("""
If the two files are identical within the specified conditions, it will report
"No difference is found." If the value(s) of -c and -k takes the form
'@filename', list is in the text file 'filename', and each line in that text
file contains one keyword.
Example
-------
fitsdiff -k filename,filtnam1 -n 5 -r 1.e-6 test1.fits test2
This command will compare files test1.fits and test2.fits, report maximum of 5
different pixels values per extension, only report data values larger than
1.e-6 relative to each other, and will neglect the different values of keywords
FILENAME and FILTNAM1 (or their very existence).
fitsdiff command-line arguments can also be set using the environment variable
FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present,
each argument present will override the corresponding argument on the
command-line unless the --exact option is specified. The FITSDIFF_SETTINGS
environment variable exists to make it easier to change the
behavior of fitsdiff on a global level, such as in a set of regression tests.
""".strip(), width=80)
class StoreListAction(argparse.Action):
    """argparse action storing a list parsed either from a comma-separated
    value ('a,b,c') or, when the value starts with '@', from a file with one
    entry per line. Unreadable/missing @files log a warning and leave the
    destination as an empty list."""

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super().__init__(option_strings, dest, nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, [])

        # Accept either a comma-separated list or a filename (starting with @)
        # containing a value on each line
        if values and values[0] == '@':
            value = values[1:]
            if not os.path.exists(value):
                log.warning(f'{self.dest} argument {value} does not exist')
                return
            try:
                # Context manager closes the file (the original left the
                # handle from open(...).readlines() dangling).
                with open(value, 'r') as fobj:
                    values = [v.strip() for v in fobj.readlines()]
                setattr(namespace, self.dest, values)
            except OSError as exc:
                log.warning('reading {} for {} failed: {}; ignoring this '
                            'argument'.format(value, self.dest, exc))
        else:
            setattr(namespace, self.dest,
                    [v.strip() for v in values.split(',')])
def handle_options(argv=None):
    """Build the fitsdiff argument parser and parse *argv*.

    Returns the parsed options namespace; errors out (via parser.error)
    unless exactly two file arguments are given. NOTE: the declaration
    order below determines the --help output ordering.
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION, epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        '--version', action='version',
        version=f'%(prog)s {__version__}')

    parser.add_argument(
        'fits_files', metavar='file', nargs='+',
        help='.fits files to process.')

    parser.add_argument(
        '-q', '--quiet', action='store_true',
        help='Produce no output and just return a status code.')

    parser.add_argument(
        '-n', '--num-diffs', type=int, default=10, dest='numdiffs',
        metavar='INTEGER',
        help='Max number of data differences (image pixel or table element) '
             'to report per extension (default %(default)s).')

    parser.add_argument(
        '-r', '--rtol', '--relative-tolerance', type=float, default=None,
        dest='rtol', metavar='NUMBER',
        help='The relative tolerance for comparison of two numbers, '
             'specifically two floating point numbers.  This applies to data '
             'in both images and tables, and to floating point keyword values '
             'in headers (default %(default)s).')

    parser.add_argument(
        '-a', '--atol', '--absolute-tolerance', type=float, default=None,
        dest='atol', metavar='NUMBER',
        help='The absolute tolerance for comparison of two numbers, '
             'specifically two floating point numbers.  This applies to data '
             'in both images and tables, and to floating point keyword values '
             'in headers (default %(default)s).')

    parser.add_argument(
        '-b', '--no-ignore-blanks', action='store_false',
        dest='ignore_blanks', default=True,
        help="Don't ignore trailing blanks (whitespace) in string values.  "
             "Otherwise trailing blanks both in header keywords/values and in "
             "table column values) are not treated as significant i.e., "
             "without this option 'ABCDEF   ' and 'ABCDEF' are considered "
             "equivalent. ")

    parser.add_argument(
        '--no-ignore-blank-cards', action='store_false',
        dest='ignore_blank_cards', default=True,
        help="Don't ignore entirely blank cards in headers.  Normally fitsdiff "
             "does not consider blank cards when comparing headers, but this "
             "will ensure that even blank cards match up. ")

    parser.add_argument(
        '--exact', action='store_true',
        dest='exact_comparisons', default=False,
        help="Report ALL differences, "
             "overriding command-line options and FITSDIFF_SETTINGS. ")

    parser.add_argument(
        '-o', '--output-file', metavar='FILE',
        help='Output results to this file; otherwise results are printed to '
             'stdout.')

    parser.add_argument(
        '-u', '--ignore-hdus', action=StoreListAction,
        default=[], dest='ignore_hdus',
        metavar='HDU_NAMES',
        help='Comma-separated list of HDU names not to be compared.  HDU '
             'names may contain wildcard patterns.')

    group = parser.add_argument_group('Header Comparison Options')

    group.add_argument(
        '-k', '--ignore-keywords', action=StoreListAction,
        default=[], dest='ignore_keywords',
        metavar='KEYWORDS',
        help='Comma-separated list of keywords not to be compared.  Keywords '
             'may contain wildcard patterns.  To exclude all keywords, use '
             '"*"; make sure to have double or single quotes around the '
             'asterisk on the command-line.')

    group.add_argument(
        '-c', '--ignore-comments', action=StoreListAction,
        default=[], dest='ignore_comments',
        metavar='COMMENTS',
        help='Comma-separated list of keywords whose comments will not be '
             'compared.  Wildcards may be used as with --ignore-keywords.')

    group = parser.add_argument_group('Table Comparison Options')

    group.add_argument(
        '-f', '--ignore-fields', action=StoreListAction,
        default=[], dest='ignore_fields',
        metavar='COLUMNS',
        help='Comma-separated list of fields (i.e. columns) not to be '
             'compared.  All columns may be excluded using "*" as with '
             '--ignore-keywords.')

    options = parser.parse_args(argv)

    # Determine which filenames to compare
    if len(options.fits_files) != 2:
        parser.error('\nfitsdiff requires two arguments; '
                     'see `fitsdiff --help` for more details.')

    return options
def setup_logging(outfile=None):
    """Attach handlers to the module-level `log`.

    Warnings and errors always go to stderr; INFO-level report output goes
    to *outfile* when given, else to stdout. NOTE: mutates the global
    logger, so calling this twice duplicates handlers.
    """
    log.setLevel(logging.INFO)
    error_handler = logging.StreamHandler(sys.stderr)
    error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    error_handler.setLevel(logging.WARNING)
    log.addHandler(error_handler)

    if outfile is not None:
        output_handler = logging.FileHandler(outfile)
    else:
        output_handler = logging.StreamHandler()

        class LevelFilter(logging.Filter):
            """Log only messages matching the specified level."""

            def __init__(self, name='', level=logging.NOTSET):
                logging.Filter.__init__(self, name)
                self.level = level

            def filter(self, rec):
                return rec.levelno == self.level

        # File output logs all messages, but stdout logs only INFO messages
        # (since errors are already logged to stderr)
        output_handler.addFilter(LevelFilter(level=logging.INFO))

    output_handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(output_handler)
def match_files(paths):
    """Pair up the two path arguments into (file_a, file_b) comparison tuples.

    Each element of *paths* may be a file, a directory, or a wildcard
    pattern matching files in a single directory; files are matched by
    basename across the two sides. Exits with status 2 on unusable paths.
    """
    if os.path.isfile(paths[0]) and os.path.isfile(paths[1]):
        # shortcut if both paths are files
        return [paths]

    # dirnames[i]/filelists[i] describe side i; the two lists are intersected
    # by basename below.
    dirnames = [None, None]
    filelists = [None, None]

    for i, path in enumerate(paths):
        if glob.has_magic(path):
            files = [os.path.split(f) for f in glob.glob(path)]
            if not files:
                log.error('Wildcard pattern %r did not match any files.', path)
                sys.exit(2)

            dirs, files = list(zip(*files))
            if len(set(dirs)) > 1:
                log.error('Wildcard pattern %r should match only one '
                          'directory.', path)
                sys.exit(2)

            dirnames[i] = set(dirs).pop()
            filelists[i] = sorted(files)
        elif os.path.isdir(path):
            dirnames[i] = path
            filelists[i] = [f for f in sorted(os.listdir(path))
                            if os.path.isfile(os.path.join(path, f))]
        elif os.path.isfile(path):
            dirnames[i] = os.path.dirname(path)
            filelists[i] = [os.path.basename(path)]
        else:
            log.error(
                '%r is not an existing file, directory, or wildcard '
                'pattern; see `fitsdiff --help` for more usage help.', path)
            sys.exit(2)

        dirnames[i] = os.path.abspath(dirnames[i])

    # Only basenames present on BOTH sides are compared.
    filematch = set(filelists[0]) & set(filelists[1])

    # Warn about unmatched files on either side (unless that side was given
    # as a whole directory, where extras are expected).
    for a, b in [(0, 1), (1, 0)]:
        if len(filelists[a]) > len(filematch) and not os.path.isdir(paths[a]):
            for extra in sorted(set(filelists[a]) - filematch):
                log.warning('%r has no match in %r', extra, dirnames[b])

    return [(os.path.join(dirnames[0], f),
             os.path.join(dirnames[1], f)) for f in filematch]
def main(args=None):
    """Entry point: diff the matched FITS file pairs.

    Returns 0 when all pairs are identical, 1 otherwise. Arguments from the
    FITSDIFF_SETTINGS environment variable are prepended so the explicit
    command line wins (unless --exact resets everything).
    """
    args = args or sys.argv[1:]

    if 'FITSDIFF_SETTINGS' in os.environ:
        args = os.environ['FITSDIFF_SETTINGS'].split() + args

    opts = handle_options(args)

    # Unset tolerances mean exact numeric comparison.
    if opts.rtol is None:
        opts.rtol = 0.0
    if opts.atol is None:
        opts.atol = 0.0

    if opts.exact_comparisons:
        # override the options so that each is the most restrictive
        opts.ignore_keywords = []
        opts.ignore_comments = []
        opts.ignore_fields = []
        opts.rtol = 0.0
        opts.atol = 0.0
        opts.ignore_blanks = False
        opts.ignore_blank_cards = False

    if not opts.quiet:
        setup_logging(opts.output_file)
    files = match_files(opts.fits_files)

    close_file = False
    if opts.quiet:
        out_file = None
    elif opts.output_file:
        out_file = open(opts.output_file, 'w')
        close_file = True
    else:
        out_file = sys.stdout

    identical = []
    try:
        for a, b in files:
            # TODO: pass in any additional arguments here too
            diff = fits.diff.FITSDiff(
                a, b,
                ignore_hdus=opts.ignore_hdus,
                ignore_keywords=opts.ignore_keywords,
                ignore_comments=opts.ignore_comments,
                ignore_fields=opts.ignore_fields,
                numdiffs=opts.numdiffs,
                rtol=opts.rtol,
                atol=opts.atol,
                ignore_blanks=opts.ignore_blanks,
                ignore_blank_cards=opts.ignore_blank_cards)

            diff.report(fileobj=out_file)
            identical.append(diff.identical)

        # Exit status: 0 iff every compared pair was identical.
        return int(not all(identical))
    finally:
        if close_file:
            out_file.close()
        # Close the file if used for the logging output, and remove handlers to
        # avoid having them multiple times for unit tests.
        for handler in log.handlers:
            if isinstance(handler, logging.FileHandler):
                handler.close()
            log.removeHandler(handler)
tylesmit/openstack-dashboard | refs/heads/master | django-openstack/bootstrap.py | 191 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, textwrap, urllib, urllib2, subprocess
from optparse import OptionParser
# On Windows, arguments are re-joined into a command line by os.spawn*, so
# anything containing a space must be quoted; elsewhere quoting is a no-op.
if sys.platform == 'win32':
    def quote(c):
        if ' ' in c:
            return '"%s"' % c  # work around spawn lamosity on windows
        else:
            return c
else:
    quote = str

# Probe whether this Python's -S flag is broken (some system Pythons still
# expose site-packages under -S).  The child prints 1 when stdlib-only
# imports fail to behave as expected under -S.
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
    [sys.executable, '-Sc',
     'try:\n'
     '    import ConfigParser\n'
     'except ImportError:\n'
     '    print 1\n'
     'else:\n'
     '    print 0\n'],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded.  This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient.  However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
    # We will restart with python -S.
    args = sys.argv[:]
    args[0:0] = [sys.executable, '-S']
    args = map(quote, args)
    # Replaces the current process; nothing below runs in the parent.
    os.execv(sys.executable, args)
# Now we are running with -S.  We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site
sys.path[:] = clean_path
for k, v in sys.modules.items():
    if k in ('setuptools', 'pkg_resources') or (
        hasattr(v, '__path__') and
        len(v.__path__) == 1 and
        not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))):
        # This is a namespace package.  Remove it.
        sys.modules.pop(k)

is_jython = sys.platform.startswith('java')

# Default locations for the two supported setup bootstrappers.
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
    """optparse callback: canonicalise a path-or-URL option value.

    A value without a scheme is treated as a local path and turned into a
    ``file://`` URL; a ``--download-base`` value is given a trailing slash.
    A falsy value is stored as None.  The result lands on ``parser.values``
    under the option name with dashes replaced by underscores.
    """
    if not value:
        normalized = None
    else:
        normalized = value
        if '://' not in normalized:
            # No scheme: it doesn't smell like a URL, so make one from the path.
            normalized = 'file://%s' % (
                urllib.pathname2url(
                    os.path.abspath(os.path.expanduser(normalized))),)
        if opt_str == '--download-base' and not normalized.endswith('/'):
            # Download base needs a trailing slash to make the world happy.
            normalized += '/'
    setattr(parser.values, opt_str[2:].replace('-', '_'), normalized)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]

Bootstraps a buildout-based project.

Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.

Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''

parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
                  help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
                  action="store_true", dest="use_distribute", default=False,
                  help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or file location for the setup file. "
                        "If you use Setuptools, this will default to " +
                        setuptools_source + "; if you use Distribute, this "
                        "will default to " + distribute_source + "."))
parser.add_option("--download-base", action="callback", dest="download_base",
                  callback=normalize_to_url, nargs=1, type="string",
                  help=("Specify a URL or directory for downloading "
                        "zc.buildout and either Setuptools or Distribute. "
                        "Defaults to PyPI."))
parser.add_option("--eggs",
                  help=("Specify a directory for storing eggs.  Defaults to "
                        "a temporary directory that is deleted when the "
                        "bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you.  If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))

options, args = parser.parse_args()

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args += ['-c', options.config_file]

# Persistent eggs dir when requested; otherwise a throwaway temp dir that
# is removed at the end of the script.
if options.eggs:
    eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
    eggs_dir = tempfile.mkdtemp()

# Pick the bootstrapper matching the chosen setup tool, unless overridden.
if options.setup_source is None:
    if options.use_distribute:
        options.setup_source = distribute_source
    else:
        options.setup_source = setuptools_source

if options.accept_buildout_test_releases:
    args.append('buildout:accept-buildout-test-releases=true')
# The remaining args are forwarded to buildout's main(); run its
# 'bootstrap' command.
args.append('bootstrap')
# Ensure a usable setuptools/distribute + pkg_resources pair is importable;
# otherwise fetch and run the chosen bootstrapper (ez_setup/distribute_setup).
try:
    import pkg_resources
    import setuptools  # A flag.  Sometimes pkg_resources is installed alone.
    if not hasattr(pkg_resources, '_distribute'):
        raise ImportError
except ImportError:
    ez_code = urllib2.urlopen(
        options.setup_source).read().replace('\r\n', '\n')
    ez = {}
    # Execute the downloaded bootstrapper in a scratch namespace so we can
    # call its use_setuptools() entry point.
    exec ez_code in ez
    setup_args = dict(to_dir=eggs_dir, download_delay=0)
    if options.download_base:
        setup_args['download_base'] = options.download_base
    if options.use_distribute:
        setup_args['no_fake'] = True
    ez['use_setuptools'](**setup_args)
    if 'pkg_resources' in sys.modules:
        reload(sys.modules['pkg_resources'])
    import pkg_resources
    # This does not (always?) update the default working set.  We will
    # do it.
    for path in sys.path:
        if path not in pkg_resources.working_set.entries:
            pkg_resources.working_set.add_entry(path)

# Command line for a child Python that easy_installs zc.buildout into the
# eggs dir (-m multi-version, -q quiet, -N no deps? -x exclude scripts,
# -d destination).
cmd = [quote(sys.executable),
       '-c',
       quote('from setuptools.command.easy_install import main; main()'),
       '-mqNxd',
       quote(eggs_dir)]

if not has_broken_dash_S:
    cmd.insert(1, '-S')

find_links = options.download_base
if not find_links:
    find_links = os.environ.get('bootstrap-testing-find-links')
if find_links:
    cmd.extend(['-f', quote(find_links)])

if options.use_distribute:
    setup_requirement = 'distribute'
else:
    setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
    pkg_resources.Requirement.parse(setup_requirement)).location
# The child process must see the freshly installed setup tool.
env = dict(
    os.environ,
    PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # A parsed version is "final" when every '*'-tagged part is one of
        # the recognised final markers (i.e. no alpha/beta/dev tags).
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setup_requirement_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        # Track every distribution carrying the highest final version seen.
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    # Pin to the resolved (or user-requested) version.
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
# Run the easy_install child; Jython lacks os.spawn*, so use subprocess there.
if is_jython:
    import subprocess
    exitcode = subprocess.Popen(cmd, env=env).wait()
else:  # Windows prefers this, apparently; otherwise we would prefer subprocess
    exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
    sys.stdout.flush()
    sys.stderr.flush()
    print ("An error occurred when trying to install zc.buildout. "
           "Look above this message for any errors that "
           "were output by easy_install.")
    sys.exit(exitcode)

# Make the freshly installed zc.buildout importable, then hand control to it.
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
if not options.eggs:  # clean up temporary egg directory
    shutil.rmtree(eggs_dir)
|
krother/maze_run | refs/heads/master | leftovers/chapter10_data_structures.py | 1 |
"""
"""
import pygame
from pygame import image, Rect
from pygame.locals import KEYDOWN
from collections import namedtuple
from part1 import draw_grid
from chapter08_load_tile_positions import load_tile_positions
from chapter08_load_tile_positions import TILE_POSITION_FILE, TILE_IMAGE_FILE, SIZE
from chapter09_event_loop_with_mediator import event_loop, exit_game
from util import create_display
# Tile-grid coordinates and the tileset (sprite sheet image + per-symbol
# source rectangles).
Position = namedtuple('Position', ['x', 'y'])
TileSet = namedtuple('TileSet', ['image', 'positions'])

# Unit movement offsets in tile coordinates (x grows right, y grows down).
LEFT = Position(-1, 0)
RIGHT = Position(1, 0)
UP = Position(0, -1)
DOWN = Position(0, 1)
def get_tile_rect(position):
    """Convert a tile index (column, row) into its pixel pygame.Rect."""
    left, top = position.x * SIZE, position.y * SIZE
    return Rect(left, top, SIZE, SIZE)
# Level layout: '#' = wall, '.' = floor, 'x' = exit.
maze = [  # TODO: convert to nested list.
    "#####",
    "#...#",
    "#..x#",
    "#####"
    ]

player = {
    'maze': maze,
    'position': Position(1, 1),
    'tile': "*",
    # Key codes map to move directions; the numbers match pygame's arrow-key
    # constants (276=K_LEFT, 275=K_RIGHT, 273=K_UP, 274=K_DOWN).
    'move_keys': {276: LEFT, 275: RIGHT, 273: UP, 274: DOWN}
}

# The ghost has no move_keys: it is not keyboard-controlled.
ghost = {
    'maze': maze,
    'position': Position(3, 1),
    'tile': "g",
}
def draw_sprite(sprite, img, tiles):
    """Blit a single sprite's tile onto the given grid image."""
    destination = get_tile_rect(sprite['position'])
    source_area = tiles.positions[sprite['tile']]
    img.blit(tiles.image, destination, source_area)
def draw(maze, sprites, display, tiles):
    """Render the maze and every sprite, then push the result to the screen."""
    canvas = draw_grid(maze, tiles.image, tiles.positions)
    for sprite in sprites:
        draw_sprite(sprite, canvas, tiles)
    # 384x224 is the pixel size of the rendered grid area.
    view = Rect((0, 0, 384, 224))
    area = Rect((0, 0, 384, 224))
    display.blit(canvas, view, area)
    pygame.display.update()
def wait_for_key(event):
    # Any key press ends the program; the actual event value is ignored.
    exit_game()
if __name__ == '__main__':
    # Build the window, load the tileset, draw the initial scene, then block
    # in the event loop until a key press triggers wait_for_key -> exit_game.
    display = create_display((800, 600))
    tile_image = image.load(TILE_IMAGE_FILE)
    tile_positions = load_tile_positions(TILE_POSITION_FILE)
    tiles = TileSet(tile_image, tile_positions)
    sprites = [player, ghost]
    draw(maze, sprites, display, tiles)
    event_loop({KEYDOWN: wait_for_key})
|
jollyrogue/demdb | refs/heads/master | backend/libs/apipublic.py | 1 | #!/usr/bin/env python3
'''
Library to hold the API logic functions.
'''
from flask_restful import Resource
class DemDbApiPublic(Resource):
    """Public, unauthenticated entry point of the Democracy Database API."""

    def get(self):
        """Return the API welcome banner."""
        greeting = {'msg': 'Welcome to the public Democracy Database API v0.1!'}
        return greeting
|
CO600GOL/Game_of_life | refs/heads/develop | ProjectConway/projectconway/testing/test_views/test_tutorial4.py | 1 | from pyramid.testing import DummyRequest
from projectconway.views.tutorial4 import tutorial4_view
class TestTutorial4(object):
    """Tests for the views belonging to the fourth tutorial page."""

    def test_tutorial4_view(self):
        """The tutorial-4 view returns the expected template context."""
        dummy_request = DummyRequest(route='tutorial4')
        context = tutorial4_view(dummy_request)

        # A truthy context carrying the page identifier and title must come back.
        assert context
        assert context["page"] == "tutorial4page"
        assert context["title"] == "Tutorial4"
erudit/zenon | refs/heads/master | tests/functional/apps/userspace/journal/information/test_views.py | 1 | from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.test import TestCase
from base.test.factories import UserFactory
from base.test.testcases import Client
from erudit.models import JournalInformation
from erudit.test.factories import JournalFactory
from apps.userspace.journal.information.forms import ContributorInlineFormset
from core.authorization.defaults import AuthorizationConfig as AC
from core.authorization.test.factories import AuthorizationFactory
class TestJournalInformationUpdateView(TestCase):
    """Integration tests for the journal-information update view
    (authorization gating, language selection, and per-language saves)."""

    def test_cannot_be_accessed_by_users_who_cannot_edit_journals(self):
        """A logged-in user without the edit authorization gets a 403."""
        # Setup
        journal = JournalFactory()
        user = UserFactory()
        client = Client(logged_user=user)
        url = reverse('userspace:journal:information:update',
                      kwargs={'journal_pk': journal.pk})
        # Run
        response = client.get(url)
        # Check
        self.assertEqual(response.status_code, 403)

    def test_embed_the_selected_language_into_the_context(self):
        """The ``lang`` query parameter selects the edit language (fr default)."""
        # Setup
        journal = JournalFactory()
        user = UserFactory()
        journal.members.add(user)
        AuthorizationFactory.create(
            content_type=ContentType.objects.get_for_model(journal), object_id=journal.id,
            user=user, authorization_codename=AC.can_edit_journal_information.codename)
        client = Client(logged_user=user)
        url = reverse('userspace:journal:information:update',
                      kwargs={'journal_pk': journal.pk})
        # Run
        response_1 = client.get(url)
        response_2 = client.get(url, {'lang': 'en'})
        # Check
        self.assertEqual(response_1.status_code, 200)
        self.assertEqual(response_2.status_code, 200)
        self.assertEqual(response_1.context['selected_language'], 'fr')
        self.assertEqual(response_2.context['selected_language'], 'en')

    def test_can_be_used_to_update_journal_information_using_the_current_lang(self):
        """Posting without ``lang`` writes the French (default) fields."""
        # Setup
        user = UserFactory()
        journal = JournalFactory(members=[user])
        AuthorizationFactory.create(
            content_type=ContentType.objects.get_for_model(journal), object_id=journal.id,
            user=user, authorization_codename=AC.can_edit_journal_information.codename)
        client = Client(logged_user=user)
        post_data = {
            'about_fr': 'Ceci est un test',
            # Management-form fields for the (empty) contributor inline formset.
            'contributor_set-TOTAL_FORMS': 0,
            'contributor_set-INITIAL_FORMS': 0,
            'contributor_set-MAX_FORMS': 1,
            'contributor_set-MIN_FORMS': 0
        }
        # NOTE(review): this formset is never used -- presumably leftover;
        # confirm before removing.
        formset = ContributorInlineFormset()
        url = reverse('userspace:journal:information:update',
                      kwargs={'journal_pk': journal.pk})
        # Run
        response = client.post(url, post_data, follow=False)
        # Check: successful save redirects, and the French field is persisted.
        self.assertEqual(response.status_code, 302)
        info = JournalInformation.objects.get(journal=journal)
        self.assertEqual(info.about_fr, post_data['about_fr'])

    def test_can_be_used_to_update_journal_information_using_a_specific_lang(self):
        """Posting with ``?lang=en`` writes the English fields."""
        # Setup
        user = UserFactory()
        journal = JournalFactory(members=[user])
        AuthorizationFactory.create(
            content_type=ContentType.objects.get_for_model(journal), object_id=journal.id,
            user=user, authorization_codename=AC.can_edit_journal_information.codename)
        client = Client(logged_user=user)
        post_data = {
            'about_en': 'This is a test',
            'contributor_set-TOTAL_FORMS': 0,
            'contributor_set-INITIAL_FORMS': 0,
            'contributor_set-MAX_FORMS': 1,
            'contributor_set-MIN_FORMS': 0
        }
        url = '{}?lang=en'.format(
            reverse('userspace:journal:information:update',
                    kwargs={'journal_pk': journal.pk}))
        # Run
        response = client.post(url, post_data, follow=False)
        # Check
        self.assertEqual(response.status_code, 302)
        info = JournalInformation.objects.get(journal=journal)
        self.assertEqual(info.about_en, post_data['about_en'])
|
cyanna/edx-platform | refs/heads/master | common/djangoapps/terrain/browser.py | 84 | """
Browser set up for acceptance tests.
"""
# pylint: disable=no-member
# pylint: disable=unused-argument
from lettuce import before, after, world
from splinter.browser import Browser
from logging import getLogger
from django.core.management import call_command
from django.conf import settings
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import requests
from base64 import encodestring
from json import dumps
import xmodule.modulestore.django
from xmodule.contentstore.django import _CONTENTSTORE
# There is an import issue when using django-staticfiles with lettuce
# Lettuce assumes that we are using django.contrib.staticfiles,
# but the rest of the app assumes we are using django-staticfiles
# (in particular, django-pipeline and our mako implementation)
# To resolve this, we check whether staticfiles is installed,
# then redirect imports for django.contrib.staticfiles
# to use staticfiles.
try:
    import staticfiles
    import staticfiles.handlers
except ImportError:
    pass
else:
    import sys
    # Alias the contrib modules so later imports resolve to django-staticfiles.
    sys.modules['django.contrib.staticfiles'] = staticfiles
    sys.modules['django.contrib.staticfiles.handlers'] = staticfiles.handlers

LOGGER = getLogger(__name__)
LOGGER.info("Loading the lettuce acceptance testing terrain file...")

# Give up on acquiring a usable local browser session after this many tries.
MAX_VALID_BROWSER_ATTEMPTS = 20
# Seconds passed to Selenium's set_script_timeout for injected scripts.
GLOBAL_SCRIPT_TIMEOUT = 60
def get_saucelabs_username_and_key():
    """
    Return the Sauce Labs credentials configured in settings.SAUCE as a dict
    with 'username' and 'access-key' entries.
    """
    sauce = settings.SAUCE
    return {"username": sauce.get('USERNAME'), "access-key": sauce.get('ACCESS_ID')}
def set_saucelabs_job_status(jobid, passed=True):
    """
    Mark the given Sauce Labs job as passed/failed via their REST API.

    Args:
        jobid: the Sauce Labs job/session id to update.
        passed: whether the test run associated with the job succeeded.

    Returns True when Sauce Labs acknowledged the update (HTTP 200).
    """
    config = get_saucelabs_username_and_key()
    # Bug fix: use the jobid parameter.  The old code ignored it and read
    # world.jobid directly, which made the parameter misleading and the
    # function unusable for any other job id.
    url = 'http://saucelabs.com/rest/v1/{}/jobs/{}'.format(config['username'], jobid)
    body_content = dumps({"passed": passed})
    # encodestring appends a trailing newline; strip it for the header value.
    base64string = encodestring('{}:{}'.format(config['username'], config['access-key']))[:-1]
    headers = {"Authorization": "Basic {}".format(base64string)}
    result = requests.put(url, data=body_content, headers=headers)
    return result.status_code == 200
def make_saucelabs_desired_capabilities():
    """
    Build the DesiredCapabilities dict describing the Sauce Labs session,
    merging the environment-driven settings.SAUCE values with fixed options.
    """
    capabilities = settings.SAUCE.get('BROWSER', DesiredCapabilities.CHROME)
    capabilities.update({
        'platform': settings.SAUCE.get('PLATFORM'),
        'version': settings.SAUCE.get('VERSION'),
        'device-type': settings.SAUCE.get('DEVICE'),
        'name': settings.SAUCE.get('SESSION'),
        'build': settings.SAUCE.get('BUILD'),
        'video-upload-on-pass': False,
        'sauce-advisor': False,
        'capture-html': True,
        'record-screenshots': True,
        'selenium-version': "2.34.0",
        'max-duration': 3600,
        'public': 'public restricted',
    })
    return capabilities
@before.harvest
def initial_setup(server):
    """
    Launch the browser once before executing the tests.

    Supports three selenium clients ('local', 'saucelabs', 'grid') selected
    via settings.LETTUCE_SELENIUM_CLIENT; stores the browser, implicit wait
    and job id on lettuce's shared ``world``.
    """
    world.absorb(settings.LETTUCE_SELENIUM_CLIENT, 'LETTUCE_SELENIUM_CLIENT')

    if world.LETTUCE_SELENIUM_CLIENT == 'local':
        browser_driver = getattr(settings, 'LETTUCE_BROWSER', 'chrome')

        if browser_driver == 'chrome':
            # Collect all browser console messages so they can be dumped on failure.
            desired_capabilities = DesiredCapabilities.CHROME
            desired_capabilities['loggingPrefs'] = {
                'browser': 'ALL',
            }
        elif browser_driver == 'firefox':
            desired_capabilities = DesiredCapabilities.FIREFOX
        else:
            desired_capabilities = {}

        # There is an issue with ChromeDriver2 r195627 on Ubuntu
        # in which we sometimes get an invalid browser session.
        # This is a work-around to ensure that we get a valid session.
        success = False
        num_attempts = 0
        while (not success) and num_attempts < MAX_VALID_BROWSER_ATTEMPTS:

            # Load the browser and try to visit the main page
            # If the browser couldn't be reached or
            # the browser session is invalid, this will
            # raise a WebDriverException
            try:
                world.browser = Browser(browser_driver, desired_capabilities=desired_capabilities)
                world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
                world.visit('/')

            except WebDriverException:
                # Clean up the half-created session before retrying.
                if hasattr(world, 'browser'):
                    world.browser.quit()
                num_attempts += 1

            else:
                success = True

        # If we were unable to get a valid session within the limit of attempts,
        # then we cannot run the tests.
        if not success:
            raise IOError("Could not acquire valid {driver} browser session.".format(driver=browser_driver))

        world.absorb(0, 'IMPLICIT_WAIT')
        world.browser.driver.set_window_size(1280, 1024)

    elif world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
        config = get_saucelabs_username_and_key()
        world.browser = Browser(
            'remote',
            url="http://{}:{}@ondemand.saucelabs.com:80/wd/hub".format(config['username'], config['access-key']),
            **make_saucelabs_desired_capabilities()
        )
        world.absorb(30, 'IMPLICIT_WAIT')
        world.browser.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)

    elif world.LETTUCE_SELENIUM_CLIENT == 'grid':
        world.browser = Browser(
            'remote',
            url=settings.SELENIUM_GRID.get('URL'),
            browser=settings.SELENIUM_GRID.get('BROWSER'),
        )
        world.absorb(30, 'IMPLICIT_WAIT')
        world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)

    else:
        raise Exception("Unknown selenium client '{}'".format(world.LETTUCE_SELENIUM_CLIENT))

    world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)
    # The session id is later used to report job status back to Sauce Labs.
    world.absorb(world.browser.driver.session_id, 'jobid')
@before.each_scenario
def reset_data(scenario):
    """
    Clean out the django test database defined in the
    envs/acceptance.py file: edx-platform/db/test_edx.db
    """
    LOGGER.debug("Flushing the test database...")
    call_command('flush', interactive=False, verbosity=0)
    # Fresh scratch dict for per-scenario state shared between steps.
    world.absorb({}, 'scenario_dict')
@before.each_scenario
def configure_screenshots(scenario):
    """
    Before each scenario, turn off automatic screenshots.

    Scenarios opt back in via the 'I enable auto screenshots' step
    (see capture_screenshot_for_step).

    Args: str, scenario. Name of current scenario.
    """
    world.auto_capture_screenshots = False
@after.each_scenario
def clear_data(scenario):
    # Discard the per-scenario scratch dict created in reset_data.
    world.spew('scenario_dict')
@after.each_scenario
def reset_databases(scenario):
    '''
    After each scenario, all databases are cleared/dropped.  Contentstore data are stored in unique databases
    whereas modulestore data is in unique collection names.  This data is created implicitly during the scenarios.
    If no data is created during the test, these lines equivalently do nothing.
    '''
    xmodule.modulestore.django.modulestore()._drop_database()  # pylint: disable=protected-access
    xmodule.modulestore.django.clear_existing_modulestores()
    _CONTENTSTORE.clear()
@world.absorb
def capture_screenshot(image_name):
    """
    Capture a screenshot outputting it to a defined directory.
    This function expects only the name of the file. It will generate
    the full path of the output screenshot.

    If the name contains spaces, they will be converted to underscores.
    """
    output_dir = '{}/log/auto_screenshots'.format(settings.TEST_ROOT)
    image_name = '{}/{}.png'.format(output_dir, image_name.replace(' ', '_'))
    try:
        world.browser.driver.save_screenshot(image_name)
    except WebDriverException:
        # Best effort: a dead browser session must not fail the test run.
        LOGGER.error("Could not capture a screenshot '{}'".format(image_name))
@after.each_scenario
def screenshot_on_error(scenario):
    """
    Save a screenshot to help with debugging.

    Runs only for failed scenarios; the image is named after the scenario.
    """
    if scenario.failed:
        try:
            output_dir = '{}/log'.format(settings.TEST_ROOT)
            image_name = '{}/{}.png'.format(output_dir, scenario.name.replace(' ', '_'))
            world.browser.driver.save_screenshot(image_name)
        except WebDriverException:
            LOGGER.error('Could not capture a screenshot')
@after.each_scenario
def capture_console_log(scenario):
    """
    Save the browser console log to help with debugging failed scenarios.

    The log is written to TEST_ROOT/log/<scenario name>.log, one JSON entry
    per line.
    """
    if scenario.failed:
        try:
            # Bug fix: fetching the log can itself raise WebDriverException
            # (e.g. when the browser session already died), so it must live
            # inside the try block that is meant to guard against exactly that.
            log = world.browser.driver.get_log('browser')
            output_dir = '{}/log'.format(settings.TEST_ROOT)
            file_name = '{}/{}.log'.format(output_dir, scenario.name.replace(' ', '_'))
            with open(file_name, 'w') as output_file:
                for line in log:
                    output_file.write("{}{}".format(dumps(line), '\n'))
        except WebDriverException:
            LOGGER.error('Could not capture the console log')
def capture_screenshot_for_step(step, when):
    """
    Useful method for debugging acceptance tests that are run in Vagrant.
    This method runs automatically before and after each step of an acceptance
    test scenario. The variable:

        world.auto_capture_screenshots

    either enables or disabled the taking of screenshots. To change the
    variable there is a convenient step defined:

        I (enable|disable) auto screenshots

    If you just want to capture a single screenshot at a desired point in code,
    you should use the method:

        world.capture_screenshot("image_name")
    """
    if world.auto_capture_screenshots:
        # 1-based indices so the file names read naturally.
        scenario_num = step.scenario.feature.scenarios.index(step.scenario) + 1
        step_num = step.scenario.steps.index(step) + 1
        # NOTE: func_name is Python 2 only (__name__ in Python 3).
        step_func_name = step.defined_at.function.func_name
        image_name = "{prefix:03d}__{num:03d}__{name}__{postfix}".format(
            prefix=scenario_num,
            num=step_num,
            name=step_func_name,
            postfix=when
        )
        world.capture_screenshot(image_name)
@before.each_step
def before_each_step(step):
    # Optional screenshot just before the step runs (see capture_screenshot_for_step).
    capture_screenshot_for_step(step, '1_before')


@after.each_step
def after_each_step(step):
    # Optional screenshot just after the step ran.
    capture_screenshot_for_step(step, '2_after')
@after.harvest
def teardown_browser(total):
    """
    Quit the browser after executing the tests.
    """
    if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
        # Report overall pass/fail so the Sauce Labs job is labelled correctly.
        set_saucelabs_job_status(world.jobid, total.scenarios_ran == total.scenarios_passed)
    world.browser.quit()
|
eventql/eventql | refs/heads/master | deps/3rdparty/spidermonkey/mozjs/testing/mozbase/mozfile/mozfile/__init__.py | 24 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mozfile import *
|
mdanielwork/intellij-community | refs/heads/master | python/testData/intentions/PyAnnotateVariableTypeIntentionTest/typeCommentInstanceAttributeDocstring_after.py | 19 | class MyClass:
"""Docstring."""
attr = None # type: [int]
def __init__(self):
self.attr = 42
self.attr |
4ndr345/BDv3 | refs/heads/master | bladedesigner/unittest/distribution/chebyshev.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# BladeDesigner
# Copyright (C) 2014 Andreas Kührmann [andreas.kuehrmann@gmail.com]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import nose.tools
from ... import distribution
def get_distribution():
    """Return a fresh Chebyshev point distribution under test."""
    return distribution.Chebyshev()
def test_resolution():
    # Requesting N points must yield exactly N samples.
    d = get_distribution()
    nose.tools.assert_equal(d(200).size, 200)


def test_start_point():
    # The distribution is normalised to the unit interval: first point is 0.
    d = get_distribution()
    nose.tools.assert_equal(d(2)[0], 0)


def test_end_point():
    # ... and the last point is 1.
    d = get_distribution()
    nose.tools.assert_equal(d(2)[-1], 1)
|
gilsondev/cajadoarquitetura | refs/heads/master | cajadoarquitetura/core/__init__.py | 12133432 | |
smajda/cookiecutter-django-crud-app | refs/heads/master | {{cookiecutter.app_name}}/__init__.py | 12133432 | |
none-da/zeshare | refs/heads/master | django_messages_framework/api.py | 1 | from django_messages_framework import constants
from django_messages_framework.storage import default_storage
from django.utils.functional import lazy, memoize
# Public API re-exported via ``from django_messages_framework.api import *``.
# Consistency fix: get_level/set_level are public, documented functions in
# this module (and part of Django's messages API) but were missing here.
__all__ = (
    'add_message', 'get_messages', 'get_level', 'set_level',
    'debug', 'info', 'success', 'warning', 'error',
)
class MessageFailure(Exception):
    """Raised when a message cannot be stored for the current request."""
    pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
    """Record *message* for *request*.

    The middleware-attached storage (``request._messages``) is preferred;
    otherwise the legacy ``user.message_set`` table is used for
    authenticated users.  When neither is available a MessageFailure is
    raised unless *fail_silently* is set.
    """
    if hasattr(request, '_messages'):
        return request._messages.add(level, message, extra_tags)
    if hasattr(request, 'user') and request.user.is_authenticated():
        return request.user.message_set.create(message=message)
    if fail_silently:
        return None
    raise MessageFailure('Without the django_messages_framework '
                         'middleware, messages can only be added to '
                         'authenticated users.')
def get_messages(request):
    """
    Returns the message storage on the request if it exists, otherwise returns
    user.message_set.all() as the old auth context processor did.
    """
    if hasattr(request, '_messages'):
        return request._messages

    def get_user():
        # Fall back to an anonymous user when no auth middleware attached one.
        if hasattr(request, 'user'):
            return request.user
        else:
            from django.contrib.auth.models import AnonymousUser
            return AnonymousUser()

    # Lazily fetch-and-delete the legacy user messages; memoize(..., 0) keeps
    # a single cached result so repeated evaluation does not hit the DB twice.
    return lazy(memoize(get_user().get_and_delete_messages, {}, 0), list)()
def get_level(request):
    """
    Return the minimum level of messages recorded for *request*.

    Reads the middleware-attached storage when present; otherwise a
    throwaway default storage supplies the configured default level
    (``MESSAGE_LEVEL`` setting, falling back to ``INFO``).
    """
    if hasattr(request, '_messages'):
        return request._messages.level
    return default_storage(request).level
def set_level(request, level):
    """
    Set the minimum message level recorded for *request*.

    Returns True on success, False when no message storage is attached.
    Passing ``None`` restores the default level (see ``get_level``).
    """
    try:
        storage = request._messages
    except AttributeError:
        return False
    storage.level = level
    return True
def debug(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``DEBUG`` level."""
    add_message(request, constants.DEBUG, message,
                extra_tags=extra_tags, fail_silently=fail_silently)


def info(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``INFO`` level."""
    add_message(request, constants.INFO, message,
                extra_tags=extra_tags, fail_silently=fail_silently)


def success(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``SUCCESS`` level."""
    add_message(request, constants.SUCCESS, message,
                extra_tags=extra_tags, fail_silently=fail_silently)


def warning(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``WARNING`` level."""
    add_message(request, constants.WARNING, message,
                extra_tags=extra_tags, fail_silently=fail_silently)


def error(request, message, extra_tags='', fail_silently=False):
    """Record *message* at the ``ERROR`` level."""
    add_message(request, constants.ERROR, message,
                extra_tags=extra_tags, fail_silently=fail_silently)
|
joyxu/autotest | refs/heads/master | tko/parsers/test/new_scenario.py | 4 | #!/usr/bin/python
"""Create new scenario test instance from an existing results directory.
This automates creation of regression tests for the results parsers.
There are 2 primary use cases for this.
1) Bug fixing: Parser broke on some input in the field and we want
to start with a test that operates on that input and fails. We
then apply fixes to the parser implementation until it passes.
2) Regression alarms: We take input from various real scenarios that
work as expected with the parser. These will be used to ensure
we do not break the expected functionality of the parser while
refactoring it.
While much is done automatically, a scenario harness is meant to
be easily extended and configured once generated.
"""
import optparse
import os
import shutil
import sys
from os import path
try:
import autotest.common as common
except ImportError:
import common
from autotest.tko.parsers.test import scenario_base
from autotest.client.shared import autotemp
usage = 'usage: %prog [options] results_dirpath scenerios_dirpath'
# NOTE(review): 'scenerios_dirpath' above is misspelled, but it is a
# user-visible string; deliberately left untouched here.
parser = optparse.OptionParser(usage=usage)
parser.add_option(
    '-n', '--name',
    help='Name for new scenario instance. Will use dirname if not specified')
parser.add_option(
    '-p', '--parser_result_tag',
    default='v1',
    help='Storage tag to use for initial parser result.')
parser.add_option(
    '-t', '--template_type',
    default='base',
    help='Type of unittest module to copy into new scenario.')
def main():
    """Validate the CLI arguments, run the results parser on a copy of the
    given results directory, and materialise a new scenario package."""
    (options, args) = parser.parse_args()
    if len(args) < 2:
        parser.print_help()
        sys.exit(1)

    # Both positional arguments must be existing directories.
    results_dirpath = path.normpath(args[0])
    if not path.exists(results_dirpath) or not path.isdir(results_dirpath):
        print 'Invalid results_dirpath:', results_dirpath
        parser.print_help()
        sys.exit(1)

    scenarios_dirpath = path.normpath(args[1])
    if not path.exists(scenarios_dirpath) or not path.isdir(scenarios_dirpath):
        print 'Invalid scenarios_dirpath:', scenarios_dirpath
        parser.print_help()
        sys.exit(1)

    results_dirname = path.basename(results_dirpath)
    # Not everything is a valid python package name, fix if necessary
    package_dirname = scenario_base.fix_package_dirname(
        options.name or results_dirname)

    scenario_package_dirpath = path.join(
        scenarios_dirpath, package_dirname)
    if path.exists(scenario_package_dirpath):
        print (
            'Scenario package already exists at path: %s' %
            scenario_package_dirpath)
        parser.print_help()
        sys.exit(1)

    # Create new scenario package
    os.mkdir(scenario_package_dirpath)

    # Work on a private copy of the results so the parser cannot disturb
    # the original directory.
    tmp_dirpath = autotemp.tempdir(unique_id='new_scenario')
    copied_dirpath = path.join(tmp_dirpath.name, results_dirname)
    shutil.copytree(results_dirpath, copied_dirpath)
    # scenario_base.sanitize_results_data(copied_dirpath)

    # Launch parser on copied_dirpath, collect emitted test objects.
    # An exception is stored as the "result" so failing inputs can become
    # regression tests too (use case 1 in the module docstring).
    harness = scenario_base.new_parser_harness(copied_dirpath)
    try:
        parser_result = harness.execute()
    except Exception, e:
        parser_result = e

    scenario_base.store_parser_result(
        scenario_package_dirpath, parser_result,
        options.parser_result_tag)
    scenario_base.store_results_dir(
        scenario_package_dirpath, copied_dirpath)
    scenario_base.write_config(
        scenario_package_dirpath,
        status_version=harness.status_version,
        parser_result_tag=options.parser_result_tag,
    )
    scenario_base.install_unittest_module(
        scenario_package_dirpath, options.template_type)
    tmp_dirpath.clean()


if __name__ == '__main__':
    main()
|
xyuanmu/XX-Net | refs/heads/master | python3.8.2/Lib/urllib/error.py | 35 | """Exception classes raised by urllib.
The base exception class is URLError, which inherits from OSError. It
doesn't define any behavior of its own, but is the base class for all
exceptions defined in this package.
HTTPError is an exception class that is also a valid HTTP response
instance. It behaves this way because HTTP protocol errors are valid
responses, with a status code, headers, and a body. In some contexts,
an application may want to handle an exception like a regular
response.
"""
import urllib.response
__all__ = ['URLError', 'HTTPError', 'ContentTooShortError']
class URLError(OSError):
    """Base exception for URL handling errors.

    Subclasses OSError without sharing its implementation, so both
    ``__init__`` and ``__str__`` are overridden.  ``args`` is populated
    with the single ``reason`` for compatibility with other OSError
    subclasses, even though it lacks the usual (errno, strerror) layout.
    """

    def __init__(self, reason, filename=None):
        self.args = (reason,)
        self.reason = reason
        # Only expose ``filename`` when one was actually supplied.
        if filename is not None:
            self.filename = filename

    def __str__(self):
        return f'<urlopen error {self.reason}>'
class HTTPError(URLError, urllib.response.addinfourl):
    """An HTTP protocol error that also acts as a non-error response.

    HTTP errors are valid HTTP responses (status code, headers, body),
    so this exception doubles as the file-like object returned on
    success, letting callers treat it like a regular response.
    """

    # Private handle on the base initializer; invoked only when a usable
    # file object is available (see __init__).
    __super_init = urllib.response.addinfourl.__init__

    def __init__(self, url, code, msg, hdrs, fp):
        self.code, self.msg, self.hdrs = code, msg, hdrs
        self.fp = fp
        self.filename = url
        # The addinfourl base classes require a valid file object.  When
        # there is none, skip base-class initialization entirely.
        if fp is not None:
            self.__super_init(fp, hdrs, url, code)

    def __str__(self):
        return f'HTTP Error {self.code}: {self.msg}'

    def __repr__(self):
        return f'<HTTPError {self.code}: {self.msg!r}>'

    # URLError documents a .reason attribute, so mirror it here
    # (see issue13211 for the discussion).
    @property
    def reason(self):
        return self.msg

    @property
    def headers(self):
        return self.hdrs

    @headers.setter
    def headers(self, headers):
        self.hdrs = headers
class ContentTooShortError(URLError):
    """Raised when the downloaded size does not match Content-Length."""

    def __init__(self, message, content):
        super().__init__(message)
        # Keep the partial payload so callers can inspect what arrived.
        self.content = content
|
ningchi/scikit-learn | refs/heads/master | sklearn/covariance/graph_lasso_.py | 7 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import _check_cv as check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Evaluate the graph-lasso objective function.

    The objective is a shifted, scaled version of the normalized
    log-likelihood (its empirical mean over the samples) plus an l1
    penalty on the off-diagonal entries of the precision matrix; the
    diagonal is deliberately left unpenalized.
    """
    n_features = precision_.shape[0]
    # Off-diagonal l1 norm: total l1 norm minus the diagonal contribution.
    l1_off_diag = (np.abs(precision_).sum()
                   - np.abs(np.diag(precision_)).sum())
    return (- 2. * log_likelihood(mle, precision_)
            + n_features * np.log(2 * np.pi)
            + alpha * l1_off_diag)
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros
    off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This follows from the bound for all the Lassos solved in GraphLasso:
    each row of cov plays the role of Xy, and the lasso bound for alpha
    is ``max(abs(Xy))``.
    """
    off_diag = emp_cov.copy()
    # Zero the diagonal so only off-diagonal entries are considered.
    np.fill_diagonal(off_diag, 0)
    return np.max(np.abs(off_diag))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
                enet_tol=1e-4, max_iter=100, verbose=False,
                return_costs=False, eps=np.finfo(np.float64).eps,
                return_n_iter=False):
    """l1-penalized covariance estimator
    Parameters
    ----------
    emp_cov : 2D ndarray, shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.
    alpha : positive float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. Only used for mode='cd'.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        printed at each iteration.
    return_costs : boolean, optional
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrix.
    precision : 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrix.
    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.
    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.
    See Also
    --------
    GraphLasso, GraphLassoCV
    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.
    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.
    """
    _, n_features = emp_cov.shape
    # Fast path: with no regularization the MLE is the answer and the
    # precision matrix is simply the inverse of the empirical covariance.
    if alpha == 0:
        if return_costs:
            precision_ = linalg.inv(emp_cov)
            cost = - 2. * log_likelihood(emp_cov, precision_)
            cost += n_features * np.log(2 * np.pi)
            d_gap = np.sum(emp_cov * precision_) - n_features
            if return_n_iter:
                return emp_cov, precision_, (cost, d_gap), 0
            else:
                return emp_cov, precision_, (cost, d_gap)
        else:
            if return_n_iter:
                return emp_cov, linalg.inv(emp_cov), 0
            else:
                return emp_cov, linalg.inv(emp_cov)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    diagonal = emp_cov.flat[::n_features + 1]
    covariance_.flat[::n_features + 1] = diagonal
    precision_ = pinvh(covariance_)
    indices = np.arange(n_features)
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == 'cd':
        errors = dict(over='raise', invalid='ignore')
    else:
        errors = dict(invalid='raise')
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        for i in range(max_iter):
            for idx in range(n_features):
                # Block-coordinate descent: solve a lasso problem for
                # column `idx` against all the remaining variables.
                sub_covariance = covariance_[indices != idx].T[indices != idx]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == 'cd':
                        # Use coordinate descent
                        coefs = -(precision_[indices != idx, idx]
                                  / (precision_[idx, idx] + 1000 * eps))
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs, alpha, 0, sub_covariance, row, row,
                            max_iter, enet_tol, check_random_state(None), False)
                    else:
                        # Use LARS
                        _, _, coefs = lars_path(
                            sub_covariance, row, Xy=row, Gram=sub_covariance,
                            alpha_min=alpha / (n_features - 1), copy_Gram=True,
                            method='lars', return_path=False)
                # Update the precision matrix
                precision_[idx, idx] = (
                    1. / (covariance_[idx, idx]
                          - np.dot(covariance_[indices != idx, idx], coefs)))
                precision_[indices != idx, idx] = (- precision_[idx, idx]
                                                   * coefs)
                precision_[idx, indices != idx] = (- precision_[idx, idx]
                                                   * coefs)
                # Propagate the lasso solution back into the covariance
                # estimate (both the column and the symmetric row).
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
                    % (i, cost, d_gap))
            if return_costs:
                costs.append((cost, d_gap))
            # Convergence is declared on the dual gap.
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError('Non SPD result: the system is '
                                         'too ill-conditioned for this solver')
        else:
            # for/else: reached max_iter without hitting the `break` above.
            warnings.warn('graph_lasso: did not converge after %i iteration:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
                          ConvergenceWarning)
    except FloatingPointError as e:
        e.args = (e.args[0]
                  + '. The system is too ill-conditioned for this solver',)
        raise e
    if return_costs:
        if return_n_iter:
            return covariance_, precision_, costs, i + 1
        else:
            return covariance_, precision_, costs
    else:
        if return_n_iter:
            return covariance_, precision_, i + 1
        else:
            return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Parameters
    ----------
    alpha : positive float, default 0.01
        Regularization strength: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    mode : {'cd', 'lars'}, default 'cd'
        The Lasso solver to use. Use 'lars' for very sparse underlying
        graphs, where p > n; elsewhere prefer 'cd' (coordinate descent),
        which is more numerically stable.
    tol : positive float, default 1e-4
        Convergence threshold: iterations stop when the dual gap goes
        below this value.
    max_iter : integer, default 100
        The maximum number of iterations.
    verbose : boolean, default False
        If True, the objective function and dual gap are printed at each
        iteration.
    assume_centered : boolean, default False
        If True, data are not centered before computation (useful for
        data whose mean is almost, but not exactly, zero).  If False,
        data are centered before computation.

    Attributes
    ----------
    covariance_ : array-like, shape (n_features, n_features)
        Estimated covariance matrix
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """

    def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
                 verbose=False, assume_centered=False):
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True

    def fit(self, X, y=None):
        """Fit the graph-lasso covariance model to X and return self."""
        X = check_array(X)
        # Location is the per-feature mean unless centering is disabled.
        self.location_ = (np.zeros(X.shape[1]) if self.assume_centered
                          else X.mean(0))
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            max_iter=self.max_iter, verbose=self.verbose,
            return_n_iter=True)
        return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
                     tol=1e-4, max_iter=100, verbose=False):
    """l1-penalized covariance estimates along a path of decreasing alphas.

    Parameters
    ----------
    X : 2D ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimates.
    alphas : list of positive floats
        The regularization parameters, in decreasing order.
    cov_init : 2D array (n_features, n_features), optional
        Initial guess for the covariance.
    X_test : 2D array, shape (n_test_samples, n_features), optional
        Optional test matrix to measure generalisation error.
    mode : {'cd', 'lars'}
        The Lasso solver to use: 'lars' for very sparse underlying
        graphs where p > n, otherwise 'cd' (more numerically stable).
    tol : positive float, optional
        Convergence threshold on the dual gap.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : integer, optional
        The higher the verbosity flag, the more information is printed
        during the fitting.

    Returns
    -------
    covariances_ : list of 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrices.
    precisions_ : list of 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrices.
    scores_ : list of float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    # Warm-start from the supplied covariance, or from the empirical one.
    current_cov = emp_cov.copy() if cov_init is None else cov_init
    covariances_ = []
    precisions_ = []
    scores_ = []
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)
    for alpha in alphas:
        try:
            # Capture numerical failures and keep walking the path.
            current_cov, current_prec = graph_lasso(
                emp_cov, alpha=alpha, cov_init=current_cov, mode=mode, tol=tol,
                max_iter=max_iter, verbose=inner_verbose)
            covariances_.append(current_cov)
            precisions_.append(current_prec)
            if X_test is not None:
                test_score = log_likelihood(test_emp_cov, current_prec)
        except FloatingPointError:
            # Mark the failed alpha with NaN placeholders and the worst
            # possible score; the warm start is left untouched.
            test_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(test_score):
                test_score = -np.inf
            scores_.append(test_score)
        if verbose == 1:
            sys.stderr.write('.')
        elif verbose > 1:
            if X_test is not None:
                print('[graph_lasso_path] alpha: %.2e, score: %.2e'
                      % (alpha, test_score))
            else:
                print('[graph_lasso_path] alpha: %.2e' % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphLassoCV(GraphLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty
    Parameters
    ----------
    alphas : integer, or list positive float, optional
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details.
    n_refinements: strictly positive integer
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed.
    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, defaults to
        a 3-fold strategy
    tol: positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    max_iter: integer, optional
        Maximum number of iterations.
    mode: {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.
    n_jobs: int, optional
        number of jobs to run in parallel (default 1).
    verbose: boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.
    assume_centered : Boolean
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.
    Attributes
    ----------
    covariance_ : numpy.ndarray, shape (n_features, n_features)
        Estimated covariance matrix.
    precision_ : numpy.ndarray, shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).
    alpha_ : float
        Penalization parameter selected.
    cv_alphas_ : list of float
        All penalization parameters explored.
    `grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
        Log-likelihood score on left-out data across folds.
    n_iter_ : int
        Number of iterations run for the optimal alpha.
    See Also
    --------
    graph_lasso, GraphLasso
    Notes
    -----
    The search for the optimal penalization parameter (alpha) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.
    One of the challenges which is faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of alpha then come out as missing values, but the optimum may
    be close to these missing values.
    """
    def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
                 max_iter=100, mode='cd', n_jobs=1, verbose=False,
                 assume_centered=False):
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.mode = mode
        self.tol = tol
        self.max_iter = max_iter
        self.verbose = verbose
        self.cv = cv
        self.n_jobs = n_jobs
        self.assume_centered = assume_centered
        # The base class needs this for the score method
        self.store_precision = True
    def fit(self, X, y=None):
        """Fits the GraphLasso covariance model to X.
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate
        """
        X = check_array(X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(
            X, assume_centered=self.assume_centered)
        cv = check_cv(self.cv, X, y, classifier=False)
        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)
        # NOTE(review): `collections.Sequence` moved to `collections.abc`
        # in Python 3.3 and was removed from `collections` in 3.10 --
        # confirm the Python versions this module must support.
        if isinstance(n_alphas, collections.Sequence):
            alphas = self.alphas
            n_refinements = 1
        else:
            # Build a first, coarse logarithmic grid from the largest
            # useful alpha down to 1% of it, in decreasing order.
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
                                 n_alphas)[::-1]
        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter('ignore', ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # this did not allow to gain anything (same execution time with
                # or without).
                this_path = Parallel(
                    n_jobs=self.n_jobs,
                    verbose=self.verbose
                )(
                    delayed(graph_lasso_path)(
                        X[train], alphas=alphas,
                        X_test=X[test], mode=self.mode,
                        tol=self.tol,
                        max_iter=int(.1 * self.max_iter),
                        verbose=inner_verbose)
                    for train, test in cv)
            # Little danse to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)
            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                # Treat absurdly large scores as failed (non-converged) fits.
                if this_score >= .1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index
            # NOTE(review): if every score is NaN, `best_index` is never
            # assigned and the code below raises NameError -- confirm
            # this cannot happen in practice.
            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif (best_index == last_finite_idx
                    and not best_index == len(path) - 1):
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]
            if not isinstance(n_alphas, collections.Sequence):
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
                                     n_alphas + 2)
                alphas = alphas[1:-1]
            if self.verbose and n_refinements > 1:
                print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
                      % (i + 1, n_refinements, time.time() - t0))
        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
                                           cv=cv, n_jobs=self.n_jobs,
                                           verbose=inner_verbose))
        self.grid_scores = np.array(grid_scores)
        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha
        self.cv_alphas_ = alphas
        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
            emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
            max_iter=self.max_iter, verbose=inner_verbose,
            return_n_iter=True)
        return self
|
Mythirion/VirtualRobot | refs/heads/master | porc/setup (2).py | 12 | #!/usr/bin/env python
# Packaging script for the `requests` distribution.
import os
import sys
import requests
from codecs import open
# Prefer setuptools; fall back to distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Shortcut: `python setup.py publish` builds an sdist and uploads it.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
packages = [
    'requests',
    'requests.packages',
    'requests.packages.chardet',
    'requests.packages.urllib3',
    'requests.packages.urllib3.packages',
    'requests.packages.urllib3.contrib',
    'requests.packages.urllib3.util',
    'requests.packages.urllib3.packages.ssl_match_hostname',
]
# No hard install-time dependencies; extras are declared below.
requires = []
# The long description shown on PyPI is the README plus the changelog.
with open('README.rst', 'r', 'utf-8') as f:
    readme = f.read()
with open('HISTORY.rst', 'r', 'utf-8') as f:
    history = f.read()
setup(
    name='requests',
    # Version is sourced from the package itself to avoid duplication.
    version=requests.__version__,
    description='Python HTTP for Humans.',
    long_description=readme + '\n\n' + history,
    author='Kenneth Reitz',
    author_email='me@kennethreitz.com',
    url='http://python-requests.org',
    packages=packages,
    package_data={'': ['LICENSE', 'NOTICE'], 'requests': ['*.pem']},
    package_dir={'requests': 'requests'},
    include_package_data=True,
    install_requires=requires,
    license='Apache 2.0',
    zip_safe=False,
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ),
    extras_require={
        'security': ['pyOpenSSL', 'ndg-httpsclient', 'pyasn1'],
    },
)
|
Newman101/scipy | refs/heads/master | scipy/optimize/_lsq/trf.py | 56 | """Trust Region Reflective algorithm for least-squares optimization.
The algorithm is based on ideas from paper [STIR]_. The main idea is to
account for presence of the bounds by appropriate scaling of the variables (or
equivalently changing a trust-region shape). Let's introduce a vector v:
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
| 1, otherwise
where g is the gradient of a cost function and lb, ub are the bounds. Its
components are distances to the bounds at which the anti-gradient points (if
this distance is finite). Define a scaling matrix D = diag(v**0.5).
First-order optimality conditions can be stated as
D^2 g(x) = 0.
Meaning that components of the gradient should be zero for strictly interior
variables, and components must point inside the feasible region for variables
on the bound.
Now consider this system of equations as a new optimization problem. If the
point x is strictly interior (not on the bound) then the left-hand side is
differentiable and the Newton step for it satisfies
(D^2 H + diag(g) Jv) p = -D^2 g
where H is the Hessian matrix (or its J^T J approximation in least squares),
Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all
elements of matrix C = diag(g) Jv are non-negative. Introduce the change
of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables
we have a Newton step satisfying
B_h p_h = -g_h,
where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where
J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect
to "hat" variables. To guarantee global convergence we formulate a
trust-region problem based on the Newton step in the new variables:
0.5 * p_h^T B_h p + g_h^T p_h -> min, ||p_h|| <= Delta
In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region
problem is
0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta
Here the meaning of the matrix D becomes more clear: it alters the shape
of a trust-region, such that large steps towards the bounds are not allowed.
In the implementation the trust-region problem is solved in "hat" space,
but handling of the bounds is done in the original space (see below and read
the code).
The introduction of the matrix D doesn't allow to ignore bounds, the algorithm
must keep iterates strictly feasible (to satisfy aforementioned
differentiability), the parameter theta controls step back from the boundary
(see the code for details).
The algorithm does another important trick. If the trust-region solution
doesn't fit into the bounds, then a reflected (from a firstly encountered
bound) search direction is considered. For motivation and analysis refer to
[STIR]_ paper (and other papers of the authors). In practice it doesn't need
a lot of justifications, the algorithm simply chooses the best step among
three: a constrained trust-region step, a reflected step and a constrained
Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original
space).
Another feature is that a trust-region radius control strategy is modified to
account for appearance of the diagonal C matrix (called diag_h in the code).
Note, that all described peculiarities are completely gone as we consider
problems without bounds (the algorithm becomes a standard trust-region type
algorithm very similar to ones implemented in MINPACK).
The implementation supports two methods of solving the trust-region problem.
The first, called 'exact', applies SVD on Jacobian and then solves the problem
very accurately using the algorithm described in [JJMore]_. It is not
applicable to large problem. The second, called 'lsmr', uses the 2-D subspace
approach (sometimes called "indefinite dogleg"), where the problem is solved
in a subspace spanned by the gradient and the approximate Gauss-Newton step
found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is
reformulated as a 4-th order algebraic equation and solved very accurately by
``numpy.roots``. The subspace approach allows to solve very large problems
(up to couple of millions of residuals on a regular PC), provided the Jacobian
matrix is sufficiently sparse.
References
----------
.. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm
from scipy.linalg import svd, qr
from scipy.sparse.linalg import LinearOperator, lsmr
from scipy.optimize import OptimizeResult
from scipy._lib.six import string_types
from .common import (
step_size_to_bound, find_active_constraints, in_bounds,
make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region,
solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d,
evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator,
CL_scaling_vector, compute_grad, compute_jac_scale, check_termination,
update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear,
print_iteration_nonlinear)
def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
        loss_function, tr_solver, tr_options, verbose):
    """Dispatch to the appropriate Trust Region Reflective implementation.

    A deliberately duplicated, simplified code path is used when no
    bounds are imposed: this violates DRY, but keeps each of the two
    implementations as readable as possible.
    """
    unbounded = np.all(lb == -np.inf) and np.all(ub == np.inf)
    if unbounded:
        return trf_no_bounds(
            fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
            loss_function, tr_solver, tr_options, verbose)
    return trf_bounds(
        fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,
        loss_function, tr_solver, tr_options, verbose)
def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
    """Select the best step according to Trust Region Reflective algorithm.

    Compares (in "hat", i.e. scaled, space) the constrained trust-region
    step ``p``, a step along the direction reflected from the first bound
    hit, and a constrained anti-gradient (Cauchy-like) step, returning
    the candidate with the lowest quadratic-model value as
    ``(step, step_h, -value)``.

    Note: ``p`` and ``p_h`` are modified in place when the trust-region
    step leaves the feasible region.
    """
    # If the trust-region step stays inside the bounds, take it as-is.
    if in_bounds(x + p, lb, ub):
        p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
        return p, p_h, -p_value
    p_stride, hits = step_size_to_bound(x, p, lb, ub)
    # Compute the reflected direction.
    r_h = np.copy(p_h)
    r_h[hits.astype(bool)] *= -1
    r = d * r_h
    # Restrict trust-region step, such that it hits the bound.
    p *= p_stride
    p_h *= p_stride
    x_on_bound = x + p
    # Reflected direction will cross first either feasible region or trust
    # region boundary.
    _, to_tr = intersect_trust_region(p_h, r_h, Delta)
    to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub)
    # Find lower and upper bounds on a step size along the reflected
    # direction, considering the strict feasibility requirement. There is no
    # single correct way to do that, the chosen approach seems to work best
    # on test problems.
    r_stride = min(to_bound, to_tr)
    if r_stride > 0:
        r_stride_l = (1 - theta) * p_stride / r_stride
        if r_stride == to_bound:
            r_stride_u = theta * to_bound
        else:
            r_stride_u = to_tr
    else:
        # Degenerate case: no room along the reflected direction; an
        # empty interval (l > u) makes the step unavailable below.
        r_stride_l = 0
        r_stride_u = -1
    # Check if reflection step is available.
    if r_stride_l <= r_stride_u:
        a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
        r_stride, r_value = minimize_quadratic_1d(
            a, b, r_stride_l, r_stride_u, c=c)
        r_h *= r_stride
        r_h += p_h
        r = r_h * d
    else:
        r_value = np.inf
    # Now correct p_h to make it strictly interior.
    p *= theta
    p_h *= theta
    p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
    # Constrained anti-gradient step: minimize the quadratic model along
    # -g_h, limited by both the trust region and the bounds.
    ag_h = -g_h
    ag = d * ag_h
    to_tr = Delta / norm(ag_h)
    to_bound, _ = step_size_to_bound(x, ag, lb, ub)
    if to_bound < to_tr:
        ag_stride = theta * to_bound
    else:
        ag_stride = to_tr
    a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
    ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride)
    ag_h *= ag_stride
    ag *= ag_stride
    # Return whichever candidate has the strictly lowest model value;
    # ties fall through to the anti-gradient step.
    if p_value < r_value and p_value < ag_value:
        return p, p_h, -p_value
    elif r_value < p_value and r_value < ag_value:
        return r, r_h, -r_value
    else:
        return ag, ag_h, -ag_value
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
               x_scale, loss_function, tr_solver, tr_options, verbose):
    """Trust Region Reflective minimization of 0.5 * ||f(x)||**2 with bounds.

    Runs the main TRF iteration in a scaled ("hat") space that combines the
    user `x_scale` with the Coleman-Li scaling vector, so that trust-region
    steps naturally respect the bounds lb <= x <= ub.

    Args:
        fun: Callable returning the residual vector f(x).
        jac: Callable ``jac(x, f)`` returning the Jacobian at ``x``.
        x0, f0, J0: Initial point, residuals and Jacobian. ``nfev``/``njev``
            start at 1 because these are assumed already evaluated.
        lb, ub: Lower and upper bounds on ``x``.
        ftol, xtol, gtol: Termination tolerances on cost reduction, step
            size and scaled gradient norm, respectively.
        max_nfev: Maximum number of function evaluations; defaults to
            ``100 * x0.size`` when None.
        x_scale: Per-variable scaling array, or the string 'jac' to derive
            scaling from the Jacobian columns.
        loss_function: Optional robust loss; when given, the cost and
            (J, f) are adjusted via `scale_for_robust_loss_function`.
        tr_solver: 'exact' (SVD of an augmented Jacobian) or 'lsmr'
            (iterative solve followed by a 2-d subspace subproblem).
        tr_options: Extra keyword options for the 'lsmr' solver.
        verbose: When 2, per-iteration progress is printed.

    Returns:
        `OptimizeResult` with final x, cost, residuals, Jacobian, gradient,
        optimality measure, active-constraint mask, evaluation counts and
        termination status.
    """
    x = x0.copy()
    f = f0
    f_true = f.copy()
    nfev = 1
    J = J0
    njev = 1
    m, n = J.shape
    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)
    g = compute_grad(J, f)
    jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale
    v, dv = CL_scaling_vector(x, g, lb, ub)
    v[dv != 0] *= scale_inv[dv != 0]
    # Initial trust-region radius in the scaled space.
    Delta = norm(x0 * scale_inv / v**0.5)
    if Delta == 0:
        Delta = 1.0
    g_norm = norm(g * v, ord=np.inf)
    f_augmented = np.zeros((m + n))
    if tr_solver == 'exact':
        J_augmented = np.empty((m + n, n))
    elif tr_solver == 'lsmr':
        reg_term = 0.0
        regularize = tr_options.pop('regularize', True)
    if max_nfev is None:
        max_nfev = x0.size * 100
    alpha = 0.0  # "Levenberg-Marquardt" parameter
    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None
    if verbose == 2:
        print_header_nonlinear()
    while True:
        v, dv = CL_scaling_vector(x, g, lb, ub)
        g_norm = norm(g * v, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1
        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)
        if termination_status is not None or nfev == max_nfev:
            break
        # Now compute variables in "hat" space. Here we also account for
        # scaling introduced by `x_scale` parameter. This part is a bit tricky,
        # you have to write down the formulas and see how the trust-region
        # problem is formulated when the two types of scaling are applied.
        # The idea is that first we apply `x_scale` and then apply Coleman-Li
        # approach in the new variables.
        # v is recomputed in the variables after applying `x_scale`; note that
        # components which were identically 1 are not affected.
        v[dv != 0] *= scale_inv[dv != 0]
        # Here we apply two types of scaling.
        d = v**0.5 * scale
        # C = diag(g * scale) Jv
        diag_h = g * dv * scale
        # After all this is done, we continue normally.
        # "hat" gradient.
        g_h = d * g
        f_augmented[:m] = f
        if tr_solver == 'exact':
            J_augmented[:m] = J * d
            J_h = J_augmented[:m]  # Memory view.
            J_augmented[m:] = np.diag(diag_h**0.5)
            U, s, V = svd(J_augmented, full_matrices=False)
            V = V.T
            uf = U.T.dot(f_augmented)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)
            if regularize:
                a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2
            lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
            gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)  # LinearOperator does dot too.
            B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
            g_S = S.T.dot(g_h)
        # theta controls step back step ratio from the bounds.
        theta = max(0.995, 1 - g_norm)
        actual_reduction = -1
        # Shrink the trust region until a step actually reduces the cost
        # (or the evaluation budget is exhausted).
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                p_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                p_h = S.dot(p_S)
            p = d * p_h  # Trust-region solution in the original space.
            step, step_h, predicted_reduction = select_step(
                x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)
            x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
            f_new = fun(x_new)
            nfev += 1
            step_h_norm = norm(step_h)
            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue
            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            # Correction term is specific to the algorithm,
            # vanishes in unbounded case.
            correction = 0.5 * np.dot(step_h * diag_h, step_h)
            Delta_new, ratio = update_tr_radius(
                Delta, actual_reduction - correction, predicted_reduction,
                step_h_norm, step_h_norm > 0.95 * Delta
            )
            alpha *= Delta / Delta_new
            Delta = Delta_new
            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
            if termination_status is not None:
                break
        if actual_reduction > 0:
            x = x_new
            f = f_new
            f_true = f.copy()
            cost = cost_new
            J = jac(x, f)
            njev += 1
            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)
            g = compute_grad(J, f)
            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0
        iteration += 1
    if termination_status is None:
        termination_status = 0
    active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev,
        status=termination_status)
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev,
                  x_scale, loss_function, tr_solver, tr_options, verbose):
    """Trust Region Reflective minimization of 0.5 * ||f(x)||**2, unbounded.

    Simplified variant of `trf_bounds` for problems without bound
    constraints: no Coleman-Li scaling vector, no step reflection and no
    diagonal correction term — only the `x_scale` scaling is applied.

    Args:
        fun: Callable returning the residual vector f(x).
        jac: Callable ``jac(x, f)`` returning the Jacobian at ``x``.
        x0, f0, J0: Initial point, residuals and Jacobian (assumed already
            evaluated, hence nfev/njev start at 1).
        ftol, xtol, gtol: Termination tolerances on cost reduction, step
            size and gradient norm.
        max_nfev: Maximum number of function evaluations; defaults to
            ``100 * x0.size`` when None.
        x_scale: Per-variable scaling array, or 'jac' for Jacobian-derived
            scaling.
        loss_function: Optional robust loss applied to cost and (J, f).
        tr_solver: 'exact' (SVD-based) or 'lsmr' (iterative, 2-d subspace).
        tr_options: Extra options for the 'lsmr' solver ('damp',
            'regularize' are consumed here).
        verbose: When 2, per-iteration progress is printed.

    Returns:
        `OptimizeResult`; `active_mask` is all zeros since there are no
        bounds.
    """
    x = x0.copy()
    f = f0
    f_true = f.copy()
    nfev = 1
    J = J0
    njev = 1
    m, n = J.shape
    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)
    g = compute_grad(J, f)
    jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale
    Delta = norm(x0 * scale_inv)
    if Delta == 0:
        Delta = 1.0
    if tr_solver == 'lsmr':
        reg_term = 0
        damp = tr_options.pop('damp', 0.0)
        regularize = tr_options.pop('regularize', True)
    if max_nfev is None:
        max_nfev = x0.size * 100
    alpha = 0.0  # "Levenberg-Marquardt" parameter
    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None
    if verbose == 2:
        print_header_nonlinear()
    while True:
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1
        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)
        if termination_status is not None or nfev == max_nfev:
            break
        # Only `x_scale` scaling here; no Coleman-Li vector without bounds.
        d = scale
        g_h = d * g
        if tr_solver == 'exact':
            J_h = J * d
            U, s, V = svd(J_h, full_matrices=False)
            V = V.T
            uf = U.T.dot(f)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)
            if regularize:
                a, b = build_quadratic_1d(J_h, g_h, -g_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2
            damp_full = (damp**2 + reg_term)**0.5
            gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)
            B_S = np.dot(JS.T, JS)
            g_S = S.T.dot(g_h)
        actual_reduction = -1
        # Shrink the trust region until a step actually reduces the cost
        # (or the evaluation budget is exhausted).
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                step_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                step_h = S.dot(p_S)
            predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
            step = d * step_h
            x_new = x + step
            f_new = fun(x_new)
            nfev += 1
            step_h_norm = norm(step_h)
            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue
            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            Delta_new, ratio = update_tr_radius(
                Delta, actual_reduction, predicted_reduction,
                step_h_norm, step_h_norm > 0.95 * Delta)
            alpha *= Delta / Delta_new
            Delta = Delta_new
            step_norm = norm(step)
            termination_status = check_termination(
                actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)
            if termination_status is not None:
                break
        if actual_reduction > 0:
            x = x_new
            f = f_new
            f_true = f.copy()
            cost = cost_new
            J = jac(x, f)
            njev += 1
            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)
            g = compute_grad(J, f)
            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0
        iteration += 1
    if termination_status is None:
        termination_status = 0
    active_mask = np.zeros_like(x)
    return OptimizeResult(
        x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev,
        status=termination_status)
|
JamesShaeffer/QGIS | refs/heads/master | tests/src/python/test_qgslegendpatchshapewidget.py | 31 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLegendPatchShapeWidget.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2020 by Nyall Dawson'
__date__ = '20/04/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsLegendPatchShape,
QgsGeometry,
QgsSymbol
)
from qgis.gui import QgsLegendPatchShapeWidget
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# A QGIS application instance must exist before any QGIS API objects are
# constructed by the tests below.
start_app()
# Shared path to the suite's test-data directory.
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLegendPatchShapeWidget(unittest.TestCase):
    """Tests for QgsLegendPatchShapeWidget shape round-trips and signals."""

    def testWidget(self):
        """The widget reports back exactly the shape it was created with."""
        cases = (
            (QgsSymbol.Line, 'LineString( 0 0, 1 1)', False,
             'LineString (0 0, 1 1)'),
            (QgsSymbol.Line, 'LineString( 0 0, 1 1)', True,
             'LineString (0 0, 1 1)'),
            (QgsSymbol.Fill, 'Polygon((5 5, 1 2, 3 4, 5 5))', False,
             'Polygon ((5 5, 1 2, 3 4, 5 5))'),
            # preserveAspectRatio defaults to True when omitted; passing it
            # explicitly keeps this table uniform.
            (QgsSymbol.Marker, 'MultiPoint((5 5), (1 2))', True,
             'MultiPoint ((5 5),(1 2))'),
        )
        for symbol_type, wkt, preserve_aspect, expected_wkt in cases:
            patch = QgsLegendPatchShape(symbol_type,
                                        QgsGeometry.fromWkt(wkt),
                                        preserve_aspect)
            widget = QgsLegendPatchShapeWidget(None, patch)
            returned = widget.shape()
            self.assertEqual(returned.geometry().asWkt(), expected_wkt)
            self.assertEqual(returned.preserveAspectRatio(), preserve_aspect)
            self.assertEqual(returned.symbolType(), symbol_type)

    def testSignals(self):
        """changed fires once per setShape() call that alters the shape."""
        initial = QgsLegendPatchShape(
            QgsSymbol.Line, QgsGeometry.fromWkt('LineString( 0 0, 1 1)'),
            False)
        widget = QgsLegendPatchShapeWidget(None, initial)
        spy = QSignalSpy(widget.changed)

        # Re-applying the very same shape must not emit the signal.
        widget.setShape(initial)
        self.assertEqual(len(spy), 0)
        self.assertFalse(widget.shape().preserveAspectRatio())

        updates = (
            (QgsSymbol.Line, 'LineString( 0 0, 1 1)',
             'LineString (0 0, 1 1)'),
            (QgsSymbol.Line, 'LineString( 0 0, 1 2)',
             'LineString (0 0, 1 2)'),
            (QgsSymbol.Marker, 'MultiPoint((5 5), (1 2))',
             'MultiPoint ((5 5),(1 2))'),
        )
        for emit_count, (symbol_type, wkt, expected_wkt) in enumerate(
                updates, 1):
            widget.setShape(QgsLegendPatchShape(
                symbol_type, QgsGeometry.fromWkt(wkt), True))
            self.assertEqual(len(spy), emit_count)
            self.assertTrue(widget.shape().preserveAspectRatio())
            self.assertEqual(widget.shape().geometry().asWkt(), expected_wkt)
            self.assertEqual(widget.shape().symbolType(), symbol_type)
# Allow running this test module directly, outside the full QGIS test suite.
if __name__ == '__main__':
    unittest.main()
|
djo938/pyshell | refs/heads/master | pyshell/utils/test/postprocess_test.py | 3 | #!/usr/bin/env python -t
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Jonathan Delvaux <pyshell@djoproject.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyshell.arg.checker.default import DefaultChecker
from pyshell.arg.checker.integer import IntegerArgChecker
from pyshell.system.manager.parent import ParentManager
from pyshell.system.parameter.context import ContextParameter
from pyshell.system.parameter.environment import EnvironmentParameter
from pyshell.system.setting.context import ContextGlobalSettings
from pyshell.system.setting.environment import EnvironmentGlobalSettings
from pyshell.utils.constants import CONTEXT_COLORATION_DARK
from pyshell.utils.constants import CONTEXT_COLORATION_KEY
from pyshell.utils.constants import CONTEXT_COLORATION_LIGHT
from pyshell.utils.constants import CONTEXT_COLORATION_NONE
from pyshell.utils.constants import CONTEXT_EXECUTION_DAEMON
from pyshell.utils.constants import CONTEXT_EXECUTION_KEY
from pyshell.utils.constants import CONTEXT_EXECUTION_SHELL
from pyshell.utils.constants import DEBUG_ENVIRONMENT_NAME
from pyshell.utils.constants import ENVIRONMENT_TAB_SIZE_KEY
from pyshell.utils.postprocess import listFlatResultHandler
from pyshell.utils.postprocess import listResultHandler
from pyshell.utils.postprocess import printBytesAsString
from pyshell.utils.postprocess import printColumn
from pyshell.utils.postprocess import printColumnWithouHeader
from pyshell.utils.postprocess import printStringCharResult
from pyshell.utils.printing import Printer
class TestPostProcess(object):
    """Tests for pyshell.utils.postprocess output helpers.

    Every test captures stdout via pytest's ``capsys`` fixture and compares
    it against the exact text the printing layer is expected to emit.
    ``setup_method`` installs the context/environment parameters the
    printing code reads (debug level, execution mode, coloration, tab size)
    into the Printer singleton, so the indentation in expected strings is
    governed by the tab-size parameter set below.
    """
    def setup_method(self, method):
        """Build a fresh ParentManager and feed it to the Printer singleton."""
        p = Printer.getInstance()
        self.params = ParentManager()
        ## debug context: integer levels, locked read-only at index 0
        checker = DefaultChecker.getInteger()
        self.debugContext = ContextParameter(
            value=tuple(range(0, 91)),
            settings=ContextGlobalSettings(checker=checker))
        self.params.getContextManager().setParameter(DEBUG_ENVIRONMENT_NAME,
                                                     self.debugContext,
                                                     local_param=True)
        self.debugContext.settings.setRemovable(False)
        self.debugContext.settings.tryToSetDefaultIndex(0)
        self.debugContext.settings.setReadOnly(True)
        self.debugContext.settings.tryToSetIndex(0)
        ## execution context: shell vs daemon, defaulting to shell
        checker = DefaultChecker.getString()
        self.shellContext = ContextParameter(
            value=(CONTEXT_EXECUTION_SHELL,
                   CONTEXT_EXECUTION_DAEMON,),
            settings=ContextGlobalSettings(checker=checker))
        self.params.getContextManager().setParameter(CONTEXT_EXECUTION_KEY,
                                                     self.shellContext,
                                                     local_param=True)
        self.shellContext.settings.setRemovable(False)
        self.shellContext.settings.tryToSetDefaultIndex(0)
        self.shellContext.settings.setReadOnly(True)
        ## coloration context: light/dark/none, defaulting to light
        checker = DefaultChecker.getString()
        self.backgroundContext = ContextParameter(
            value=(CONTEXT_COLORATION_LIGHT,
                   CONTEXT_COLORATION_DARK,
                   CONTEXT_COLORATION_NONE,),
            settings=ContextGlobalSettings(checker=checker))
        self.params.getContextManager().setParameter(CONTEXT_COLORATION_KEY,
                                                     self.backgroundContext,
                                                     local_param=True)
        self.backgroundContext.settings.setRemovable(False)
        self.backgroundContext.settings.tryToSetDefaultIndex(0)
        self.backgroundContext.settings.setReadOnly(True)
        ## tab-size environment parameter: controls output indentation
        self.spacingContext = EnvironmentParameter(
            value=5,
            settings=EnvironmentGlobalSettings(
                checker=IntegerArgChecker(0)))
        self.params.getEnvironmentManager().setParameter(
            ENVIRONMENT_TAB_SIZE_KEY,
            self.spacingContext,
            local_param=True)
        self.spacingContext.settings.setRemovable(False)
        self.spacingContext.settings.setReadOnly(False)
        p.setParameters(self.params)

    ### listResultHandler: one indented line per item
    def test_listResultHandler1(self, capsys):
        listResultHandler(())
        out, err = capsys.readouterr()
        assert out == ""

    def test_listResultHandler2(self, capsys):
        listResultHandler(("aa",))
        out, err = capsys.readouterr()
        assert out == " aa\n"

    def test_listResultHandler3(self, capsys):
        listResultHandler(("aa", 42,))
        out, err = capsys.readouterr()
        assert out == " aa\n 42\n"

    ### listFlatResultHandler: all items joined on a single line
    def test_listFlatResultHandler1(self, capsys):
        listFlatResultHandler(())
        out, err = capsys.readouterr()
        assert out == " \n"

    def test_listFlatResultHandler2(self, capsys):
        listFlatResultHandler(("aa",))
        out, err = capsys.readouterr()
        assert out == " aa\n"

    def test_listFlatResultHandler3(self, capsys):
        listFlatResultHandler(("aa", 42,))
        out, err = capsys.readouterr()
        assert out == " aa 42\n"

    ### printStringCharResult: character codes rendered as one string
    def test_printStringCharResult1(self, capsys):
        printStringCharResult(())
        out, err = capsys.readouterr()
        assert out == " \n"

    def test_printStringCharResult2(self, capsys):
        printStringCharResult((60,))
        out, err = capsys.readouterr()
        assert out == " <\n"

    def test_printStringCharResult3(self, capsys):
        printStringCharResult((60, 42,))
        out, err = capsys.readouterr()
        assert out == " <*\n"

    ### printBytesAsString: byte values rendered as concatenated hex
    def test_printBytesAsString1(self, capsys):
        printBytesAsString(())
        out, err = capsys.readouterr()
        assert out == " \n"

    def test_printBytesAsString2(self, capsys):
        printBytesAsString((0x25,))
        out, err = capsys.readouterr()
        assert out == " 25\n"

    def test_printBytesAsString3(self, capsys):
        printBytesAsString((0x25, 0x42,))
        out, err = capsys.readouterr()
        assert out == " 2542\n"

    ### printColumnWithouHeader: rows/tuples laid out in columns
    def test_printColumnWithouHeader1(self, capsys):
        printColumnWithouHeader(())
        out, err = capsys.readouterr()
        assert out == ""

    def test_printColumnWithouHeader2(self, capsys):
        printColumnWithouHeader(("TOTO",))
        out, err = capsys.readouterr()
        assert out == " TOTO\n"

    def test_printColumnWithouHeader3(self, capsys):
        printColumnWithouHeader(("TOTO", "TUTUTU",))
        out, err = capsys.readouterr()
        assert out == " TOTO\n TUTUTU\n"

    def test_printColumnWithouHeader4(self, capsys):
        printColumnWithouHeader(("TOTO", "TUTUTU", "tata",))
        out, err = capsys.readouterr()
        assert out == " TOTO\n TUTUTU\n tata\n"

    def test_printColumnWithouHeader5(self, capsys):
        printColumnWithouHeader((("TOTO", "tata"), "TUTUTU",))
        out, err = capsys.readouterr()
        assert out == " TOTO tata\n TUTUTU\n"

    def test_printColumnWithouHeader6(self, capsys):
        printColumnWithouHeader((("TOTO", "tata"),
                                 "TUTUTU",
                                 ("aaaaaaaaaa", "bbbbbbbb", "cccccc",),))
        out, err = capsys.readouterr()
        assert out == (" TOTO tata\n TUTUTU\n aaaaaaaaaa"
                       " bbbbbbbb cccccc\n")

    ### printColumn: same layout, first row treated as a header
    def test_printColumn1(self, capsys):
        printColumn(())
        out, err = capsys.readouterr()
        assert out == ""

    def test_printColumn2(self, capsys):
        printColumn(("TOTO",))
        out, err = capsys.readouterr()
        assert out == " TOTO\n"

    def test_printColumn3(self, capsys):
        printColumn(("TOTO", "TUTUTU",))
        out, err = capsys.readouterr()
        assert out == " TOTO\n TUTUTU\n"

    def test_printColumn4(self, capsys):
        printColumn(("TOTO", "TUTUTU", "tata",))
        out, err = capsys.readouterr()
        assert out == " TOTO\n TUTUTU\n tata\n"

    def test_printColumn5(self, capsys):
        printColumn((("TOTO", "tata"), "TUTUTU",))
        out, err = capsys.readouterr()
        assert out == " TOTO tata\n TUTUTU\n"

    def test_printColumn6(self, capsys):
        printColumn((("TOTO", "tata"),
                     "TUTUTU",
                     ("aaaaaaaaaa", "bbbbbbbb", "cccccc",),))
        out, err = capsys.readouterr()
        assert out == (" TOTO tata\n TUTUTU\n aaaaaaaaaa"
                       " bbbbbbbb cccccc\n")

    def test_printColumn7(self, capsys):
        printColumn((("TOTO", "tata", "plapplap"),
                     "TUTUTU",
                     ("aaaaaaaaaa", "bbbbbbbb", "cccccc",),
                     ("lalala", "lulu"),))
        out, err = capsys.readouterr()
        assert out == (" TOTO tata plapplap\n TUTUTU\n"
                       " aaaaaaaaaa bbbbbbbb cccccc\n lalala"
                       " lulu\n")
|
qiuminxu/tensorboard | refs/heads/master | tensorboard/plugins/debugger/numerics_alert.py | 3 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data structures and algorithms for numerics alert.
The alerts are generated when a Tensor's elements contain bad values,
including nan, -inf and +inf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import re
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorboard.plugins.debugger import constants
# The following two namedtuples are the same except that
# 1) `timestamp` in `NumericsAlert` is the timestamp of a single alerting event,
#    while `first_timestamp` in `NumericsAlertReportRow` is the first (earliest)
#    timestamp of a set of aggregated `NumericsAlert`s.
# 2) The counts in `NumericsAlert` are the counts of elements in the tensor,
#    while the event counts in `NumericsAlertReportRow` are counts of previous
#    `NumericsAlert` events of the corresponding categories.
NumericsAlert = collections.namedtuple(
    "NumericsAlert",
    ["device_name", "tensor_name", "timestamp", "nan_count", "neg_inf_count",
     "pos_inf_count"])
NumericsAlertReportRow = collections.namedtuple(
    "NumericsAlertReportRow",
    ["device_name", "tensor_name", "first_timestamp", "nan_event_count",
     "neg_inf_event_count", "pos_inf_event_count"])
# Used to reconstruct an _EventTracker from data read from disk. When updating
# this named tuple, make sure to keep the properties of _EventTracker in sync.
EventTrackerDescription = collections.namedtuple(
    "EventTrackerDescription",
    ["event_count", "first_timestamp", "last_timestamp"])
# Used to reconstruct NumericsAlertHistory.
HistoryTriplet = collections.namedtuple(
    "HistoryTriplet",
    ["device", "tensor", "jsonable_history"])
class _EventTracker(object):
"""Track events for a single category of values (NaN, -Inf, or +Inf)."""
def __init__(self, event_count=0, first_timestamp=-1, last_timestamp=-1):
"""Tracks events for a single category of values.
Args:
event_count: The initial event count to use.
first_timestamp: The timestamp of the first event with this value.
last_timestamp: The timestamp of the last event with this category of
values.
"""
# When updating the properties of this class, make sure to keep
# EventTrackerDescription in sync so that data can be written to and from
# disk correctly.
self.event_count = event_count
self.first_timestamp = first_timestamp
self.last_timestamp = last_timestamp
def add(self, timestamp):
if self.event_count == 0:
self.first_timestamp = timestamp
self.last_timestamp = timestamp
else:
if timestamp < self.first_timestamp:
self.first_timestamp = timestamp
if timestamp > self.last_timestamp:
self.last_timestamp = timestamp
self.event_count += 1
def get_description(self):
return EventTrackerDescription(
self.event_count, self.first_timestamp, self.last_timestamp)
class NumericsAlertHistory(object):
    """Aggregated history of numerics alerts for one (device, tensor) pair.

    Maintains one _EventTracker per bad-value category (NaN, -Inf, +Inf),
    recording how many alert events occurred in each category and the
    first/last timestamps at which they occurred.
    """

    def __init__(self, initialization_list=None):
        """Stores alert history for a single device, tensor pair.

        Args:
            initialization_list: An optional mapping (parsed from JSON read
                from disk) from value-category key to an
                EventTrackerDescription-compatible sequence. Used to restore
                a previously saved NumericsAlertHistory; see
                create_jsonable_history.
        """
        if initialization_list:
            # Rebuild the per-category trackers from serialized state.
            self._trackers = {}
            for value_category_key, description_list in initialization_list.items():
                description = EventTrackerDescription._make(description_list)
                self._trackers[value_category_key] = _EventTracker(
                    event_count=description.event_count,
                    first_timestamp=description.first_timestamp,
                    last_timestamp=description.last_timestamp)
        else:
            # Start cleanly, with no prior data.
            self._trackers = {
                constants.NAN_KEY: _EventTracker(),
                constants.NEG_INF_KEY: _EventTracker(),
                constants.POS_INF_KEY: _EventTracker(),
            }

    def add(self, numerics_alert):
        """Incorporates one `NumericsAlert` into the per-category trackers.

        Only categories with a non-zero element count in the alert are
        recorded.

        Args:
            numerics_alert: An instance of `NumericsAlert`.
        """
        if numerics_alert.nan_count:
            self._trackers[constants.NAN_KEY].add(numerics_alert.timestamp)
        if numerics_alert.neg_inf_count:
            self._trackers[constants.NEG_INF_KEY].add(numerics_alert.timestamp)
        if numerics_alert.pos_inf_count:
            self._trackers[constants.POS_INF_KEY].add(numerics_alert.timestamp)

    def first_timestamp(self, event_key=None):
        """Obtain the first timestamp.

        Args:
            event_key: the type key of the sought events (e.g.,
                constants.NAN_KEY). If None, includes all event type keys.

        Returns:
            First (earliest) timestamp of all the events of the given type
            (or all event types if event_key is None). Trackers that never
            saw an event (timestamp -1) are ignored.
        """
        if event_key is None:
            timestamps = [self._trackers[key].first_timestamp
                          for key in self._trackers]
            return min(timestamp for timestamp in timestamps if timestamp >= 0)
        else:
            return self._trackers[event_key].first_timestamp

    def last_timestamp(self, event_key=None):
        """Obtain the last timestamp.

        Args:
            event_key: the type key of the sought events (e.g.,
                constants.NAN_KEY). If None, includes all event type keys.

        Returns:
            Last (latest) timestamp of all the events of the given type (or
            all event types if event_key is None).
        """
        if event_key is None:
            # Bug fix: this previously aggregated the trackers'
            # `first_timestamp` values, so the overall last timestamp could
            # be underestimated.
            timestamps = [self._trackers[key].last_timestamp
                          for key in self._trackers]
            return max(timestamp for timestamp in timestamps if timestamp >= 0)
        else:
            return self._trackers[event_key].last_timestamp

    def event_count(self, event_key=None):
        """Obtain event count.

        Args:
            event_key: the type key of the sought events (e.g.,
                constants.NAN_KEY). If None, includes all event type keys.

        Returns:
            If event_key is None, the sum of the event_count of all event
            types. Otherwise, the event_count of the specified event type.
        """
        if event_key is None:
            # The docstring always promised this aggregate, but passing
            # None previously raised a KeyError.
            return sum(tracker.event_count
                       for tracker in self._trackers.values())
        return self._trackers[event_key].event_count

    def create_jsonable_history(self):
        """Creates a JSON-able representation of this object.

        Returns:
            A dictionary mapping value-category key to
            EventTrackerDescription (which can be used to re-create event
            trackers).
        """
        return {value_category_key: tracker.get_description()
                for (value_category_key, tracker) in self._trackers.items()}
class NumericsAlertRegistry(object):
    """A registry for alerts on numerics (e.g., due to NaNs and infinities)."""

    def __init__(self, capacity=100, initialization_list=None):
        """Constructor.

        Args:
            capacity: (`int`) maximum number of device-tensor keys to store.
            initialization_list: (`list`) An optional list (parsed from
                JSON) used to seed this registry, e.g. backup data restored
                after a preemption. Use create_jsonable_registry to produce
                such a list.
        """
        self._capacity = capacity
        # Maps (device_name, tensor_name) -- e.g.
        # ("/job:worker/replica:0/task:1/gpu:0", "cross_entropy/Log:0") --
        # to a NumericsAlertHistory.
        self._data = dict()
        if initialization_list:
            for entry in initialization_list:
                triplet = HistoryTriplet._make(entry)
                key = (triplet.device, triplet.tensor)
                self._data[key] = NumericsAlertHistory(
                    initialization_list=triplet.jsonable_history)

    def register(self, numerics_alert):
        """Register an alerting numeric event.

        Args:
            numerics_alert: An instance of `NumericsAlert`.
        """
        key = (numerics_alert.device_name, numerics_alert.tensor_name)
        history = self._data.get(key)
        if history is not None:
            history.add(numerics_alert)
        elif len(self._data) < self._capacity:
            history = NumericsAlertHistory()
            history.add(numerics_alert)
            self._data[key] = history
        # Otherwise the registry is at capacity: alerts for new keys are
        # silently dropped.

    def report(self, device_name_filter=None, tensor_name_filter=None):
        """Get a report of offending device/tensor names.

        Each row carries the device name, tensor name, first (earliest)
        timestamp of the alerting events from the tensor, and the event
        counts for nan, negative inf and positive inf.

        Args:
            device_name_filter: regex filter for device name, or None
                (not filtered).
            tensor_name_filter: regex filter for tensor name, or None
                (not filtered).

        Returns:
            A list of NumericsAlertReportRow, sorted by first_timestamp in
            ascending order.
        """
        rows = [
            NumericsAlertReportRow(
                device_name=device_name,
                tensor_name=tensor_name,
                first_timestamp=history.first_timestamp(),
                nan_event_count=history.event_count(constants.NAN_KEY),
                neg_inf_event_count=history.event_count(
                    constants.NEG_INF_KEY),
                pos_inf_event_count=history.event_count(
                    constants.POS_INF_KEY))
            for (device_name, tensor_name), history in self._data.items()]
        if device_name_filter:
            pattern = re.compile(device_name_filter)
            rows = [row for row in rows if pattern.match(row.device_name)]
        if tensor_name_filter:
            pattern = re.compile(tensor_name_filter)
            rows = [row for row in rows if pattern.match(row.tensor_name)]
        # Order results chronologically.
        rows.sort(key=lambda row: row.first_timestamp)
        return rows

    def create_jsonable_registry(self):
        """Creates a JSON-able representation of this object.

        Returns:
            A list of HistoryTriplet, one per (device, tensor) pair. JSON
            does not allow tuple keys, so each triplet flattens the pair
            and the serialized NumericsAlertHistory into a 3-item list.
        """
        return [
            HistoryTriplet(device, tensor, history.create_jsonable_history())
            for ((device, tensor), history) in self._data.items()]
def extract_numerics_alert(event):
    """Determines whether a health pill event contains bad values.

    A bad value is one of NaN, -Inf, or +Inf.

    Args:
        event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`
            ops.

    Returns:
        An instance of `NumericsAlert`, if bad values are found.
        `None`, if no bad values are found.

    Raises:
        ValueError: if the event does not have the expected tag prefix or the
            debug op name is not the expected debug op name suffix.
    """
    # A health pill event is expected to carry exactly one summary value;
    # an event with an empty `value` list would raise IndexError here.
    value = event.summary.value[0]
    debugger_plugin_metadata_content = None
    if value.HasField("metadata"):
        plugin_data = value.metadata.plugin_data
        if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
            debugger_plugin_metadata_content = plugin_data.content
    if not debugger_plugin_metadata_content:
        raise ValueError("Event proto input lacks debugger plugin SummaryMetadata.")
    debugger_plugin_metadata_content = tf.compat.as_text(
        debugger_plugin_metadata_content)
    # The plugin content is a JSON object; the device name lives under the
    # "device" key.
    try:
        content_object = json.loads(debugger_plugin_metadata_content)
        device_name = content_object["device"]
    except (KeyError, ValueError) as e:
        raise ValueError("Could not determine device from JSON string %r, %r" %
                         (debugger_plugin_metadata_content, e))
    debug_op_suffix = ":DebugNumericSummary"
    if not value.node_name.endswith(debug_op_suffix):
        raise ValueError(
            "Event proto input does not have the expected debug op suffix %s" %
            debug_op_suffix)
    # Strip the debug-op suffix to recover the watched tensor's name.
    tensor_name = value.node_name[:-len(debug_op_suffix)]
    elements = tf_debug.load_tensor_from_event(event)
    # The DebugNumericSummary tensor stores category counts at fixed indices
    # (see the *_NUMERIC_SUMMARY_OP_INDEX constants).
    nan_count = elements[constants.NAN_NUMERIC_SUMMARY_OP_INDEX]
    neg_inf_count = elements[constants.NEG_INF_NUMERIC_SUMMARY_OP_INDEX]
    pos_inf_count = elements[constants.POS_INF_NUMERIC_SUMMARY_OP_INDEX]
    if nan_count > 0 or neg_inf_count > 0 or pos_inf_count > 0:
        return NumericsAlert(
            device_name, tensor_name, event.wall_time, nan_count, neg_inf_count,
            pos_inf_count)
    return None
|
LostItem/roundware-server | refs/heads/develop | roundwared/audiotrack.py | 2 | # Roundware Server is released under the GNU Affero General Public License v3.
# See COPYRIGHT.txt, AUTHORS.txt, and LICENSE.txt in the project root directory.
# TODO: Figure out how to get the main pipeline to send EOS
# when all audiotracks are finished (only happens
# when repeat is off)
# TODO: Reimplement panning using a gst.Controller
# TODO: Remove stereo_pan from public interface
from __future__ import unicode_literals
import gobject
gobject.threads_init()
import pygst
pygst.require("0.10")
import gst
import random
import logging
import os
import time
from roundwared import src_wav_file
from roundwared import db
from django.conf import settings
from roundware.rw.models import Asset
STATE_PLAYING = 0
STATE_DEAD_AIR = 1
STATE_WAITING = 2
logger = logging.getLogger(__name__)
class AudioTrack:
    """Plays a continuous sequence of audio assets on one track of a stream.

    An AudioTrack pulls recordings from its recording collection and mixes
    them into the shared gstreamer ``adder`` element, inserting a random
    amount of dead air between assets.  Start offset, duration, fades,
    volume and stereo pan position are all randomized within the bounds
    given by the track ``settings`` object.
    """

    ######################################################################
    # PUBLIC
    ######################################################################
    def __init__(self, stream, pipeline, adder, settings, recording_collection):
        self.stream = stream        # owning stream (session id, metadata sink)
        self.pipeline = pipeline    # shared gstreamer pipeline
        self.adder = adder          # mixer element this track feeds into
        self.settings = settings    # Audiotrack model: min/max parameter bounds
        self.rc = recording_collection
        self.current_pan_pos = 0
        self.target_pan_pos = 0
        self.state = STATE_DEAD_AIR
        self.src_wav_file = None    # gst source bin for the currently playing asset
        self.current_recording = None
        # Incremented only after start_audio() is called.
        self.track_timer = 0

    def start_audio(self):
        """
        Called once to start the audio manager timer
        """
        def asset_start_timer():
            """
            The asset timer runs once to start new assets after a certain
            amount of dead air.
            gobject timeout callbacks are repeated until they return False.
            """
            self.add_file()
            return False

        def track_timer():
            """
            The audio manager.
            Timeout called every second to maintain the audio asset stream.
            """
            logger.debug("AT State: %d, Stream state: %d" % (self.state, self.stream.get_state()))
            # logger.debug("TickTock: %s" % self.track_timer)
            self.track_timer += 1

            # Do nothing if audio is playing already.
            if self.state == STATE_PLAYING or self.stream.is_paused():
                return True
            # No audio playing and asset_timer_callback is not scheduled, this
            # is set by self.clean_up() when an asset ends.
            elif self.state == STATE_DEAD_AIR:
                self.state = STATE_WAITING
                # Generate a random amount of dead air.
                deadair = random.randint(
                    self.settings.mindeadair,
                    self.settings.maxdeadair) / gst.MSECOND
                # Attempt to start an asset in the future.
                gobject.timeout_add(deadair, asset_start_timer)
            return True

        # http://www.pygtk.org/pygtk2reference/gobject-functions.html#function-gobject--timeout-add
        # Call audio_timer_callback() every second.
        gobject.timeout_add(1000, track_timer)

    def stereo_pan(self):
        """Move the pan position one step towards the current target,
        choosing a fresh random target and duration whenever the target
        has been reached (or no steps remain)."""
        if self.current_pan_pos == self.target_pan_pos \
                or self.pan_steps_left == 0:
            self.set_new_pan_target()
            self.set_new_pan_duration()
        else:
            pan_distance = \
                self.target_pan_pos - self.current_pan_pos
            amount_to_pan_now = pan_distance / self.pan_steps_left
            self.current_pan_pos += amount_to_pan_now
            self.pan_steps_left -= 1
            if self.src_wav_file:
                self.src_wav_file.pan_to(self.current_pan_pos)

    ######################################################################
    # PRIVATE
    ######################################################################
    def add_file(self):
        """Pull the next recording from the collection and attach it to
        the pipeline with randomized start/duration/fade/volume values."""
        self.current_recording = self.rc.get_recording()
        if not self.current_recording:
            # Nothing available: go back to dead air and publish metadata.
            self.state = STATE_DEAD_AIR
            self.set_track_metadata()
            return
        duration = min(
            self.current_recording.audiolength,
            random.randint(
                # FIXME: I don't allow less than a second to
                # play currently. Mostly because playing zero
                # is an error. Revisit this.
                max(self.settings.minduration,
                    gst.SECOND),
                max(self.settings.maxduration,
                    gst.SECOND)))
        start = random.randint(
            0,
            self.current_recording.audiolength - duration)
        fadein = random.randint(
            self.settings.minfadeintime,
            self.settings.maxfadeintime)
        fadeout = random.randint(
            self.settings.minfadeouttime,
            self.settings.maxfadeouttime)
        # FIXME: Instead of doing this divide by two, instead,
        # decrease them by the same percentage. Remember it's
        # possible that fade_in != fade_out.
        if fadein + fadeout > duration:
            fadein = duration / 2
            fadeout = duration / 2
        volume = self.current_recording.volume * (
            self.settings.minvolume +
            random.random() *
            (self.settings.maxvolume -
             self.settings.minvolume))
        logger.info("Session %s - Playing asset %s filename: %s, duration: %.2f secs" %
                    (self.stream.sessionid, self.current_recording.id,
                     self.current_recording.filename, duration / 1000000000.0))
        self.src_wav_file = src_wav_file.SrcWavFile(
            os.path.join(settings.MEDIA_ROOT,
                         self.current_recording.filename),
            start, duration, fadein, fadeout, volume)
        self.pipeline.add(self.src_wav_file)
        self.srcpad = self.src_wav_file.get_pad('src')
        self.addersinkpad = self.adder.get_request_pad('sink%d')
        self.srcpad.link(self.addersinkpad)
        # Add event watcher/callback
        self.addersinkpad.add_event_probe(self.event_probe)
        # Bring the new source to the pipeline's current state.
        (ret, cur, pen) = self.pipeline.get_state()
        self.src_wav_file.set_state(cur)
        self.state = STATE_PLAYING
        # Generate metadata for the current asset.
        tags = [str(tag.id) for tag in self.current_recording.tags.all()]
        self.set_track_metadata({'asset': self.current_recording.id,
                                 'tags': ','.join(tags)})
        db.add_asset_to_session_history(
            self.current_recording.id, self.stream.sessionid, duration)

    def event_probe(self, pad, event):
        """Pad probe: reacts to EOS (asset finished) and NEWSEGMENT
        (asset attached) events on the adder's sink pad."""
        # End of current audio asset, start a new asset.
        if event.type == gst.EVENT_EOS:
            self.set_track_metadata({'asset': self.current_recording.id,
                                     'complete': True, })
            gobject.idle_add(self.clean_up)
        # New asset added, seek to it's starting timestamp.
        elif event.type == gst.EVENT_NEWSEGMENT:
            gobject.idle_add(self.src_wav_file.seek_to_start)
        return True

    def clean_up(self):
        """Detach the finished source from the pipeline and reset state.

        Returns False so that gobject.idle_add runs it exactly once.
        """
        if self.src_wav_file:
            self.src_wav_file.set_state(gst.STATE_NULL)
            self.pipeline.remove(self.src_wav_file)
            self.adder.release_request_pad(self.addersinkpad)
            self.state = STATE_DEAD_AIR
            self.current_recording = None
            self.src_wav_file = None
        return False

    def set_new_pan_target(self):
        """Pick a new random pan target on the configured grid."""
        pan_step_size = (self.settings.maxpanpos -
                         self.settings.minpanpos) / \
            settings.NUM_PAN_STEPS
        target_pan_step = random.randint(0, settings.NUM_PAN_STEPS)
        self.target_pan_pos = -1 + target_pan_step * pan_step_size

    def set_new_pan_duration(self):
        """Pick a random pan duration and derive the number of pan steps."""
        duration_in_gst_units = \
            random.randint(
                self.settings.minpanduration,
                self.settings.maxpanduration)
        duration_in_miliseconds = duration_in_gst_units / gst.MSECOND
        self.pan_steps_left = duration_in_miliseconds / \
            settings.STEREO_PAN_INTERVAL

    def skip_ahead(self):
        """Fade out the current asset (if any) and clean it up so the
        next asset can start."""
        fadeoutnsecs = random.randint(
            self.settings.minfadeouttime,
            self.settings.maxfadeouttime)
        if self.src_wav_file is not None and not self.src_wav_file.fading:
            # Use true (float) division: the old integer division of the
            # nanosecond value truncated both the logged fade time and the
            # sleep below, cutting the fade short.
            logger.info("fading out for: " + str(round(fadeoutnsecs / 1e9, 2)) + " sec")
            self.src_wav_file.fade_out(fadeoutnsecs)
            # wait until fade is complete and then clean-up
            time.sleep(fadeoutnsecs / 1e9)
            self.clean_up()
        else:
            logger.debug("skip_ahead: no src_wav_file")

    def play_asset(self, asset_id):
        """Force the given asset to play next, fading out whatever is
        currently playing."""
        logger.info("AudioTrack play asset: " + str(asset_id))
        try:
            asset = Asset.objects.get(id=str(asset_id))
            # Move the asset to the front of the collection's queue.
            self.rc.remove_asset_from_rc(asset)
            self.rc.add_asset_to_rc(asset)
            self.skip_ahead()
        except Asset.DoesNotExist:
            # %s rather than %d: asset_id may arrive as a string.
            logger.error("Asset with ID %s does not exist." % asset_id)

    def set_track_metadata(self, metadata=None):
        """
        Sets Audiotrack specific metadata.

        A None default replaces the previous mutable-default dict; callers
        that passed a dict are unaffected.
        """
        data = {'audiotrack': self.settings.id,
                'remaining': self.rc.count(),
                }
        if metadata:
            data.update(metadata)
        self.stream.set_metadata(data)
|
rchav/vinerack | refs/heads/master | saleor/product/urls.py | 19 | from django.conf.urls import url
from . import views

# URL patterns for the product app:
#  - product detail pages addressed by "<slug>-<pk>/"
#  - category listings addressed by "category/<path>-<pk>/"
#    where <path> is a slash-separated category path.
urlpatterns = [
    url(r'^(?P<slug>[a-z0-9-]+?)-(?P<product_id>[0-9]+)/$',
        views.product_details, name='details'),
    url(r'^category/(?P<path>[a-z0-9-_/]+?)-(?P<category_id>[0-9]+)/$',
        views.category_index, name='category')
]
|
cchurch/ansible | refs/heads/devel | lib/ansible/modules/utilities/logic/include_tasks.py | 45 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible's documentation/build tooling: marks this
# as a stable-interface module supported by the core team.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['stableinterface'],
    'supported_by': 'core'
}
DOCUMENTATION = r'''
---
author: Ansible Core Team (@ansible)
module: include_tasks
short_description: Dynamically include a task list
description:
- Includes a file with a list of tasks to be executed in the current playbook.
version_added: '2.4'
options:
file:
description:
- The name of the imported file is specified directly without any other option.
- Unlike M(import_tasks), most keywords, including loop, with_items, and conditionals, apply to this statement.
- The do until loop is not supported on M(include_tasks).
type: str
version_added: '2.7'
apply:
description:
- Accepts a hash of task keywords (e.g. C(tags), C(become)) that will be applied to the tasks within the include.
type: str
version_added: '2.7'
free-form:
description:
- |
Supplying a file name via free-form C(- include_tasks: file.yml) of a file to be included is the equivalent
of specifying an argument of I(file).
notes:
- This is a core feature of the Ansible, rather than a module, and cannot be overridden like a module.
seealso:
- module: import_playbook
- module: import_role
- module: import_tasks
- module: include_role
- ref: playbooks_reuse_includes
description: More information related to including and importing playbooks, roles and tasks.
'''
EXAMPLES = r'''
- hosts: all
tasks:
- debug:
msg: task1
- name: Include task list in play
include_tasks: stuff.yaml
- debug:
msg: task10
- hosts: all
tasks:
- debug:
msg: task1
- name: Include task list in play only if the condition is true
include_tasks: "{{ hostvar }}.yaml"
when: hostvar is defined
- name: Apply tags to tasks within included file
include_tasks:
file: install.yml
apply:
tags:
- install
tags:
- always
- name: Apply tags to tasks within included file when using free-form
include_tasks: install.yml
args:
apply:
tags:
- install
tags:
- always
'''
RETURN = r'''
# This module does not return anything except tasks to execute.
'''
|
bobwalker99/Pydev | refs/heads/master | plugins/org.python.pydev.refactoring/tests/python/codegenerator/generatedocstring/testGenerateDocstringJumpToEmpty.py | 8 | def function():
""
return True##|
##r
def function():
"##|"
return True
|
upiterbarg/mpmath | refs/heads/master | mpmath/libmp/libmpc.py | 7 | """
Low-level functions for complex arithmetic.
"""
import sys
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, BACKEND
from .libmpf import (\
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast, bitcount,
bctable, normalize, normalize1, reciprocal_rnd, rshift, lshift, giant_steps,
negative_rnd,
to_str, to_fixed, from_man_exp, from_float, to_float, from_int, to_int,
fzero, fone, ftwo, fhalf, finf, fninf, fnan, fnone,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul,
mpf_div, mpf_mul_int, mpf_shift, mpf_sqrt, mpf_hypot,
mpf_rdiv_int, mpf_floor, mpf_ceil, mpf_nint, mpf_frac,
mpf_sign, mpf_hash,
ComplexResult
)
from .libelefun import (\
mpf_pi, mpf_exp, mpf_log, mpf_cos_sin, mpf_cosh_sinh, mpf_tan, mpf_pow_int,
mpf_log_hypot,
mpf_cos_sin_pi, mpf_phi,
mpf_cos, mpf_sin, mpf_cos_pi, mpf_sin_pi,
mpf_atan, mpf_atan2, mpf_cosh, mpf_sinh, mpf_tanh,
mpf_asin, mpf_acos, mpf_acosh, mpf_nthroot, mpf_fibonacci
)
# An mpc value is a (real, imag) tuple
mpc_one = fone, fzero
mpc_zero = fzero, fzero
mpc_two = ftwo, fzero
mpc_half = (fhalf, fzero)
_infs = (finf, fninf)
_infs_nan = (finf, fninf, fnan)
def mpc_is_inf(z):
    """Check if either real or imaginary part is infinite"""
    return any(part in _infs for part in z)

def mpc_is_infnan(z):
    """Check if either real or imaginary part is infinite or nan"""
    return any(part in _infs_nan for part in z)
def mpc_to_str(z, dps, **kwargs):
    """Format z as 'a + bj' or 'a - bj' with dps decimal digits.

    Bug fix: formatting options passed via **kwargs were previously
    forwarded only to the imaginary part; forward them to the real
    part as well so both components are formatted consistently.
    """
    re, im = z
    rs = to_str(re, dps, **kwargs)
    if im[0]:
        return rs + " - " + to_str(mpf_neg(im), dps, **kwargs) + "j"
    else:
        return rs + " + " + to_str(im, dps, **kwargs) + "j"
def mpc_to_complex(z, strict=False):
    """Convert an mpc value to a Python complex (precision may be lost)."""
    real, imag = z
    return complex(to_float(real, strict), to_float(imag, strict))
def mpc_hash(z):
    """Hash z consistently with hash() of an equal Python complex.

    On Python >= 3.2 this applies the documented composition rule
    hash(re) + sys.hash_info.imag * hash(im), reduced modulo
    2**sys.hash_info.width.  Older versions fall back to hashing an
    equivalent Python complex where possible.
    """
    # Bug fix: the original compared version *strings*
    # (sys.version >= "3.2"), which fails on Python 3.10+ because
    # "3.10..." sorts before "3.2" lexicographically.  Compare the
    # version_info tuple instead.
    if sys.version_info >= (3, 2):
        re, im = z
        h = mpf_hash(re) + sys.hash_info.imag * mpf_hash(im)
        # Need to reduce either module 2^32 or 2^64
        h = h % (2**sys.hash_info.width)
        return int(h)
    else:
        try:
            return hash(mpc_to_complex(z, strict=True))
        except OverflowError:
            return hash(z)
def mpc_conjugate(z, prec, rnd=round_fast):
    """Complex conjugate: negate the imaginary part, rounded to prec."""
    real, imag = z
    return real, mpf_neg(imag, prec, rnd)

def mpc_is_nonzero(z):
    """True unless both components are exactly zero."""
    return z != mpc_zero
def mpc_add(z, w, prec, rnd=round_fast):
    """Componentwise complex addition, each part rounded to prec."""
    (zre, zim), (wre, wim) = z, w
    return mpf_add(zre, wre, prec, rnd), mpf_add(zim, wim, prec, rnd)

def mpc_add_mpf(z, x, prec, rnd=round_fast):
    """Add the real mpf value x to z."""
    re, im = z
    return mpf_add(re, x, prec, rnd), im

def mpc_sub(z, w, prec=0, rnd=round_fast):
    """Componentwise complex subtraction, each part rounded to prec."""
    (zre, zim), (wre, wim) = z, w
    return mpf_sub(zre, wre, prec, rnd), mpf_sub(zim, wim, prec, rnd)

def mpc_sub_mpf(z, p, prec=0, rnd=round_fast):
    """Subtract the real mpf value p from z."""
    re, im = z
    return mpf_sub(re, p, prec, rnd), im

def mpc_pos(z, prec, rnd=round_fast):
    """Round both parts of z to the given precision (identity value)."""
    re, im = z
    return mpf_pos(re, prec, rnd), mpf_pos(im, prec, rnd)

def mpc_neg(z, prec=None, rnd=round_fast):
    """Negate z, optionally rounding both parts."""
    re, im = z
    return mpf_neg(re, prec, rnd), mpf_neg(im, prec, rnd)

def mpc_shift(z, n):
    """Multiply z by 2**n exactly (shift both components)."""
    re, im = z
    return mpf_shift(re, n), mpf_shift(im, n)
def mpc_abs(z, prec, rnd=round_fast):
    """Absolute value of a complex number, |a+bi|.
    Returns an mpf value."""
    return mpf_hypot(z[0], z[1], prec, rnd)

def mpc_arg(z, prec, rnd=round_fast):
    """Argument of a complex number. Returns an mpf value."""
    return mpf_atan2(z[1], z[0], prec, rnd)

def mpc_floor(z, prec, rnd=round_fast):
    """Apply floor to both components."""
    return mpf_floor(z[0], prec, rnd), mpf_floor(z[1], prec, rnd)

def mpc_ceil(z, prec, rnd=round_fast):
    """Apply ceiling to both components."""
    return mpf_ceil(z[0], prec, rnd), mpf_ceil(z[1], prec, rnd)

def mpc_nint(z, prec, rnd=round_fast):
    """Round both components to the nearest integer."""
    return mpf_nint(z[0], prec, rnd), mpf_nint(z[1], prec, rnd)

def mpc_frac(z, prec, rnd=round_fast):
    """Fractional part of both components."""
    return mpf_frac(z[0], prec, rnd), mpf_frac(z[1], prec, rnd)
def mpc_mul(z, w, prec, rnd=round_fast):
    """
    Complex multiplication.

    Returns the real and imaginary part of (a+bi)*(c+di), rounded to
    the specified precision. The rounding mode applies to the real and
    imaginary parts separately.
    """
    a, b = z
    c, d = w
    # Four exact products; rounding happens only in the final add/sub.
    ac, bd = mpf_mul(a, c), mpf_mul(b, d)
    ad, bc = mpf_mul(a, d), mpf_mul(b, c)
    return mpf_sub(ac, bd, prec, rnd), mpf_add(ad, bc, prec, rnd)

def mpc_square(z, prec, rnd=round_fast):
    """Complex square: (a+b*I)**2 == a**2 - b**2 + 2*I*a*b."""
    a, b = z
    aa, bb = mpf_mul(a, a), mpf_mul(b, b)
    ab = mpf_mul(a, b, prec, rnd)
    return mpf_sub(aa, bb, prec, rnd), mpf_shift(ab, 1)
def mpc_mul_mpf(z, p, prec, rnd=round_fast):
    """Multiply z by the real mpf value p."""
    a, b = z
    return mpf_mul(a, p, prec, rnd), mpf_mul(b, p, prec, rnd)

def mpc_mul_imag_mpf(z, x, prec, rnd=round_fast):
    """
    Multiply the mpc value z by I*x where x is an mpf value.
    """
    a, b = z
    return mpf_neg(mpf_mul(b, x, prec, rnd)), mpf_mul(a, x, prec, rnd)

def mpc_mul_int(z, n, prec, rnd=round_fast):
    """Multiply z by the Python integer n."""
    a, b = z
    return mpf_mul_int(a, n, prec, rnd), mpf_mul_int(b, n, prec, rnd)
def mpc_div(z, w, prec, rnd=round_fast):
    """Complex division z/w via the textbook formula at guard precision."""
    a, b = z
    c, d = w
    wp = prec + 10
    # mag = |w|^2 = c*c + d*d
    mag = mpf_add(mpf_mul(c, c), mpf_mul(d, d), wp)
    # z/w = ((a*c+b*d) + (b*c-a*d)*I) / mag
    num_re = mpf_add(mpf_mul(a, c), mpf_mul(b, d), wp)
    num_im = mpf_sub(mpf_mul(b, c), mpf_mul(a, d), wp)
    return mpf_div(num_re, mag, prec, rnd), mpf_div(num_im, mag, prec, rnd)

def mpc_div_mpf(z, p, prec, rnd=round_fast):
    """Calculate z/p where p is real"""
    a, b = z
    return mpf_div(a, p, prec, rnd), mpf_div(b, p, prec, rnd)

def mpc_reciprocal(z, prec, rnd=round_fast):
    """Calculate 1/z efficiently"""
    a, b = z
    # |z|^2 at guard precision; then 1/z = (a - b*I)/|z|^2.
    m = mpf_add(mpf_mul(a, a), mpf_mul(b, b), prec+10)
    return mpf_div(a, m, prec, rnd), mpf_neg(mpf_div(b, m, prec, rnd))

def mpc_mpf_div(p, z, prec, rnd=round_fast):
    """Calculate p/z where p is real efficiently"""
    a, b = z
    m = mpf_add(mpf_mul(a, a), mpf_mul(b, b), prec+10)
    return (mpf_div(mpf_mul(a, p), m, prec, rnd),
            mpf_div(mpf_neg(mpf_mul(b, p)), m, prec, rnd))
def complex_int_pow(a, b, n):
    """Complex integer power: computes (a+b*I)**n exactly for
    nonnegative n (a and b must be Python ints).

    Binary (square-and-multiply) exponentiation; returns the integer
    pair (real, imag).
    """
    wre, wim = 1, 0
    while n:
        if n & 1:
            # Multiply the accumulator by the current base power.
            wre, wim = wre * a - wim * b, wim * a + wre * b
            n -= 1
        else:
            # Square the base and halve the exponent.
            a, b = a * a - b * b, 2 * a * b
            n >>= 1
    return wre, wim
def mpc_pow(z, w, prec, rnd=round_fast):
    """Complex power z**w via exp(w*log(z)); real w is special-cased."""
    if w[1] == fzero:
        return mpc_pow_mpf(z, w[0], prec, rnd)
    return mpc_exp(mpc_mul(mpc_log(z, prec+10), w, prec+10), prec, rnd)
def mpc_pow_mpf(z, p, prec, rnd=round_fast):
    """Complex z raised to a real mpf power p.

    An integer p (nonnegative exponent field) dispatches to exact integer
    powering; a half-integer p goes through sqrt; anything else falls back
    to exp(p*log(z)).
    """
    psign, pman, pexp, pbc = p
    if pexp >= 0:
        # p is an integer: (-1)**psign * (pman << pexp)
        return mpc_pow_int(z, (-1)**psign * (pman<<pexp), prec, rnd)
    if pexp == -1:
        # p is an odd multiple of 1/2: use sqrt(z) ** (odd integer)
        sqrtz = mpc_sqrt(z, prec+10)
        return mpc_pow_int(sqrtz, (-1)**psign * pman, prec, rnd)
    return mpc_exp(mpc_mul_mpf(mpc_log(z, prec+10), p, prec+10), prec, rnd)
def mpc_pow_int(z, n, prec, rnd=round_fast):
    """Complex z**n for a Python integer n.

    Fast paths cover purely real or purely imaginary z and the small
    exponents 0, 1, 2, -1.  When the exact result is cheap enough, it
    is computed with exact integer powering of the aligned mantissas and
    rounded once at the end; otherwise exp(n*log(z)) is used.
    """
    a, b = z
    if b == fzero:
        return mpf_pow_int(a, n, prec, rnd), fzero
    if a == fzero:
        # (b*I)**n = b**n * I**n; I**n cycles with period 4.
        v = mpf_pow_int(b, n, prec, rnd)
        n %= 4
        if n == 0:
            return v, fzero
        elif n == 1:
            return fzero, v
        elif n == 2:
            return mpf_neg(v), fzero
        elif n == 3:
            return fzero, mpf_neg(v)
    if n == 0: return mpc_one
    if n == 1: return mpc_pos(z, prec, rnd)
    if n == 2: return mpc_square(z, prec, rnd)
    if n == -1: return mpc_reciprocal(z, prec, rnd)
    if n < 0: return mpc_reciprocal(mpc_pow_int(z, -n, prec+4), prec, rnd)
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if asign: aman = -aman
    if bsign: bman = -bman
    de = aexp - bexp
    abs_de = abs(de)
    # Estimated bit size of the exact result; only compute exactly when small.
    exact_size = n*(abs_de + max(abc, bbc))
    if exact_size < 10000:
        # Align exponents so both mantissas share a common scale, power
        # exactly, and round only at the very end.
        if de > 0:
            aman <<= de
            aexp = bexp
        else:
            bman <<= (-de)
            bexp = aexp
        re, im = complex_int_pow(aman, bman, n)
        re = from_man_exp(re, int(n*aexp), prec, rnd)
        im = from_man_exp(im, int(n*bexp), prec, rnd)
        return re, im
    return mpc_exp(mpc_mul_int(mpc_log(z, prec+10), n, prec+10), prec, rnd)
def mpc_sqrt(z, prec, rnd=round_fast):
    """Complex square root (principal branch).
    We have sqrt(a+bi) = sqrt((r+a)/2) + b/sqrt(2*(r+a))*i where
    r = abs(a+bi), when a+bi is not a negative real number.

    The branch on the sign of a chooses between r+a and r-a so that the
    sum never cancels.
    """
    a, b = z
    if b == fzero:
        if a == fzero:
            return (a, b)
        # When a+bi is a negative real number, we get a real sqrt times i
        if a[0]:
            im = mpf_sqrt(mpf_neg(a), prec, rnd)
            return (fzero, im)
        else:
            re = mpf_sqrt(a, prec, rnd)
            return (re, fzero)
    wp = prec+20
    if not a[0]:                               # case a positive
        t = mpf_add(mpc_abs((a, b), wp), a, wp)  # t = abs(a+bi) + a
        u = mpf_shift(t, -1)                   # u = t/2
        re = mpf_sqrt(u, prec, rnd)            # re = sqrt(u)
        v = mpf_shift(t, 1)                    # v = 2*t
        w = mpf_sqrt(v, wp)                    # w = sqrt(v)
        im = mpf_div(b, w, prec, rnd)          # im = b / w
    else:                                      # case a negative
        t = mpf_sub(mpc_abs((a, b), wp), a, wp)  # t = abs(a+bi) - a
        u = mpf_shift(t, -1)                   # u = t/2
        im = mpf_sqrt(u, prec, rnd)            # im = sqrt(u)
        v = mpf_shift(t, 1)                    # v = 2*t
        w = mpf_sqrt(v, wp)                    # w = sqrt(v)
        re = mpf_div(b, w, prec, rnd)          # re = b/w
        if b[0]:
            re = mpf_neg(re)
            im = mpf_neg(im)
    return re, im
def mpc_nthroot_fixed(a, b, n, prec):
    """Fixed-point complex n-th root by Newton iteration.

    a and b are signed integers interpreted at fixed precision prec.
    A low-precision seed (Python complex arithmetic, or an mpc power if
    that overflows) is refined with Newton's method, roughly doubling
    the working precision at every step via giant_steps.
    """
    # a, b signed integers at fixed precision prec
    start = 50
    a1 = int(rshift(a, prec - n*start))
    b1 = int(rshift(b, prec - n*start))
    try:
        # Seed from machine-precision complex arithmetic.
        r = (a1 + 1j * b1)**(1.0/n)
        re = r.real
        im = r.imag
        re = MPZ(int(re))
        im = MPZ(int(im))
    except OverflowError:
        # Fall back to a low-precision mpc power for the seed.
        a1 = from_int(a1, start)
        b1 = from_int(b1, start)
        fn = from_int(n)
        nth = mpf_rdiv_int(1, fn, start)
        re, im = mpc_pow((a1, b1), (nth, fzero), start)
        re = to_int(re)
        im = to_int(im)
    extra = 10
    prevp = start
    extra1 = n
    for p in giant_steps(start, prec+extra):
        # Newton update: x <- ((n-1)*x + z/x**(n-1)) / n, in fixed point.
        # this is slow for large n, unlike int_pow_fixed
        re2, im2 = complex_int_pow(re, im, n-1)
        re2 = rshift(re2, (n-1)*prevp - p - extra1)
        im2 = rshift(im2, (n-1)*prevp - p - extra1)
        r4 = (re2*re2 + im2*im2) >> (p + extra1)
        ap = rshift(a, prec - p)
        bp = rshift(b, prec - p)
        rec = (ap * re2 + bp * im2) >> p
        imc = (-ap * im2 + bp * re2) >> p
        reb = (rec << p) // r4
        imb = (imc << p) // r4
        re = (reb + (n-1)*lshift(re, p-prevp))//n
        im = (imb + (n-1)*lshift(im, p-prevp))//n
        prevp = p
    return re, im
def mpc_nthroot(z, n, prec, rnd=round_fast):
    """
    Complex n-th root.
    Use Newton method as in the real case when it is faster,
    otherwise use z**(1/n)
    """
    a, b = z
    if a[0] == 0 and b == fzero:
        # Nonnegative real input: delegate to the real n-th root.
        re = mpf_nthroot(a, n, prec, rnd)
        return (re, fzero)
    if n < 2:
        if n == 0:
            return mpc_one
        if n == 1:
            return mpc_pos((a, b), prec, rnd)
        if n == -1:
            return mpc_div(mpc_one, (a, b), prec, rnd)
        # n < -1: compute the |n|-th root and invert.
        inverse = mpc_nthroot((a, b), -n, prec+5, reciprocal_rnd[rnd])
        return mpc_div(mpc_one, inverse, prec, rnd)
    if n <= 20:
        prec2 = int(1.2 * (prec + 10))
        asign, aman, aexp, abc = a
        bsign, bman, bexp, bbc = b
        pf = mpc_abs((a,b), prec)
        if pf[-2] + pf[-1] > -10 and pf[-2] + pf[-1] < prec:
            # Moderate magnitude: the fixed-point Newton iteration applies.
            af = to_fixed(a, prec2)
            bf = to_fixed(b, prec2)
            re, im = mpc_nthroot_fixed(af, bf, n, prec2)
            extra = 10
            re = from_man_exp(re, -prec2-extra, prec2, rnd)
            im = from_man_exp(im, -prec2-extra, prec2, rnd)
            return re, im
    # General case: z**(1/n) via mpc_pow at guard precision.
    fn = from_int(n)
    prec2 = prec+10 + 10
    nth = mpf_rdiv_int(1, fn, prec2)
    re, im = mpc_pow((a, b), (nth, fzero), prec2, rnd)
    re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
    im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
    return re, im
def mpc_cbrt(z, prec, rnd=round_fast):
    """
    Complex cubic root.

    Principal branch, delegated to mpc_nthroot with n = 3.
    """
    return mpc_nthroot(z, 3, prec, rnd)
def mpc_exp(z, prec, rnd=round_fast):
    """
    Complex exponential function.

    Computed directly as exp(a+bi) = exp(a) * (cos(b) + sin(b)*i).
    This formula is perfectly stable: only real multiplications occur,
    so the only numerical errors are single-ulp rounding errors.  It is
    also efficient, since cos and sin are computed simultaneously and
    the real exp is fast.  Arbitrarily large a and b are fine as long as
    the underlying real exp/cos/sin are accurate for all real numbers.
    """
    a, b = z
    if a == fzero:
        return mpf_cos_sin(b, prec, rnd)
    if b == fzero:
        return mpf_exp(a, prec, rnd), fzero
    mag = mpf_exp(a, prec+4, rnd)
    c, s = mpf_cos_sin(b, prec+4, rnd)
    return mpf_mul(mag, c, prec, rnd), mpf_mul(mag, s, prec, rnd)
def mpc_log(z, prec, rnd=round_fast):
    """Principal-branch complex logarithm: log|z| + I*arg(z)."""
    return mpf_log_hypot(z[0], z[1], prec, rnd), mpc_arg(z, prec, rnd)
def mpc_cos(z, prec, rnd=round_fast):
    """Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) -
    sin(a)*sinh(b)*i.

    The same comments apply as for the complex exp: only real
    multiplications are performed, so no cancellation errors are
    possible. The formula is also efficient since we can compute both
    pairs (cos, sin) and (cosh, sinh) in single steps."""
    a, b = z
    if b == fzero:
        return mpf_cos(a, prec, rnd), fzero
    if a == fzero:
        return mpf_cosh(b, prec, rnd), fzero
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    re = mpf_mul(c, ch, prec, rnd)
    im = mpf_mul(s, sh, prec, rnd)
    return re, mpf_neg(im)
def mpc_sin(z, prec, rnd=round_fast):
    """Complex sine: sin(a+bi) = sin(a)*cosh(b) + cos(a)*sinh(b)*i.
    Only real multiplications are used, so no cancellation can occur
    (see mpc_cos for details)."""
    a, b = z
    if b == fzero:
        return mpf_sin(a, prec, rnd), fzero
    if a == fzero:
        return fzero, mpf_sinh(b, prec, rnd)
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    return mpf_mul(s, ch, prec, rnd), mpf_mul(c, sh, prec, rnd)
def mpc_tan(z, prec, rnd=round_fast):
    """Complex tangent. Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i
    where M = cos(2a) + cosh(2b).

    Cleanup: the original unpacked both mantissa tuples into locals
    (asign, aman, ... / bsign, bman, ...) that were never used; removed.
    """
    a, b = z
    if b == fzero: return mpf_tan(a, prec, rnd), fzero
    if a == fzero: return fzero, mpf_tanh(b, prec, rnd)
    wp = prec + 15
    # Double the argument: the formula uses 2a and 2b.
    a = mpf_shift(a, 1)
    b = mpf_shift(b, 1)
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    # TODO: handle cancellation when c ~= -1 and ch ~= 1
    mag = mpf_add(c, ch, wp)
    re = mpf_div(s, mag, prec, rnd)
    im = mpf_div(sh, mag, prec, rnd)
    return re, im
def mpc_cos_pi(z, prec, rnd=round_fast):
    """cos(pi*z): like mpc_cos, with the real part handled by the exact
    pi-scaled cos/sin and the imaginary part pre-multiplied by pi."""
    a, b = z
    if b == fzero:
        return mpf_cos_pi(a, prec, rnd), fzero
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        return mpf_cosh(b, prec, rnd), fzero
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    return mpf_mul(c, ch, prec, rnd), mpf_neg(mpf_mul(s, sh, prec, rnd))

def mpc_sin_pi(z, prec, rnd=round_fast):
    """sin(pi*z): like mpc_sin, with the real part handled by the exact
    pi-scaled cos/sin and the imaginary part pre-multiplied by pi."""
    a, b = z
    if b == fzero:
        return mpf_sin_pi(a, prec, rnd), fzero
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        return fzero, mpf_sinh(b, prec, rnd)
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    return mpf_mul(s, ch, prec, rnd), mpf_mul(c, sh, prec, rnd)
def mpc_cos_sin(z, prec, rnd=round_fast):
    """Compute cos(z) and sin(z) together, sharing the real cos/sin and
    cosh/sinh evaluations between the two results."""
    a, b = z
    if a == fzero:
        ch, sh = mpf_cosh_sinh(b, prec, rnd)
        return (ch, fzero), (fzero, sh)
    if b == fzero:
        c, s = mpf_cos_sin(a, prec, rnd)
        return (c, fzero), (s, fzero)
    wp = prec + 6
    c, s = mpf_cos_sin(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    cre = mpf_mul(c, ch, prec, rnd)
    cim = mpf_mul(s, sh, prec, rnd)
    sre = mpf_mul(s, ch, prec, rnd)
    sim = mpf_mul(c, sh, prec, rnd)
    return (cre, mpf_neg(cim)), (sre, sim)
def mpc_cos_sin_pi(z, prec, rnd=round_fast):
    """Compute cos(pi*z) and sin(pi*z) together; the real part uses the
    exact pi-scaled cos/sin, the imaginary part is pre-scaled by pi."""
    a, b = z
    if b == fzero:
        c, s = mpf_cos_sin_pi(a, prec, rnd)
        return (c, fzero), (s, fzero)
    b = mpf_mul(b, mpf_pi(prec+5), prec+5)
    if a == fzero:
        ch, sh = mpf_cosh_sinh(b, prec, rnd)
        return (ch, fzero), (fzero, sh)
    wp = prec + 6
    c, s = mpf_cos_sin_pi(a, wp)
    ch, sh = mpf_cosh_sinh(b, wp)
    cre = mpf_mul(c, ch, prec, rnd)
    cim = mpf_mul(s, sh, prec, rnd)
    sre = mpf_mul(s, ch, prec, rnd)
    sim = mpf_mul(c, sh, prec, rnd)
    return (cre, mpf_neg(cim)), (sre, sim)
def mpc_cosh(z, prec, rnd=round_fast):
    """Complex hyperbolic cosine. Computed as cosh(z) = cos(z*i)."""
    a, b = z
    return mpc_cos((b, mpf_neg(a)), prec, rnd)

def mpc_sinh(z, prec, rnd=round_fast):
    """Complex hyperbolic sine. Computed as sinh(z) = -i*sin(z*i)."""
    a, b = z
    re, im = mpc_sin((b, a), prec, rnd)
    return im, re

def mpc_tanh(z, prec, rnd=round_fast):
    """Complex hyperbolic tangent. Computed as tanh(z) = -i*tan(z*i)."""
    a, b = z
    re, im = mpc_tan((b, a), prec, rnd)
    return im, re
# TODO: avoid loss of accuracy
def mpc_atan(z, prec, rnd=round_fast):
    """Complex arctangent via logarithms:
    atan(z) = (I/2)*(log(1-I*z) - log(1+I*z)).
    """
    a, b = z
    # atan(z) = (I/2)*(log(1-I*z) - log(1+I*z))
    # x = 1-I*z = 1 + b - I*a
    # y = 1+I*z = 1 - b + I*a
    wp = prec + 15
    x = mpf_add(fone, b, wp), mpf_neg(a)
    y = mpf_sub(fone, b, wp), a
    l1 = mpc_log(x, wp)
    l2 = mpc_log(y, wp)
    a, b = mpc_sub(l1, l2, prec, rnd)
    # (I/2) * (a+b*I) = (-b/2 + a/2*I)
    v = mpf_neg(mpf_shift(b,-1)), mpf_shift(a,-1)
    # Subtraction at infinity gives correct real part but
    # wrong imaginary part (should be zero)
    if v[1] == fnan and mpc_is_inf(z):
        v = (v[0], fzero)
    return v
beta_crossover = from_float(0.6417)
alpha_crossover = from_float(1.5)
def acos_asin(z, prec, rnd, n):
    """ complex acos for n = 0, asin for n = 1
    The algorithm is described in
    T.E. Hull, T.F. Fairgrieve and P.T.P. Tang
    'Implementing the Complex Arcsine and Arcosine Functions
    using Exception Handling',
    ACM Trans. on Math. Software Vol. 23 (1997), p299
    The complex acos and asin can be defined as
    acos(z) = acos(beta) - I*sign(a)* log(alpha + sqrt(alpha**2 -1))
    asin(z) = asin(beta) + I*sign(a)* log(alpha + sqrt(alpha**2 -1))
    where z = a + I*b
    alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha
    r = sqrt((a+1)**2 + y**2); s = sqrt((a-1)**2 + y**2)
    These expressions are rewritten in different ways in different
    regions, delimited by two crossovers alpha_crossover and beta_crossover,
    and by abs(a) <= 1, in order to improve the numerical accuracy.

    z is an (re, im) pair of raw mpf values; the result is an (re, im)
    pair rounded to prec with rounding mode rnd.
    """
    a, b = z
    wp = prec + 10
    # special cases with real argument
    if b == fzero:
        am = mpf_sub(fone, mpf_abs(a), wp)
        # case abs(a) <= 1
        if not am[0]:
            if n == 0:
                return mpf_acos(a, prec, rnd), fzero
            else:
                return mpf_asin(a, prec, rnd), fzero
        # cases abs(a) > 1
        else:
            # case a < -1
            if a[0]:
                pi = mpf_pi(prec, rnd)
                c = mpf_acosh(mpf_neg(a), prec, rnd)
                if n == 0:
                    return pi, mpf_neg(c)
                else:
                    return mpf_neg(mpf_shift(pi, -1)), c
            # case a > 1
            else:
                c = mpf_acosh(a, prec, rnd)
                if n == 0:
                    return fzero, c
                else:
                    pi = mpf_pi(prec, rnd)
                    return mpf_shift(pi, -1), mpf_neg(c)
    # Reduce to the first quadrant, remembering the original signs;
    # they are reapplied at the end.
    asign = bsign = 0
    if a[0]:
        a = mpf_neg(a)
        asign = 1
    if b[0]:
        b = mpf_neg(b)
        bsign = 1
    am = mpf_sub(fone, a, wp)
    ap = mpf_add(fone, a, wp)
    r = mpf_hypot(ap, b, wp)
    s = mpf_hypot(am, b, wp)
    alpha = mpf_shift(mpf_add(r, s, wp), -1)
    beta = mpf_div(a, alpha, wp)
    b2 = mpf_mul(b,b, wp)
    # case beta <= beta_crossover
    if not mpf_sub(beta_crossover, beta, wp)[0]:
        if n == 0:
            re = mpf_acos(beta, wp)
        else:
            re = mpf_asin(beta, wp)
    else:
        # to compute the real part in this region use the identity
        # asin(beta) = atan(beta/sqrt(1-beta**2))
        # beta/sqrt(1-beta**2) = (alpha + a) * (alpha - a)
        # alpha + a is numerically accurate; alpha - a can have
        # cancellations leading to numerical inaccuracies, so rewrite
        # it in differente ways according to the region
        Ax = mpf_add(alpha, a, wp)
        # case a <= 1
        if not am[0]:
            # c = b*b/(r + (a+1)); d = (s + (1-a))
            # alpha - a = (1/2)*(c + d)
            # case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a)
            # case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d)))
            c = mpf_div(b2, mpf_add(r, ap, wp), wp)
            d = mpf_add(s, am, wp)
            re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1)
            if n == 0:
                re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp)
            else:
                re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp)
        else:
            # c = Ax/(r + (a+1)); d = Ax/(s - (1-a))
            # alpha - a = (1/2)*(c + d)
            # case n = 0: re = atan(b*sqrt(c + d)/2/a)
            # case n = 1: re = atan(a/(b*sqrt(c + d)/2)
            c = mpf_div(Ax, mpf_add(r, ap, wp), wp)
            d = mpf_div(Ax, mpf_sub(s, am, wp), wp)
            re = mpf_shift(mpf_add(c, d, wp), -1)
            re = mpf_mul(b, mpf_sqrt(re, wp), wp)
            if n == 0:
                re = mpf_atan(mpf_div(re, a, wp), wp)
            else:
                re = mpf_atan(mpf_div(a, re, wp), wp)
    # to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover
    # replace it with 1 + Am1 + sqrt(Am1*(alpha+1)))
    # where Am1 = alpha -1
    # if alpha <= alpha_crossover:
    if not mpf_sub(alpha_crossover, alpha, wp)[0]:
        c1 = mpf_div(b2, mpf_add(r, ap, wp), wp)
        # case a < 1
        if mpf_neg(am)[0]:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a))
            c2 = mpf_add(s, am, wp)
            c2 = mpf_div(b2, c2, wp)
            Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
        else:
            # Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a)))
            c2 = mpf_sub(s, am, wp)
            Am1 = mpf_shift(mpf_add(c1, c2, wp), -1)
        # im = log(1 + Am1 + sqrt(Am1*(alpha+1)))
        im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp)
        im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp)
    else:
        # im = log(alpha + sqrt(alpha*alpha - 1))
        im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp)
        im = mpf_log(mpf_add(alpha, im, wp), wp)
    # Restore the quadrant from the saved signs.
    if asign:
        if n == 0:
            re = mpf_sub(mpf_pi(wp), re, wp)
        else:
            re = mpf_neg(re)
    if not bsign and n == 0:
        im = mpf_neg(im)
    if bsign and n == 1:
        im = mpf_neg(im)
    re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
    im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
    return re, im
def mpc_acos(z, prec, rnd=round_fast):
    """Principal-branch complex arccosine."""
    return acos_asin(z, prec, rnd, 0)

def mpc_asin(z, prec, rnd=round_fast):
    """Principal-branch complex arcsine."""
    return acos_asin(z, prec, rnd, 1)

def mpc_asinh(z, prec, rnd=round_fast):
    """Complex inverse hyperbolic sine via asinh(z) = I * asin(-I*z)."""
    a, b = z
    asin_re, asin_im = mpc_asin((b, mpf_neg(a)), prec, rnd)
    return mpf_neg(asin_im), asin_re

def mpc_acosh(z, prec, rnd=round_fast):
    """Complex inverse hyperbolic cosine.

    acosh(z) = -I*acos(z) when Im(acos(z)) <= 0, otherwise +I*acos(z).
    """
    re, im = mpc_acos(z, prec, rnd)
    if im[0] or im == fzero:
        return mpf_neg(im), re
    return im, mpf_neg(re)

def mpc_atanh(z, prec, rnd=round_fast):
    """Complex inverse hyperbolic tangent:
    atanh(z) = (log(1+z) - log(1-z))/2."""
    wp = prec + 15
    num = mpc_log(mpc_add(z, mpc_one, wp), wp)
    den = mpc_log(mpc_sub(mpc_one, z, wp), wp)
    v = mpc_shift(mpc_sub(num, den, wp), -1)
    # Subtraction at infinity gives correct imaginary part but
    # wrong real part (should be zero)
    if v[0] == fnan and mpc_is_inf(z):
        v = (fzero, v[1])
    return v
def mpc_fibonacci(z, prec, rnd=round_fast):
    """Complex Fibonacci number via the Binet-style formula
    F(z) = (phi**z - cos(pi*z)/phi**z) / (2*phi - 1).
    """
    re, im = z
    if im == fzero:
        return (mpf_fibonacci(re, prec, rnd), fzero)
    # Working precision grows with the magnitude (exp+bc) of the argument.
    # Bug fix: the original took max() of the real part's size twice,
    # ignoring the imaginary part when sizing the working precision.
    size = max(abs(re[2]+re[3]), abs(im[2]+im[3]))
    wp = prec + size + 20
    a = mpf_phi(wp)
    b = mpf_add(mpf_shift(a, 1), fnone, wp)   # b = 2*phi - 1 = sqrt(5)
    u = mpc_pow((a, fzero), z, wp)            # u = phi**z
    v = mpc_cos_pi(z, wp)
    v = mpc_div(v, u, wp)                     # v = cos(pi*z)/phi**z
    u = mpc_sub(u, v, wp)
    u = mpc_div_mpf(u, b, prec, rnd)
    return u
def mpf_expj(x, prec, rnd='f'):
    """exp(I*x) is generally not real; always signal ComplexResult so
    the caller retries with a complex type."""
    raise ComplexResult

def mpc_expj(z, prec, rnd='f'):
    """exp(I*z), computed directly as exp(-im) * (cos(re) + I*sin(re))."""
    re, im = z
    if im == fzero:
        return mpf_cos_sin(re, prec, rnd)
    if re == fzero:
        return mpf_exp(mpf_neg(im), prec, rnd), fzero
    mag = mpf_exp(mpf_neg(im), prec+10)
    c, s = mpf_cos_sin(re, prec+10)
    return mpf_mul(mag, c, prec, rnd), mpf_mul(mag, s, prec, rnd)
def mpf_expjpi(x, prec, rnd='f'):
    # exp(I*pi*x) of a real x is not representable as a single mpf; callers
    # must use the complex version (mpc_expjpi), so always signal that.
    raise ComplexResult
def mpc_expjpi(z, prec, rnd='f'):
    """Compute exp(I*pi*z) for complex z = (re, im), returned as an mpc pair."""
    re, im = z
    if im == fzero:
        # Real argument: exp(I*pi*x) = cos(pi*x) + I*sin(pi*x).
        return mpf_cos_sin_pi(re, prec, rnd)
    # mpf tuples are (sign, man, exp, bc); exp+bc approximates the bit
    # magnitude of im, used to pad the working precision for pi*im.
    sign, man, exp, bc = im
    wp = prec+10
    if man:
        wp += max(0, exp+bc)
    im = mpf_neg(mpf_mul(mpf_pi(wp), im, wp))
    if re == fzero:
        # Purely imaginary argument: the result is the real number exp(-pi*y).
        return mpf_exp(im, prec, rnd), fzero
    ey = mpf_exp(im, prec+10)
    c, s = mpf_cos_sin_pi(re, prec+10)
    re = mpf_mul(ey, c, prec, rnd)
    im = mpf_mul(ey, s, prec, rnd)
    return re, im
if BACKEND == 'sage':
    # When running under Sage, rebind mpc_exp/mpc_sqrt to Sage's compiled
    # libmp implementations; on any import/attribute failure keep the
    # pure-Python definitions and warn.
    try:
        import sage.libs.mpmath.ext_libmp as _lbmp
        mpc_exp = _lbmp.mpc_exp
        mpc_sqrt = _lbmp.mpc_sqrt
    except (ImportError, AttributeError):
        print("Warning: Sage imports in libmpc failed")
|
flashycud/timestack | refs/heads/master | django/contrib/gis/db/backends/spatialite/client.py | 623 | from django.db.backends.sqlite3.client import DatabaseClient
class SpatiaLiteClient(DatabaseClient):
executable_name = 'spatialite'
|
pvital/patchew | refs/heads/master | tests/__init__.py | 12133432 | |
bigswitch/tempest | refs/heads/master | tempest/services/network/json/__init__.py | 12133432 | |
usc-isi/essex-baremetal-support | refs/heads/master | smoketests/run_tests.py | 17 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Unittest runner for Nova.
To run all tests
python run_tests.py
To run a single test:
python run_tests.py test_compute:ComputeTestCase.test_run_terminate
To run a single test module:
python run_tests.py test_compute
or
python run_tests.py api.test_wsgi
"""
import gettext
import os
import unittest
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)
# Install _() as a builtin for i18n (unicode=1 is the Python 2 gettext API).
gettext.install('nova', unicode=1)
from nose import config
from nose import core
from nose import result
from smoketests import flags
# Command-line flag registry object provided by the smoketests package.
FLAGS = flags.FLAGS
class _AnsiColorizer(object):
    """
    Loosely wraps an output stream so that callers can write text in a
    named color using ANSI escape sequences. Colorizer classes must
    implement C{supported()} and C{write(text, color)}.
    """
    _colors = dict(black=30, red=31, green=32, yellow=33,
                   blue=34, magenta=35, cyan=36, white=37)

    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        """
        Return True when the current platform can color terminal output
        via this method: the stream is a TTY, curses is importable, and
        the terminal reports more than two colors.
        """
        if not stream.isatty():
            return False  # auto color only on TTYs
        try:
            import curses
        except ImportError:
            return False
        try:
            return curses.tigetnum("colors") > 2
        except curses.error:
            # Terminal database not initialized yet: set it up and retry.
            curses.setupterm()
            return curses.tigetnum("colors") > 2

    def write(self, text, color):
        """
        Write *text* to the stream wrapped in the bold ANSI escape for
        *color* (a key of C{_colors}, e.g. 'red', 'white').
        """
        code = self._colors[color]
        self.stream.write('\x1b[%s;1m%s\x1b[0m' % (code, text))
class _Win32Colorizer(object):
    """
    See _AnsiColorizer docstring.
    """
    def __init__(self, stream):
        # win32console only exists on Windows; import at construction time.
        from win32console import GetStdHandle, STD_OUT_HANDLE, \
            FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
            FOREGROUND_INTENSITY
        self.stream = stream
        self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
        r, g, b = FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_BLUE
        bold = FOREGROUND_INTENSITY
        self._colors = {
            'normal': r | g | b,
            'red': r | bold,
            'green': g | bold,
            'blue': b | bold,
            'yellow': r | g | bold,
            'magenta': r | b | bold,
            'cyan': g | b | bold,
            'white': r | g | b | bold
        }

    @classmethod
    def supported(cls, stream=sys.stdout):
        """True only when win32console is importable and the console
        buffer accepts a text-attribute change."""
        try:
            import win32console
            screen_buffer = win32console.GetStdHandle(
                win32console.STD_OUT_HANDLE)
        except ImportError:
            return False
        import pywintypes
        try:
            screen_buffer.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED |
                win32console.FOREGROUND_GREEN |
                win32console.FOREGROUND_BLUE)
        except pywintypes.error:
            return False
        return True

    def write(self, text, color):
        # Switch the console attribute, write, then restore 'normal'.
        self.screenBuffer.SetConsoleTextAttribute(self._colors[color])
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
    """
    See _AnsiColorizer docstring.
    """
    def __init__(self, stream):
        self.stream = stream

    @classmethod
    def supported(cls, stream=sys.stdout):
        # Always usable: this is the no-color fallback.
        return True

    def write(self, text, color):
        # Ignore *color* entirely and pass the text through unchanged.
        self.stream.write(text)
class NovaTestResult(result.TextTestResult):
    """nose text result that colorizes OK/FAIL/ERROR output and prints a
    heading whenever the test case class changes."""
    def __init__(self, *args, **kw):
        result.TextTestResult.__init__(self, *args, **kw)
        self._last_case = None
        self.colorizer = None
        # NOTE(vish): reset stdout for the terminal check
        stdout = sys.stdout
        sys.stdout = sys.__stdout__
        # Pick the first colorizer that works on this platform/stream.
        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
            if colorizer.supported():
                self.colorizer = colorizer(self.stream)
                break
        sys.stdout = stdout
    def getDescription(self, test):
        return str(test)
    # NOTE(vish): copied from unittest with edit to add color
    def addSuccess(self, test):
        unittest.TestResult.addSuccess(self, test)
        if self.showAll:
            self.colorizer.write("OK", 'green')
            self.stream.writeln()
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()
    # NOTE(vish): copied from unittest with edit to add color
    def addFailure(self, test, err):
        unittest.TestResult.addFailure(self, test, err)
        if self.showAll:
            self.colorizer.write("FAIL", 'red')
            self.stream.writeln()
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()
    # NOTE(vish): copied from nose with edit to add color
    def addError(self, test, err):
        """Overrides normal addError to add support for
        errorClasses. If the exception is a registered class, the
        error will be added to the list for that class, not errors.
        """
        stream = getattr(self, 'stream', None)
        ec, ev, tb = err
        try:
            exc_info = self._exc_info_to_string(err, test)
        except TypeError:
            # 2.3 compat
            exc_info = self._exc_info_to_string(err)
        # errorClasses maps exception class -> (storage, label, isfail);
        # registered classes are recorded separately from plain errors.
        for cls, (storage, label, isfail) in self.errorClasses.items():
            if result.isclass(ec) and issubclass(ec, cls):
                if isfail:
                    test.passed = False
                storage.append((test, exc_info))
                # Might get patched into a streamless result
                if stream is not None:
                    if self.showAll:
                        message = [label]
                        detail = result._exception_detail(err[1])
                        if detail:
                            message.append(detail)
                        stream.writeln(": ".join(message))
                    elif self.dots:
                        stream.write(label[:1])
                return
        self.errors.append((test, exc_info))
        test.passed = False
        if stream is not None:
            if self.showAll:
                self.colorizer.write("ERROR", 'red')
                self.stream.writeln()
            elif self.dots:
                stream.write('E')
    def startTest(self, test):
        unittest.TestResult.startTest(self, test)
        current_case = test.test.__class__.__name__
        if self.showAll:
            # Print the case-class heading once per class, then the
            # method name left-justified so the verdicts line up.
            if current_case != self._last_case:
                self.stream.writeln(current_case)
                self._last_case = current_case
            self.stream.write(
                ' %s' % str(test.test._testMethodName).ljust(60))
            self.stream.flush()
class NovaTestRunner(core.TextTestRunner):
    # Runner whose only customization is producing the colorized
    # NovaTestResult instead of nose's default text result.
    def _makeResult(self):
        return NovaTestResult(self.stream,
                              self.descriptions,
                              self.verbosity,
                              self.config)
if __name__ == '__main__':
    # Python 2 script entry point; _() is the gettext builtin installed above.
    if not os.getenv('EC2_ACCESS_KEY'):
        print _('Missing EC2 environment variables. Please ' \
            'source the appropriate novarc file before ' \
            'running this test.')
        sys.exit(1)
    argv = FLAGS(sys.argv)
    testdir = os.path.abspath("./")
    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
                      workingDir=testdir,
                      plugins=core.DefaultPluginManager())
    runner = NovaTestRunner(stream=c.stream,
                            verbosity=c.verbosity,
                            config=c)
    # core.run returns truthy on success; invert for the shell exit code.
    sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
|
asedunov/intellij-community | refs/heads/master | python/lib/Lib/encodings/cp1257.py | 593 | """ Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1257 codec backed by the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding keeps no state between calls, so only encode()
    # is overridden; [0] drops the consumed-length from the result pair.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding keeps no state between calls, so only decode()
    # is overridden; [0] drops the consumed-length from the result pair.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits the cp1257 encode() from Codec; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits the cp1257 decode() from Codec; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry the encodings package registers for 'cp1257'."""
    return codecs.CodecInfo(
        name='cp1257',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\ufffe' # 0x83 -> UNDEFINED
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\xa8' # 0x8D -> DIAERESIS
u'\u02c7' # 0x8E -> CARON
u'\xb8' # 0x8F -> CEDILLA
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\xaf' # 0x9D -> MACRON
u'\u02db' # 0x9E -> OGONEK
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe' # 0xA1 -> UNDEFINED
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe' # 0xA5 -> UNDEFINED
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xe6' # 0xBF -> LATIN SMALL LETTER AE
u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
bugsnag/bugsnag-python | refs/heads/master | example/django21/demo/__init__.py | 12133432 | |
leekchan/django_test | refs/heads/master | django/contrib/staticfiles/templatetags/__init__.py | 12133432 | |
brianlsharp/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/f2py/tests/test_mixed.py | 59 | import os
import math
from numpy.testing import *
from numpy import array
import util
def _path(*a):
    """Join the given path components onto this test module's directory."""
    return os.path.join(os.path.dirname(__file__), *a)
class TestMixed(util.F2PyTest):
    # Build one f2py extension from three Fortran sources in mixed
    # forms/dialects, then verify every wrapped entry point works.
    sources = [_path('src', 'mixed', 'foo.f'),
               _path('src', 'mixed', 'foo_fixed.f90'),
               _path('src', 'mixed', 'foo_free.f90')]
    @dec.slow
    def test_all(self):
        # Each source contributes one function returning a fixed value.
        assert self.module.bar11() == 11
        assert self.module.foo_fixed.bar12() == 12
        assert self.module.foo_free.bar13() == 13
if __name__ == "__main__":
import nose
nose.runmodule()
|
pombredanne/bulbs2 | refs/heads/master | bulbs2/utils/signals/slugs.py | 2 | from django.conf import settings
from django.template.defaultfilters import slugify as _slugify
MAX_SLUG_LENGTH = getattr(settings, "MAX_SLUG_LENGTH", 50)
def slugify(value, length=MAX_SLUG_LENGTH):
    """Slugify *value* with Django's filter and clamp it to *length* chars.

    Hyphens left dangling at the truncation point are stripped, so the
    returned slug never ends with "-".

    :param value: the value you want turned into a slug
    :type value: str
    :param length: the maximum length of the slug
    :type length: int
    :return: the slugified value sliced to at most *length* characters
    :rtype: str
    """
    # rstrip("-") removes every trailing hyphen, exactly like looping
    # while the slug ends with "-".
    return _slugify(value)[:length].rstrip("-")
|
PPCDroid/external-qemu | refs/heads/master | gen-charmap.py | 3 | #!/usr/bin/python
#
# a python script used to generate some C constant tables from a key charmap file
#
# usage:
# progname file.kcm > charmap-tab.h
#
import sys, os, string, re
header = """\
#include "android_charmap.h"
/* the following is automatically generated by the 'gen-charmap.py' script
* do not touch. the generation command was:
* gen-charmap.py\
"""
header2 = """
*/
"""
kmap_header = """\
static const AKeyEntry _%(name)s_keys[] =
{
/* keycode base caps fn caps+fn number */
"""
kmap_footer = """\
};
static const AKeyCharmap _%(name)s_charmap =
{
_%(name)s_keys,
%(count)d,
"%(name)s"
};
"""
re_mapname = re.compile( r".*/(\w+).kcm" )
re_start = re.compile( r"(\w+)\s*(.*)" )
re_char = re.compile( r"('.')\s*(.*)" )
re_hex = re.compile( r"(0x\w+)\s*(.*)" )
specials = { 'COMMA': 'Comma',
'PERIOD': 'Period',
'AT': 'At',
'LEFT_BRACKET': 'LeftBracket',
'RIGHT_BRACKET': 'RightBracket',
'SLASH': 'Slash',
'BACKSLASH': 'Backslash',
'GRAVE': 'Grave',
'MINUS': 'Minus',
'EQUALS': 'Equals',
'SEMICOLON': 'Semicolon',
'APOSTROPHE': 'Apostrophe',
'SPACE': 'Space',
'ENTER': 'Enter',
'TAB': 'Tab'
}
entries = []
def match_char_or_hex(line):
    """Match a quoted character or a 0x hex literal at the start of *line*.

    Returns the first successful match object, or None if neither
    pattern matches (match objects are truthy, None is falsy).
    """
    return re_char.match(line) or re_hex.match(line)
def quote(s):
    """Escape the quote and backslash character literals so they are
    valid C char constants; every other literal passes through unchanged."""
    replacements = {
        "'''": "'\\''",
        "'\\'": "'\\\\'",
    }
    return replacements.get(s, s)
def process_line(line,result):
    """Parse one keymap line 'KEYCODE base caps fn caps+fn number' and
    append the parsed 6-tuple to *result*.
    Returns 0 on success, -1 on any parse error (reported via Python 2
    print statements)."""
    m = re_start.match(line)
    if not m:
        print "bad bad line: " + line
        return -1
    keycode = m.group(1)
    line = m.group(2)
    # Five character/hex columns follow the keycode, parsed one by one.
    m = match_char_or_hex(line)
    if not m:
        print "character expected in: " + line
        return -1
    base = quote(m.group(1))
    line = m.group(2)
    m = match_char_or_hex(line)
    if not m:
        print "character expected in: " + line
        return -1
    caps = quote(m.group(1))
    line = m.group(2)
    m = match_char_or_hex(line)
    if not m:
        print "character expected in: " + line
        return -1
    fn = quote(m.group(1))
    line = m.group(2)
    m = match_char_or_hex(line)
    if not m:
        print "character expected in: " + line
        return -1
    caps_fn = quote(m.group(1))
    line = m.group(2)
    m = match_char_or_hex(line)
    if not m:
        print "character expected in: " + line
        return -1
    number = quote(m.group(1))
    # Rewrite special key names (COMMA, PERIOD, ...) to CamelCase, then
    # prefix with the C enum namespace.
    if specials.has_key(keycode):
        keycode = specials[keycode]
    keycode = "kKeyCode" + keycode
    result.append( (keycode,base,caps,fn,caps_fn,number) )
    return 0
def process_file( file ):
    """Parse a .kcm keymap file and return the list of parsed key
    entries; parsing stops at the first malformed line (Python 2:
    xreadlines)."""
    result = []
    fp = open(file,"rb")
    for line in fp.xreadlines():
        line = line.strip()
        if not line: # skip empty lines
            continue
        if line[0] == '#' or line[0] == '[': # skip
            continue
        if process_line(line,result) < 0:
            break
    fp.close()
    return result
class KMap:
    """A named keyboard charmap plus its parsed key entries."""
    def __init__(self,name,results):
        self.name = name
        self.results = results
    def dump(self):
        # Emit the C key table and charmap struct for this keymap
        # (Python 2 print statements write to stdout).
        t = { 'name': self.name, 'count':len(self.results) }
        print kmap_header % t
        for item in self.results:
            print " { %-22s, %5s, %5s, %5s, %6s, %5s }," % item
        print kmap_footer % t
kmaps = []
if len(sys.argv) < 2:
print "usage: progname charmap.kcm [charmap2.kcm ...] > charmap-tab.h"
else:
genline = ""
for filepath in sys.argv[1:]:
m = re_mapname.match(filepath)
if not m:
print "%s is not a keyboard charmap name" % filepath
os.exit(1)
mapname = m.group(1)
genline = genline + " " + mapname + ".kcm"
for filepath in sys.argv[1:]:
m = re_mapname.match(filepath)
mapname = m.group(1)
result = process_file( filepath )
kmap = KMap(mapname,result)
kmaps.append(kmap)
print header + genline + header2
for kmap in kmaps:
kmap.dump()
print "const AKeyCharmap* android_charmaps[%d] = {" % len(kmaps),
comma = ""
for kmap in kmaps:
print "%s&_%s_charmap" % (comma, kmap.name),
comma = ", "
print "};"
print "const int android_charmap_count = %d;" % len(kmaps)
|
QInfer/python-qinfer | refs/heads/master | src/qinfer/_due.py | 5 | # emacs: at the end of the file
# ex: set sts=4 ts=4 sw=4 et:
# pylint: skip-file
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### #
"""
Stub file for a guaranteed safe import of duecredit constructs: if duecredit
is not available.
To use it, place it into your project codebase to be imported, e.g. copy as
cp stub.py /path/tomodule/module/due.py
Note that it might be better to avoid naming it duecredit.py to avoid shadowing
installed duecredit.
Then use in your code as
from .due import due, Doi, BibTeX
See https://github.com/duecredit/duecredit/blob/master/README.md for examples.
Origin: Originally a part of the duecredit
Copyright: 2015-2016 DueCredit developers
License: BSD-2
"""
__version__ = '0.0.5'
class InactiveDueCreditCollector(object):
    """Just a stub at the Collector which would not do anything"""

    def _donothing(self, *args, **kwargs):
        """Perform no good and no bad"""
        return None

    def dcite(self, *args, **kwargs):
        """If I could cite I would"""
        def nondecorating_decorator(func):
            # Hand the decorated callable back untouched.
            return func
        return nondecorating_decorator

    # Every collector verb is the same harmless no-op.
    cite = load = add = _donothing

    def __repr__(self):
        return '%s()' % self.__class__.__name__
def _donothing_func(*args, **kwargs):
    """Perform no good and no bad"""
    # Accept anything, return nothing: stand-in for BibTeX/Doi/Url.
    return None
# Prefer the real duecredit package; on ANY failure fall back to the
# inert stubs defined above so importing this module never breaks callers.
try:
    from duecredit import due, BibTeX, Doi, Url
    if 'due' in locals() and not hasattr(due, 'cite'):
        raise RuntimeError(
            "Imported due lacks .cite. DueCredit is now disabled")
except Exception as e:
    # A plain ImportError is expected (duecredit not installed) and is
    # silent; anything else is logged before falling back.
    if type(e).__name__ != 'ImportError':
        import logging
        logging.getLogger("duecredit").error(
            "Failed to import duecredit due to %s" % str(e))
    # Initiate due stub
    due = InactiveDueCreditCollector()
    BibTeX = Doi = Url = _donothing_func
# Emacs mode definitions
# Local Variables:
# mode: python
# py-indent-offset: 4
# tab-width: 4
# indent-tabs-mode: nil
# End:
|
roks0n/nomadboard | refs/heads/master | nomadboard/nomadboard/scraper/tests.py | 341 | # Create your tests here.
|
iFighting/OpenTLD | refs/heads/master | datasets/minbox.py | 14 | import csv
import sys
import cv2
def box_size(b):
    """Return the length of the shorter side of box b = (x0, y0, x1, y1)."""
    width = b[2] - b[0]
    height = b[3] - b[1]
    return min(width, height)
# Scan a CSV of bounding boxes (x0,y0,x1,y1 per row; 'NaN' marks a
# missing detection) and print the smallest short-side seen. Python 2.
boxes = csv.reader(open(sys.argv[1],'rb'),delimiter=',')
min_box=1000000
for box in boxes:
    if (box[0] != 'NaN'):
        box = map(float,box)
        if (box_size(box) < min_box):
            min_box = box_size(box)
print min_box
|
igemsoftware2017/USTC-Software-2017 | refs/heads/master | tests/core/tasks/myplugin/models.py | 69 | from django.db import models # noqa
# Create your models here.
|
konstruktoid/ansible-upstream | refs/heads/devel | lib/ansible/modules/system/sysctl.py | 42 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, David "DaviXX" CHANIAL <david.chanial@gmail.com>
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: sysctl
short_description: Manage entries in sysctl.conf.
description:
- This module manipulates sysctl entries and optionally performs a C(/sbin/sysctl -p) after changing them.
version_added: "1.0"
options:
name:
description:
- The dot-separated path (aka I(key)) specifying the sysctl variable.
required: true
aliases: [ 'key' ]
value:
description:
- Desired value of the sysctl key.
aliases: [ 'val' ]
state:
description:
- Whether the entry should be present or absent in the sysctl file.
choices: [ "present", "absent" ]
default: present
ignoreerrors:
description:
- Use this option to ignore errors about unknown keys.
type: bool
default: 'no'
reload:
description:
- If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is
updated. If C(no), does not reload I(sysctl) even if the
C(sysctl_file) is updated.
type: bool
default: 'yes'
sysctl_file:
description:
- Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf).
default: /etc/sysctl.conf
sysctl_set:
description:
- Verify token value with the sysctl command and set with -w if necessary
type: bool
default: 'no'
version_added: 1.5
author: "David CHANIAL (@davixx) <david.chanial@gmail.com>"
'''
EXAMPLES = '''
# Set vm.swappiness to 5 in /etc/sysctl.conf
- sysctl:
name: vm.swappiness
value: 5
state: present
# Remove kernel.panic entry from /etc/sysctl.conf
- sysctl:
name: kernel.panic
state: absent
sysctl_file: /etc/sysctl.conf
# Set kernel.panic to 3 in /tmp/test_sysctl.conf
- sysctl:
name: kernel.panic
value: 3
sysctl_file: /tmp/test_sysctl.conf
reload: no
# Set ip forwarding on in /proc and do not reload the sysctl file
- sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_set: yes
# Set ip forwarding on in /proc and in the sysctl file and reload if necessary
- sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_set: yes
state: present
reload: yes
'''
# ==============================================================
import os
import tempfile
from ansible.module_utils.basic import get_platform, AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
from ansible.module_utils._text import to_native
class SysctlModule(object):
    def __init__(self, module):
        """Capture the Ansible module's params, locate the sysctl binary,
        initialize the bookkeeping state, and run the check/apply cycle."""
        self.module = module
        self.args = self.module.params
        self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True)
        self.sysctl_file = self.args['sysctl_file']
        self.proc_value = None # current token value in proc fs
        self.file_value = None # current token value in file
        self.file_lines = [] # all lines in the file
        self.file_values = {} # dict of token values
        self.changed = False # will change occur
        self.set_proc = False # does sysctl need to set value
        self.write_file = False # does the sysctl file need to be reloaded
        self.process()
# ==============================================================
# LOGIC
# ==============================================================
def process(self):
self.platform = get_platform().lower()
# Whitespace is bad
self.args['name'] = self.args['name'].strip()
self.args['value'] = self._parse_value(self.args['value'])
thisname = self.args['name']
# get the current proc fs value
self.proc_value = self.get_token_curr_value(thisname)
# get the currect sysctl file value
self.read_sysctl_file()
if thisname not in self.file_values:
self.file_values[thisname] = None
# update file contents with desired token/value
self.fix_lines()
# what do we need to do now?
if self.file_values[thisname] is None and self.args['state'] == "present":
self.changed = True
self.write_file = True
elif self.file_values[thisname] is None and self.args['state'] == "absent":
self.changed = False
elif self.file_values[thisname] and self.args['state'] == "absent":
self.changed = True
self.write_file = True
elif self.file_values[thisname] != self.args['value']:
self.changed = True
self.write_file = True
# use the sysctl command or not?
if self.args['sysctl_set']:
if self.proc_value is None:
self.changed = True
elif not self._values_is_equal(self.proc_value, self.args['value']):
self.changed = True
self.set_proc = True
# Do the work
if not self.module.check_mode:
if self.write_file:
self.write_sysctl()
if self.write_file and self.args['reload']:
self.reload_sysctl()
if self.set_proc:
self.set_token_value(self.args['name'], self.args['value'])
def _values_is_equal(self, a, b):
"""Expects two string values. It will split the string by whitespace
and compare each value. It will return True if both lists are the same,
contain the same elements and the same order."""
if a is None or b is None:
return False
a = a.split()
b = b.split()
if len(a) != len(b):
return False
return len([i for i, j in zip(a, b) if i == j]) == len(a)
def _parse_value(self, value):
if value is None:
return ''
elif isinstance(value, bool):
if value:
return '1'
else:
return '0'
elif isinstance(value, string_types):
if value.lower() in BOOLEANS_TRUE:
return '1'
elif value.lower() in BOOLEANS_FALSE:
return '0'
else:
return value.strip()
else:
return value
# ==============================================================
# SYSCTL COMMAND MANAGEMENT
# ==============================================================
# Use the sysctl command to find the current value
def get_token_curr_value(self, token):
if self.platform == 'openbsd':
# openbsd doesn't support -e, just drop it
thiscmd = "%s -n %s" % (self.sysctl_cmd, token)
else:
thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
rc, out, err = self.module.run_command(thiscmd)
if rc != 0:
return None
else:
return out
# Use the sysctl command to set the current value
def set_token_value(self, token, value):
if len(value.split()) > 0:
value = '"' + value + '"'
if self.platform == 'openbsd':
# openbsd doesn't accept -w, but since it's not needed, just drop it
thiscmd = "%s %s=%s" % (self.sysctl_cmd, token, value)
elif self.platform == 'freebsd':
ignore_missing = ''
if self.args['ignoreerrors']:
ignore_missing = '-i'
# freebsd doesn't accept -w, but since it's not needed, just drop it
thiscmd = "%s %s %s=%s" % (self.sysctl_cmd, ignore_missing, token, value)
else:
ignore_missing = ''
if self.args['ignoreerrors']:
ignore_missing = '-e'
thiscmd = "%s %s -w %s=%s" % (self.sysctl_cmd, ignore_missing, token, value)
rc, out, err = self.module.run_command(thiscmd)
if rc != 0:
self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))
else:
return rc
# Run sysctl -p
def reload_sysctl(self):
# do it
if self.platform == 'freebsd':
# freebsd doesn't support -p, so reload the sysctl service
rc, out, err = self.module.run_command('/etc/rc.d/sysctl reload')
elif self.platform == 'openbsd':
# openbsd doesn't support -p and doesn't have a sysctl service,
# so we have to set every value with its own sysctl call
for k, v in self.file_values.items():
rc = 0
if k != self.args['name']:
rc = self.set_token_value(k, v)
if rc != 0:
break
if rc == 0 and self.args['state'] == "present":
rc = self.set_token_value(self.args['name'], self.args['value'])
else:
# system supports reloading via the -p flag to sysctl, so we'll use that
sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]
if self.args['ignoreerrors']:
sysctl_args.insert(1, '-e')
rc, out, err = self.module.run_command(sysctl_args)
if rc != 0:
self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err))
# ==============================================================
# SYSCTL FILE MANAGEMENT
# ==============================================================
# Get the token value from the sysctl file
def read_sysctl_file(self):
lines = []
if os.path.isfile(self.sysctl_file):
try:
with open(self.sysctl_file, "r") as read_file:
lines = read_file.readlines()
except IOError as e:
self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, to_native(e)))
for line in lines:
line = line.strip()
self.file_lines.append(line)
# don't split empty lines or comments or line without equal sign
if not line or line.startswith(("#", ";")) or "=" not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
self.file_values[k] = v.strip()
# Fix the value in the sysctl file content
def fix_lines(self):
checked = []
self.fixed_lines = []
for line in self.file_lines:
if not line.strip() or line.strip().startswith(("#", ";")) or "=" not in line:
self.fixed_lines.append(line)
continue
tmpline = line.strip()
k, v = tmpline.split('=', 1)
k = k.strip()
v = v.strip()
if k not in checked:
checked.append(k)
if k == self.args['name']:
if self.args['state'] == "present":
new_line = "%s=%s\n" % (k, self.args['value'])
self.fixed_lines.append(new_line)
else:
new_line = "%s=%s\n" % (k, v)
self.fixed_lines.append(new_line)
if self.args['name'] not in checked and self.args['state'] == "present":
new_line = "%s=%s\n" % (self.args['name'], self.args['value'])
self.fixed_lines.append(new_line)
# Completely rewrite the sysctl file
def write_sysctl(self):
# open a tmp file
fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(self.sysctl_file))
f = open(tmp_path, "w")
try:
for l in self.fixed_lines:
f.write(l.strip() + "\n")
except IOError as e:
self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(e)))
f.flush()
f.close()
# replace the real one
self.module.atomic_move(tmp_path, self.sysctl_file)
# ==============================================================
# main
def main():
    """Entry point: declare the argument spec, validate parameters and
    delegate all work to SysctlModule, reporting whether anything changed."""

    # defining module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['key'], required=True),
            value=dict(aliases=['val'], required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            reload=dict(default=True, type='bool'),
            sysctl_set=dict(default=False, type='bool'),
            ignoreerrors=dict(default=False, type='bool'),
            sysctl_file=dict(default='/etc/sysctl.conf', type='path')
        ),
        supports_check_mode=True,
        required_if=[('state', 'present', ['value'])],
    )

    # NOTE(review): required=True and required_if above already reject the
    # None cases; the explicit checks below are belt-and-braces and also
    # catch empty strings, which argument parsing allows.
    if module.params['name'] is None:
        module.fail_json(msg="name cannot be None")
    if module.params['state'] == 'present' and module.params['value'] is None:
        module.fail_json(msg="value cannot be None")

    # In case of in-line params
    if module.params['name'] == '':
        module.fail_json(msg="name cannot be blank")
    if module.params['state'] == 'present' and module.params['value'] == '':
        module.fail_json(msg="value cannot be blank")

    result = SysctlModule(module)

    module.exit_json(changed=result.changed)


if __name__ == '__main__':
    main()
|
andresriancho/django-moth | refs/heads/master | moth/views/vulnerabilities/core/headers.py | 1 | from django.shortcuts import render
from moth.views.base.vulnerable_template_view import VulnerableTemplateView
class EchoHeadersView(VulnerableTemplateView):
    """Vulnerable view that renders every HTTP request header back into the
    response body."""

    description = title = 'Echoes all request headers'
    url_path = 'echo-headers.py'

    # WSGI exposes most request headers with an HTTP_ prefix; these
    # well-known ones appear without it.
    KNOWN_HEADERS = ('CONTENT_LENGTH',)

    def is_http_header(self, hname):
        # Treat anything HTTP_-prefixed, plus the known exceptions, as a header.
        return hname in self.KNOWN_HEADERS or hname.startswith('HTTP_')

    def translate_header(self, hname):
        # e.g. HTTP_USER_AGENT -> User-Agent
        return hname.replace('HTTP_', '').replace('_', '-').lower().title()

    def get(self, request, *args, **kwds):
        context = self.get_context_data()

        pieces = []
        for hname, hvalue in request.META.items():
            if self.is_http_header(hname):
                pieces.append('Header "%s" with value "%s" <br/>\n'
                              % (self.translate_header(hname), hvalue))

        context['html'] = ''.join(pieces)
        return render(request, self.template_name, context)
|
SAM-IT-SA/odoo | refs/heads/8.0 | openerp/addons/base/tests/test_ir_actions.py | 291 | import unittest2
from openerp.osv.orm import except_orm
import openerp.tests.common as common
from openerp.tools import mute_logger
class TestServerActionsBase(common.TransactionCase):
    """Shared fixture for ir.actions.server tests.

    Provides registry shortcuts, a sample country + partner to run actions
    on, the ids of commonly-used model/field records, and a basic
    code-type server action (self.act_id) that the tests reconfigure.
    """

    def setUp(self):
        super(TestServerActionsBase, self).setUp()
        cr, uid = self.cr, self.uid

        # Models
        self.ir_actions_server = self.registry('ir.actions.server')
        self.ir_actions_client = self.registry('ir.actions.client')
        self.ir_values = self.registry('ir.values')
        self.ir_model = self.registry('ir.model')
        self.ir_model_fields = self.registry('ir.model.fields')
        self.res_partner = self.registry('res.partner')
        self.res_country = self.registry('res.country')

        # Data on which we will run the server action
        self.test_country_id = self.res_country.create(cr, uid, {
            'name': 'TestingCountry',
            'code': 'TY',
            'address_format': 'SuperFormat',
        })
        self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
        self.test_partner_id = self.res_partner.create(cr, uid, {
            'name': 'TestingPartner',
            'city': 'OrigCity',
            'country_id': self.test_country_id,
        })
        self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
        self.context = {
            'active_id': self.test_partner_id,
            'active_model': 'res.partner',
        }

        # Model data
        self.res_partner_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.partner')])[0]
        self.res_partner_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'name')])[0]
        self.res_partner_city_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'city')])[0]
        self.res_partner_country_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'country_id')])[0]
        self.res_partner_parent_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.partner'), ('name', '=', 'parent_id')])[0]
        self.res_country_model_id = self.ir_model.search(cr, uid, [('model', '=', 'res.country')])[0]
        self.res_country_name_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'name')])[0]
        self.res_country_code_field_id = self.ir_model_fields.search(cr, uid, [('model', '=', 'res.country'), ('name', '=', 'code')])[0]

        # create a basic code-type server action the tests will reconfigure
        self.act_id = self.ir_actions_server.create(cr, uid, {
            'name': 'TestAction',
            'condition': 'True',
            'model_id': self.res_partner_model_id,
            'state': 'code',
            'code': 'obj.write({"comment": "MyComment"})',
        })
class TestServerActions(TestServerActionsBase):
    """Behavioral tests for every ir.actions.server action type: condition
    evaluation, code execution, workflow triggers, client actions, CRUD
    create/write modes and multi-action composition."""

    def test_00_action(self):
        """Condition evaluation plus contextual-action (ir.values) lifecycle."""
        cr, uid = self.cr, self.uid

        # Do: eval 'True' condition
        self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
        self.test_partner.refresh()
        self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
        self.test_partner.write({'comment': False})

        # Do: eval False condition, that should be considered as True (void = True)
        self.ir_actions_server.write(cr, uid, [self.act_id], {'condition': False})
        self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
        self.test_partner.refresh()
        self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')

        # Do: create contextual action
        self.ir_actions_server.create_action(cr, uid, [self.act_id])

        # Test: ir_values created
        ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
        self.assertEqual(len(ir_values_ids), 1, 'ir_actions_server: create_action should have created an entry in ir_values')
        ir_value = self.ir_values.browse(cr, uid, ir_values_ids[0])
        self.assertEqual(ir_value.value, 'ir.actions.server,%s' % self.act_id, 'ir_actions_server: created ir_values should reference the server action')
        self.assertEqual(ir_value.model, 'res.partner', 'ir_actions_server: created ir_values should be linked to the action base model')

        # Do: remove contextual action
        self.ir_actions_server.unlink_action(cr, uid, [self.act_id])

        # Test: ir_values removed
        ir_values_ids = self.ir_values.search(cr, uid, [('name', '=', 'Run TestAction')])
        self.assertEqual(len(ir_values_ids), 0, 'ir_actions_server: unlink_action should remove the ir_values record')

    def test_10_code(self):
        """Code-type action: executes python code with obj/pool in scope."""
        cr, uid = self.cr, self.uid
        self.ir_actions_server.write(cr, uid, self.act_id, {
            'state': 'code',
            'code': """partner_name = obj.name + '_code'
self.pool["res.partner"].create(cr, uid, {"name": partner_name}, context=context)
workflow"""
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
        pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner_code')])
        self.assertEqual(len(pids), 1, 'ir_actions_server: 1 new partner should have been created')

    def test_20_trigger(self):
        """Trigger-type action: signals a workflow transition on the base
        record or on a related ('relational') record."""
        cr, uid = self.cr, self.uid

        # Data: code server action (at this point code-based actions should work)
        act_id2 = self.ir_actions_server.create(cr, uid, {
            'name': 'TestAction2',
            'type': 'ir.actions.server',
            'condition': 'True',
            'model_id': self.res_partner_model_id,
            'state': 'code',
            'code': 'obj.write({"comment": "MyComment"})',
        })
        act_id3 = self.ir_actions_server.create(cr, uid, {
            'name': 'TestAction3',
            'type': 'ir.actions.server',
            'condition': 'True',
            'model_id': self.res_country_model_id,
            'state': 'code',
            'code': 'obj.write({"code": "ZZ"})',
        })

        # Data: create workflows
        partner_wf_id = self.registry('workflow').create(cr, uid, {
            'name': 'TestWorkflow',
            'osv': 'res.partner',
            'on_create': True,
        })
        partner_act1_id = self.registry('workflow.activity').create(cr, uid, {
            'name': 'PartnerStart',
            'wkf_id': partner_wf_id,
            'flow_start': True
        })
        partner_act2_id = self.registry('workflow.activity').create(cr, uid, {
            'name': 'PartnerTwo',
            'wkf_id': partner_wf_id,
            'kind': 'function',
            'action': 'True',
            'action_id': act_id2,
        })
        partner_trs1_id = self.registry('workflow.transition').create(cr, uid, {
            'signal': 'partner_trans',
            'act_from': partner_act1_id,
            'act_to': partner_act2_id
        })
        country_wf_id = self.registry('workflow').create(cr, uid, {
            'name': 'TestWorkflow',
            'osv': 'res.country',
            'on_create': True,
        })
        country_act1_id = self.registry('workflow.activity').create(cr, uid, {
            'name': 'CountryStart',
            'wkf_id': country_wf_id,
            'flow_start': True
        })
        country_act2_id = self.registry('workflow.activity').create(cr, uid, {
            'name': 'CountryTwo',
            'wkf_id': country_wf_id,
            'kind': 'function',
            'action': 'True',
            'action_id': act_id3,
        })
        country_trs1_id = self.registry('workflow.transition').create(cr, uid, {
            'signal': 'country_trans',
            'act_from': country_act1_id,
            'act_to': country_act2_id
        })

        # Data: re-create country and partner to benefit from the workflows
        self.test_country_id = self.res_country.create(cr, uid, {
            'name': 'TestingCountry2',
            'code': 'T2',
        })
        self.test_country = self.res_country.browse(cr, uid, self.test_country_id)
        self.test_partner_id = self.res_partner.create(cr, uid, {
            'name': 'TestingPartner2',
            'country_id': self.test_country_id,
        })
        self.test_partner = self.res_partner.browse(cr, uid, self.test_partner_id)
        self.context = {
            'active_id': self.test_partner_id,
            'active_model': 'res.partner',
        }

        # Run the action on partner object itself ('base')
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'trigger',
            'use_relational_model': 'base',
            'wkf_model_id': self.res_partner_model_id,
            'wkf_model_name': 'res.partner',
            'wkf_transition_id': partner_trs1_id,
        })
        self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
        self.test_partner.refresh()
        self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: incorrect signal trigger')

        # Run the action on related country object ('relational')
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'use_relational_model': 'relational',
            'wkf_model_id': self.res_country_model_id,
            'wkf_model_name': 'res.country',
            'wkf_field_id': self.res_partner_country_field_id,
            'wkf_transition_id': country_trs1_id,
        })
        self.ir_actions_server.run(cr, uid, [self.act_id], self.context)
        self.test_country.refresh()
        self.assertEqual(self.test_country.code, 'ZZ', 'ir_actions_server: incorrect signal trigger')

        # Clear workflow cache, otherwise openerp will try to create workflows even if it has been deleted
        from openerp.workflow import clear_cache
        clear_cache(cr, uid)

    def test_30_client(self):
        """Client-type action: running it returns the client action's definition."""
        cr, uid = self.cr, self.uid
        client_action_id = self.registry('ir.actions.client').create(cr, uid, {
            'name': 'TestAction2',
            'tag': 'Test',
        })
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'client_action',
            'action_id': client_action_id,
        })
        res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertEqual(res['name'], 'TestAction2', 'ir_actions_server: incorrect return result for a client action')

    def test_40_crud_create(self):
        """Create-type action in all four modes: new, copy_current,
        new_other, copy_other."""
        cr, uid = self.cr, self.uid
        _city = 'TestCity'
        _name = 'TestNew'

        # Do: create a new record in the same model and link it
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'object_create',
            'use_create': 'new',
            'link_new_record': True,
            'link_field_id': self.res_partner_parent_field_id,
            'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name}),
                             (0, 0, {'col1': self.res_partner_city_field_id, 'value': _city})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new partner created
        pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
        self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
        partner = self.res_partner.browse(cr, uid, pids[0])
        self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
        # Test: new partner linked
        self.test_partner.refresh()
        self.assertEqual(self.test_partner.parent_id.id, pids[0], 'ir_actions_server: TODO')

        # Do: copy current record
        self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'object_create',
            'use_create': 'copy_current',
            'link_new_record': False,
            'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': 'TestCopyCurrent'}),
                             (0, 0, {'col1': self.res_partner_city_field_id, 'value': 'TestCity'})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new partner created
        pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')])  # currently res_partner overrides default['name'] whatever its value
        self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
        partner = self.res_partner.browse(cr, uid, pids[0])
        self.assertEqual(partner.city, 'TestCity', 'ir_actions_server: TODO')
        self.assertEqual(partner.country_id.id, self.test_partner.country_id.id, 'ir_actions_server: TODO')

        # Do: create a new record in another model
        self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'object_create',
            'use_create': 'new_other',
            'crud_model_id': self.res_country_model_id,
            'link_new_record': False,
            'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'}),
                             (0, 0, {'col1': self.res_country_code_field_id, 'value': 'obj.name[0:2]', 'type': 'equation'})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new country created
        cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestingPartner')])
        self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
        country = self.res_country.browse(cr, uid, cids[0])
        self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')

        # Do: copy a record in another model
        self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'object_create',
            'use_create': 'copy_other',
            'crud_model_id': self.res_country_model_id,
            'link_new_record': False,
            'ref_object': 'res.country,%s' % self.test_country_id,
            'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'}),
                             (0, 0, {'col1': self.res_country_code_field_id, 'value': 'NY', 'type': 'value'})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: new country created
        cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
        self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')
        country = self.res_country.browse(cr, uid, cids[0])
        self.assertEqual(country.code, 'NY', 'ir_actions_server: TODO')
        self.assertEqual(country.address_format, 'SuperFormat', 'ir_actions_server: TODO')

    def test_50_crud_write(self):
        """Write-type action in all three modes: current, other, expression."""
        cr, uid = self.cr, self.uid
        _name = 'TestNew'

        # Do: update the current record
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'object_write',
            'use_write': 'current',
            'fields_lines': [(0, 0, {'col1': self.res_partner_name_field_id, 'value': _name})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: partner updated
        pids = self.res_partner.search(cr, uid, [('name', 'ilike', _name)])
        self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
        partner = self.res_partner.browse(cr, uid, pids[0])
        self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')

        # Do: update another record given by reference
        self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'use_write': 'other',
            'crud_model_id': self.res_country_model_id,
            'ref_object': 'res.country,%s' % self.test_country_id,
            'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'obj.name', 'type': 'equation'})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: country renamed
        cids = self.res_country.search(cr, uid, [('name', 'ilike', 'TestNew')])
        self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')

        # Do: update a record reached through an expression
        self.ir_actions_server.write(cr, uid, [self.act_id], {'fields_lines': [[5]]})
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'use_write': 'expression',
            'crud_model_id': self.res_country_model_id,
            'write_expression': 'object.country_id',
            'fields_lines': [(0, 0, {'col1': self.res_country_name_field_id, 'value': 'NewCountry', 'type': 'value'})],
        })
        run_res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)
        self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
        # Test: country renamed
        cids = self.res_country.search(cr, uid, [('name', 'ilike', 'NewCountry')])
        self.assertEqual(len(cids), 1, 'ir_actions_server: TODO')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_60_multi(self):
        """Multi-type action: runs children in sequence, returns the last
        action definition, and rejects recursive composition."""
        cr, uid = self.cr, self.uid

        # Data: 2 server actions that will be nested
        act1_id = self.ir_actions_server.create(cr, uid, {
            'name': 'Subaction1',
            'sequence': 1,
            'model_id': self.res_partner_model_id,
            'state': 'code',
            'code': 'action = {"type": "ir.actions.act_window"}',
        })
        act2_id = self.ir_actions_server.create(cr, uid, {
            'name': 'Subaction2',
            'sequence': 2,
            'model_id': self.res_partner_model_id,
            'state': 'object_create',
            'use_create': 'copy_current',
        })
        act3_id = self.ir_actions_server.create(cr, uid, {
            'name': 'Subaction3',
            'sequence': 3,
            'model_id': self.res_partner_model_id,
            'state': 'code',
            'code': 'action = {"type": "ir.actions.act_url"}',
        })
        self.ir_actions_server.write(cr, uid, [self.act_id], {
            'state': 'multi',
            'child_ids': [(6, 0, [act1_id, act2_id, act3_id])],
        })

        # Do: run the action
        res = self.ir_actions_server.run(cr, uid, [self.act_id], context=self.context)

        # Test: new partner created
        pids = self.res_partner.search(cr, uid, [('name', 'ilike', 'TestingPartner (copy)')])  # currently res_partner overrides default['name'] whatever its value
        self.assertEqual(len(pids), 1, 'ir_actions_server: TODO')
        # Test: action returned
        self.assertEqual(res.get('type'), 'ir.actions.act_url')

        # Test loops
        with self.assertRaises(except_orm):
            self.ir_actions_server.write(cr, uid, [self.act_id], {
                'child_ids': [(6, 0, [self.act_id])]
            })
# Allow running this test file standalone (outside the OpenERP test runner).
if __name__ == '__main__':
    unittest2.main()
|
mattias-ohlsson/anaconda | refs/heads/master | pyanaconda/script.py | 1 | #
# script.py - non-interactive, script based anaconda interface
#
# Copyright (C) 2011
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brian C. Lane <bcl@redhat.com>
#
from installinterfacebase import InstallInterfaceBase
import cmdline
from cmdline import setupProgressDisplay
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("anaconda")
# Maps anaconda dispatcher step names to the *names* of callables defined in
# this module's namespace that handle them.
stepToClasses = { "install" : "setupProgressDisplay",
                  "complete": "Finished" }
class InstallInterface(cmdline.InstallInterface):
    """Non-interactive, script-driven install interface.

    Dispatches known steps through stepToClasses; any step that would
    require user interaction is a fatal error.
    """

    def enableNetwork(self):
        # Scripted installs cannot ask the user; assume we want networking.
        return True

    def display_step(self, step):
        """Run the handler registered for *step* in stepToClasses.

        Raises:
            RuntimeError: if the step has no non-interactive handler.
        """
        if step in stepToClasses:
            # Look the handler up by name in the module namespace instead of
            # building and exec'ing a "nextWin = ..." statement string
            # (dict.has_key and exec-on-string were fragile and are gone in
            # Python 3).
            nextWin = globals()[stepToClasses[step]]
            nextWin(self.anaconda)
        else:
            errtxt = _("In interactive step can't continue. (%s)" % (step,))
            print(errtxt)
            raise RuntimeError(errtxt)
def Finished(anaconda):
    """Terminal step handler: the install is done, so simply report success."""
    return 0
|
ryfeus/lambda-packs | refs/heads/master | Opencv_pil/source36/setuptools/py33compat.py | 34 | import dis
import array
import collections
try:
import html
except ImportError:
html = None
from setuptools.extern import six
from setuptools.extern.six.moves import html_parser
__metaclass__ = type  # new-style classes when this file runs on Python 2

# One decoded bytecode operation: the numeric opcode and its argument
# (None for argument-less opcodes).
OpArg = collections.namedtuple('OpArg', 'opcode arg')
class Bytecode_compat:
    """Fallback for ``dis.Bytecode`` on interpreters that predate it.

    Decodes the pre-3.6 bytecode layout where opcodes with arguments
    occupy three bytes (opcode + 16-bit little-endian argument).
    """

    def __init__(self, code):
        # code: a code object whose co_code we will walk.
        self.code = code

    def __iter__(self):
        """Yield '(op,arg)' pair for each operation in code object 'code'"""

        bytes = array.array('b', self.code.co_code)
        eof = len(self.code.co_code)

        ptr = 0
        extended_arg = 0

        while ptr < eof:

            op = bytes[ptr]

            if op >= dis.HAVE_ARGUMENT:
                # argument is the next two bytes, little-endian, plus any
                # pending EXTENDED_ARG prefix
                arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg
                ptr += 3

                if op == dis.EXTENDED_ARG:
                    # EXTENDED_ARG widens the *next* instruction's argument;
                    # promote to long on Python 2 to avoid overflow.
                    long_type = six.integer_types[-1]
                    extended_arg = arg * long_type(65536)
                    continue

            else:
                arg = None
                ptr += 1

            yield OpArg(op, arg)
# Prefer the real implementations when the running interpreter provides
# them; otherwise fall back to the compatibility shims above.
Bytecode = getattr(dis, 'Bytecode', Bytecode_compat)

unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape)
|
Epirex/android_external_chromium_org | refs/heads/cm-11.0 | build/android/pylib/host_driven/setup.py | 27 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for instrumentation host-driven tests."""
import logging
import os
import sys
import types
import test_case
import test_info_collection
import test_runner
def _GetPythonFiles(root, files):
"""Returns all files from |files| that end in 'Test.py'.
Args:
root: A directory name with python files.
files: A list of file names.
Returns:
A list with all python files that match the testing naming scheme.
"""
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
def _InferImportNameFromFile(python_file):
"""Given a file, infer the import name for that file.
Example: /usr/foo/bar/baz.py -> baz.
Args:
python_file: Path to the Python file, ostensibly to import later.
Returns:
The module name for the given file.
"""
return os.path.splitext(os.path.basename(python_file))[0]
def _GetTestModules(host_driven_test_root, is_official_build):
  """Retrieve a list of python modules that match the testing naming scheme.

  Walks the location of host-driven tests, imports them, and provides the list
  of imported modules to the caller.

  Args:
    host_driven_test_root: The path to walk, looking for the
                           pythonDrivenTests or host_driven_tests directory
    is_official_build: Whether to run only those tests marked 'official'

  Returns:
    A list of python modules under |host_driven_test_root| which match the
    testing naming scheme. Each module should define one or more classes that
    derive from HostDrivenTestCase.
  """
  # By default run all host-driven tests under pythonDrivenTests or
  # host_driven_tests; official builds additionally pick up the 'official'
  # subdirectories.
  dir_suffixes = ['host_driven_tests', 'pythonDrivenTests']
  if is_official_build:
    dir_suffixes += ['pythonDrivenTests/official', 'host_driven_tests/official']

  test_files = []
  for root, _, files in os.walk(host_driven_test_root):
    if any(root.endswith(suffix) for suffix in dir_suffixes):
      test_files += _GetPythonFiles(root, files)
  test_files.sort()

  return [_GetModuleFromFile(test_file) for test_file in test_files]
def _GetModuleFromFile(python_file):
  """Gets the python module associated with a file by importing it.

  Args:
    python_file: File to import.

  Returns:
    The module object.
  """
  # Make the file's directory importable, then import by inferred name.
  module_dir = os.path.dirname(python_file)
  sys.path.append(module_dir)
  return __import__(_InferImportNameFromFile(python_file))
def _GetTestsFromClass(test_case_class, **kwargs):
  """Returns one test object for each test method in |test_case_class|.

  Test methods are methods on the class which begin with 'test'.

  Args:
    test_case_class: Class derived from HostDrivenTestCase which contains zero
                     or more test methods.
    kwargs: Keyword args to pass into the constructor of test cases.

  Returns:
    A list of test case objects, each initialized for a particular test method.
  """
  tests = []
  for method_name in dir(test_case_class):
    if _IsTestMethod(method_name, test_case_class):
      tests.append(test_case_class(method_name, **kwargs))
  return tests
def _GetTestsFromModule(test_module, **kwargs):
  """Gets a list of test objects from |test_module|.

  Args:
    test_module: Module from which to get the set of test methods.
    kwargs: Keyword args to pass into the constructor of test cases.

  Returns:
    A list of test case objects each initialized for a particular test method
    defined in |test_module|.
  """
  collected = []
  for attr_name in dir(test_module):
    candidate = getattr(test_module, attr_name)
    if _IsTestCaseClass(candidate):
      collected.extend(_GetTestsFromClass(candidate, **kwargs))
  return collected
def _IsTestCaseClass(test_class):
  # True for strict subclasses of HostDrivenTestCase (not the base class
  # itself). The types.TypeType check (Python 2) filters out module
  # attributes that are not new-style classes before issubclass is called.
  return (type(test_class) is types.TypeType and
          issubclass(test_class, test_case.HostDrivenTestCase) and
          test_class is not test_case.HostDrivenTestCase)
def _IsTestMethod(attrname, test_case_class):
"""Checks whether this is a valid test method.
Args:
attrname: The method name.
test_case_class: The test case class.
Returns:
True if test_case_class.'attrname' is callable and it starts with 'test';
False otherwise.
"""
attr = getattr(test_case_class, attrname)
return callable(attr) and attrname.startswith('test')
def _GetAllTests(test_root, is_official_build, **kwargs):
  """Retrieve every host-driven test defined under |test_root|.

  Args:
    test_root: Path which contains host-driven test files.
    is_official_build: Whether this is an official build.
    kwargs: Keyword args to pass into the constructor of test cases.

  Returns:
    List of test case objects, one for each available test method.
  """
  if not test_root:
    return []
  found = []
  for module in _GetTestModules(test_root, is_official_build):
    for test in _GetTestsFromModule(module, **kwargs):
      found.append(test)
  return found
def InstrumentationSetup(host_driven_test_root, official_build,
                         instrumentation_options):
  """Creates a list of host-driven instrumentation tests and a runner factory.

  Args:
    host_driven_test_root: Directory where the host-driven tests are.
    official_build: True if this is an official build.
    instrumentation_options: An InstrumentationOptions object.

  Returns:
    A tuple of (TestRunnerFactory, tests).
  """
  test_collection = test_info_collection.TestInfoCollection()
  all_tests = _GetAllTests(
      host_driven_test_root, official_build,
      instrumentation_options=instrumentation_options)
  test_collection.AddTests(all_tests)

  available_tests = test_collection.GetAvailableTests(
      instrumentation_options.annotations,
      instrumentation_options.exclude_annotations,
      instrumentation_options.test_filter)
  # Lazy %-style logging: the tag list is only formatted when DEBUG is on.
  logging.debug('All available tests: %s',
                [t.tagged_name for t in available_tests])

  def TestRunnerFactory(device, shard_index):
    # Each shard gets its own runner bound to a specific device.
    return test_runner.HostDrivenTestRunner(
        device, shard_index,
        instrumentation_options.tool,
        instrumentation_options.push_deps,
        instrumentation_options.cleanup_test_files)

  return (TestRunnerFactory, available_tests)
|
ubuntu-gr/OlesOiGlosses | refs/heads/master | paradeigma.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''Minimal Greek "hello world" tutorial script.

To run:
    python paradeigma.py

Expected output:
    Γεια σου, κόσμε!
    Εκτέλεση if: Αληθές και όχι ψευδές!
    Εκτέλεση βρόγχου: 0 1 2 3 4 5 6 7 8 9
    Αυτή είναι μια υπορουτίνα με αριθμό 5.
'''


def subroutine(number):
    """Print a Greek message containing *number*."""
    print("Αυτή είναι μια υπορουτίνα με αριθμό %d." % number)


print("Γεια σου, κόσμε!\n")
if 1:
    print("Εκτέλεση if: Αληθές και όχι ψευδές!\n")
print("Εκτέλεση βρόγχου: ")
# The loop variable is bound by the for statement itself; the original
# redundant `i=0;` initialisation was removed.
for i in range(0, 10):
    print("%d " % i, end='')
print("\n")
subroutine(5)
|
badlogicmanpreet/nupic | refs/heads/master | src/nupic/swarming/hypersearch/errorcodes.py | 50 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
class ErrorCodes(object):
  """Stable string identifiers for swarming/Hypersearch failure conditions.

  Clients and log scrapers match on these "E1xxxx" codes rather than on
  free-form error messages.
  """
  streamReading = "E10001"          # Error while reading the input stream
  tooManyModelErrs = "E10002"       # Too many individual models errored out
  hypersearchLogicErr = "E10003"    # Internal Hypersearch logic error
  productionModelErr = "E10004"     # General PM error
  modelCommandFormatErr = "E10005"  # Invalid model command request object
  tooManyFailedWorkers = "E10006"   # Too many worker processes failed
  unspecifiedErr = "E10007"         # Catch-all for unclassified errors
  modelInputLostErr = "E10008"      # Input stream was garbage-collected
  requestOutOfRange = "E10009"      # If a request range is invalid
  invalidType = "E10010"            # Invalid type in a request
|
apanju/GMIO_Odoo | refs/heads/8.0 | addons/account_bank_statement_extensions/__init__.py | 442 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import res_partner_bank
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
songfj/calibre | refs/heads/master | src/calibre/gui2/convert/snb_output.py | 24 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2010, Li Fanxi <lifanxi@freemindworld.com>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.convert.snb_output_ui import Ui_Form
from calibre.gui2.convert import Widget
newline_model = None
class PluginWidget(Widget, Ui_Form):
    """Conversion-options widget for the SNB output format.

    Exposes the four ``snb_*`` plugin options in the conversion dialog.
    """
    TITLE = _('SNB Output')
    HELP = _('Options specific to')+' SNB '+_('output')
    COMMIT_NAME = 'snb_output'
    ICON = I('mimetypes/snb.png')

    def __init__(self, parent, get_option, get_help, db=None, book_id=None):
        # Register the SNB-specific option names with the base Widget so
        # they are loaded and committed automatically.
        Widget.__init__(self, parent,
                ['snb_insert_empty_line', 'snb_dont_indent_first_line',
                 'snb_hide_chapter_name','snb_full_screen'])
        self.db, self.book_id = db, book_id
        self.initialize_options(get_option, get_help, db, book_id)
|
JST9723/stj-fusion-byte1 | refs/heads/master | lib/werkzeug/wsgi.py | 312 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
    """Mark a function as a responder.

    The decorated function's return value is itself invoked as a WSGI
    application with the trailing ``(environ, start_response)`` pair.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def application(*args):
        return f(*args)(*args[-2:])
    return update_wrapper(application, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """A handy helper function that recreates the full URL for the current
    request or parts of it.  Here an example:
    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'
    This optionally it verifies that the host is in a list of trusted hosts.
    If the host is not in there it will raise a
    :exc:`~werkzeug.exceptions.SecurityError`.
    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    # The URL is assembled as a list of parts and joined once at the end;
    # `cat` is a local alias for tmp.append.
    tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
    cat = tmp.append
    if host_only:
        return uri_to_iri(''.join(tmp) + '/')
    # SCRIPT_NAME's trailing slash is stripped so exactly one '/' separates
    # it from PATH_INFO below.
    cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
    cat('/')
    if not root_only:
        cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                cat('?' + qs)
    return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
    """Check *hostname* against a list of trusted host entries.

    Ports are stripped before comparison and an entry starting with a dot
    also matches every subdomain of that entry.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against.  If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False
    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _strip_port_and_encode(name):
        # Comparison happens on the IDNA-encoded host part only.
        if ':' in name:
            name = name.rsplit(':', 1)[0]
        return _encode_idna(name)

    candidate = _strip_port_and_encode(hostname)
    for entry in trusted_list:
        wildcard = entry.startswith('.')
        if wildcard:
            entry = entry[1:]
        entry = _strip_port_and_encode(entry)
        if candidate == entry:
            return True
        if wildcard and candidate.endswith('.' + entry):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.

    The ``X-Forwarded-Host`` header wins over ``Host``; failing both, the
    host is built from ``SERVER_NAME`` and a non-default ``SERVER_PORT``.
    Optionally the result is validated against *trusted_hosts* and a
    :exc:`~werkzeug.exceptions.SecurityError` is raised on mismatch.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        host = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
    elif 'HTTP_HOST' in environ:
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        scheme_and_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
        # Only append the port when it is not the scheme's default.
        if scheme_and_port not in (('https', '443'), ('http', '80')):
            host += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        from werkzeug.exceptions import SecurityError
        raise SecurityError('Host "%s" is not trusted' % host)
    return host
def get_content_length(environ):
    """Return the request content length as a non-negative integer.

    `None` is returned when the header is missing or unparseable.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    raw = environ.get('CONTENT_LENGTH')
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None
    # Negative declared lengths are clamped to zero.
    return max(0, length)
def get_input_stream(environ, safe_fallback=True):
    """Return the WSGI input stream wrapped so it is safe to read.

    Streams without a declared content length cannot be read safely; by
    default an empty stream is substituted, otherwise the raw stream is
    handed back.  With a content length the stream is capped accordingly.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: indicates whether an empty stream should be used
                          as the safe fallback (default) or the original
                          WSGI input stream returned unchanged.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A server that sets this extension guarantees the stream terminates,
    # so it can be returned unchanged and read to the end.
    if environ.get('wsgi.input_terminated'):
        return stream

    if content_length is None:
        return _empty_stream if safe_fallback else stream

    # Cap all reads at the declared content length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Return ``QUERY_STRING`` from the WSGI environment as a native,
    ASCII-only string, applying the WSGI decoding dance on Python 3.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    raw_qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # Some browsers (notably IE) send non-ASCII query strings; quote those
    # while leaving already-valid query characters untouched.
    quoted = url_quote(raw_qs, safe=':&%=+$!*\'(),')
    return try_coerce_native(quoted)
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Return the decoded ``PATH_INFO`` from a WSGI environment.

    Performs the WSGI decoding dance on Python 3.  Passing ``charset=None``
    yields the raw bytestring instead of text.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(raw_path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Return the decoded ``SCRIPT_NAME`` from a WSGI environment.

    Performs the WSGI decoding dance on Python 3.  Passing ``charset=None``
    yields the raw bytestring instead of text.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_name = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(raw_name, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
    If the `charset` is set to `None` a bytestring is returned.
    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:
    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'
    .. versionadded:: 0.5
    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.
    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None
    script_name = environ.get('SCRIPT_NAME', '')
    # shift multiple leading slashes over: every slash stripped from
    # PATH_INFO is re-added to SCRIPT_NAME so the concatenation of the two
    # keys is preserved exactly.
    old_path = path
    path = path.lstrip('/')
    if path != old_path:
        script_name += '/' * (len(old_path) - len(path))
    if '/' not in path:
        # Last segment: PATH_INFO is exhausted and the remainder moves
        # wholesale onto SCRIPT_NAME.
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        rv = wsgi_get_bytes(path)
    else:
        segment, path = path.split('/', 1)
        environ['PATH_INFO'] = '/' + path
        environ['SCRIPT_NAME'] = script_name + segment
        rv = wsgi_get_bytes(segment)
    return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Return the next ``PATH_INFO`` segment without modifying the
    environment — the non-destructive twin of :func:`pop_path_info`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5
    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    remainder = environ.get('PATH_INFO', '').lstrip('/')
    first_segment = remainder.split('/', 1)[0]
    return to_unicode(wsgi_get_bytes(first_segment),
                      charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path.  The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment.  The URLs might also be IRIs.
    If the path info could not be determined, `None` is returned.
    Some examples:
    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True
    Instead of providing a base URL you can also pass a WSGI environment.
    .. versionadded:: 0.6
    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # Drop any userinfo ('user:pass@') and the port when it is the
        # scheme's default, so equivalent netlocs compare equal.
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc
    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]
    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        if not (base_scheme in (u'http', u'https') and \
                base_scheme == cur_scheme):
            return None
    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None
    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None
    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
    """A WSGI middleware that provides static content for development
    environments or simple server setups.  Usage is quite simple::
        import os
        from werkzeug.wsgi import SharedDataMiddleware
        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })
    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``.  This is pretty useful during development
    because a standalone media server is not required.  One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.
    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::
        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })
    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.
    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web.  If `cache` is set to
    `False` no caching headers are sent.
    Currently the middleware does not support non ASCII filenames.  If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident.  We strongly suggest using ASCII
    only file names for static files.
    The middleware will guess the mimetype using the Python `mimetype`
    module.  If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.
    .. versionchanged:: 0.5
       The cache timeout is configurable now.
    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.
    :param app: the application to wrap.  If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """
    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        # Each export value becomes a loader callable: a (package, path)
        # tuple serves package data, a string serves a file or a directory.
        for key, value in iteritems(exports):
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            from fnmatch import fnmatch
            # Overrides the is_allowed method on this instance.
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype
    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True
    def _opener(self, filename):
        # Deferred open: returns a callable yielding (fileobj, mtime, size)
        # so the file is only touched when actually served.
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )
    def get_file_loader(self, filename):
        return lambda x: (os.path.basename(filename), self._opener(filename))
    def get_package_loader(self, package, package_path):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)
        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            # Resource is inside a zip/egg: middleware start time stands in
            # for the mtime and the size is unknown (0).
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader
    def get_directory_loader(self, directory):
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader
    def generate_etag(self, mtime, file_size, real_filename):
        # Weak-ish ETag derived from mtime, size and a checksum of the name.
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(sys.getfilesystemencoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )
    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # Drop empty and '..' segments to prevent directory traversal.
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        # No export matched: forward the request to the wrapped application.
        if file_loader is None or not self.is_allowed(real_filename):
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):
    """Combine several WSGI applications under path prefixes.

    Requests whose path begins with a mounted prefix are dispatched to that
    application; everything else goes to the root application::

        app = DispatcherMiddleware(app, {
            '/app2':    app2,
            '/app3':    app3
        })
    """
    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts if mounts is not None else {}

    def __call__(self, environ, start_response):
        script = environ.get('PATH_INFO', '')
        path_info = ''
        # Peel one segment at a time off the right end of the path until a
        # mounted prefix matches; everything peeled becomes PATH_INFO.
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                break
            script, _sep, last_segment = script.rpartition('/')
            path_info = '/%s%s' % (last_segment, path_info)
        else:
            app = self.mounts.get(script, self.app)
        original_script_name = environ.get('SCRIPT_NAME', '')
        environ['SCRIPT_NAME'] = original_script_name + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):
    """Wrap a WSGI iterable so extra cleanup callbacks run on ``close``.

    WSGI requires middlewares and gateways to respect the ``close`` callback
    of an iterator.  This wrapper forwards iteration to the wrapped iterable
    and, on ``close``, first calls the iterable's own ``close`` (if any) and
    then every additional callback::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of a list.  No wrapper is needed
    when the application uses response objects and finishes processing once
    the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """
    def __init__(self, iterable, callbacks=None):
        iterator = iter(iterable)
        self._next = partial(next, iterator)
        if callbacks is None:
            pending = []
        elif callable(callbacks):
            pending = [callbacks]
        else:
            pending = list(callbacks)
        # The iterable's own close must run before the extra callbacks.
        own_close = getattr(iterator, 'close', None)
        if own_close:
            pending.insert(0, own_close)
        self._callbacks = pending

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for callback in self._callbacks:
            callback()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap a file-like object for use as a WSGI response body.

    The WSGI server's own ``wsgi.file_wrapper`` is used when available,
    otherwise the generic :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the file wrapper from the WSGI server is used it's important to not
    iterate over it from inside the application but to pass it through
    unchanged.  If you want to pass out a file wrapper inside a response
    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
    More information about file wrappers are available in :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return wrapper(file, buffer_size)
@implements_iterator
class FileWrapper(object):
    """Adapt a ``read()``-able object into an iterable of fixed-size blocks.

    Yields chunks of at most *buffer_size* bytes until the file is fully
    read.  Prefer :func:`wrap_file`, which uses the WSGI server's native
    file wrapper when one is available.

    .. versionadded:: 0.5

    When used together with a :class:`BaseResponse` the
    `direct_passthrough` mode must be enabled.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        close = getattr(self.file, 'close', None)
        if close is not None:
            close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Yield non-empty chunks from either an iterable or a read()-able
    stream; shared helper for the line and chunk iterators."""
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    if not hasattr(stream, 'read'):
        # Already an iterable of chunks; just drop empty items.
        for chunk in stream:
            if chunk:
                yield chunk
        return
    if limit is not None and not isinstance(stream, LimitedStream):
        stream = LimitedStream(stream, limit)
    read = stream.read
    while True:
        chunk = read(buffer_size)
        if not chunk:
            return
        yield chunk
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.
    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.
    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.
    .. versionadded:: 0.9
       added support for iterators as input stream.
    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    first_item = next(_iter, '')
    if not first_item:
        return
    # `s` wraps literals so they match the stream's type (str vs bytes).
    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')
    # Push the probed first chunk back in front of the iterator.
    _iter = chain((first_item,), _iter)
    def _iter_basic_lines():
        # Yield lines terminated by \r or \n; a partial line at a chunk
        # boundary is buffered until its terminator (or EOF) arrives.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)
    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.
    .. versionadded:: 0.8
    .. versionadded:: 0.9
       added support for iterators as input stream.
    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    first_item = next(_iter, '')
    if not first_item:
        return
    # Push the probed first chunk back in front of the iterator, then pick
    # str or bytes machinery to match the stream's type.
    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join
    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        # The capturing split keeps separators as items; a separator flushes
        # the accumulated pieces, everything else (including a partial piece
        # at a chunk boundary) is carried over in the buffer.
        for item in chain(buffer, chunks):
            if item == separator:
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream, limit):
        # Only the two read callables are kept; the stream object itself
        # is not stored on the instance.
        self._read = stream.read
        self._readline = stream.readline
        # Number of bytes consumed so far; compared against `limit`.
        self._pos = 0
        self.limit = limit

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            # Never read past the remaining budget; the final read may be
            # smaller than `chunk_size`.
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        # Clamp the request so we never read past the limit.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        # A short read from the underlying stream means the client went
        # away before sending the promised number of bytes.
        if to_read and len(read) != to_read:
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        # Cap the size argument at the remaining byte budget.
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        # An empty result despite a positive size means the client
        # disconnected mid-request.
        if size and not line:
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            # `size` limits the total bytes returned, never past `limit`.
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # Adjust the remaining budget by what the previous
                # readline() consumed.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
|
snowchan/flasky | refs/heads/master | app/api_1_0/decorators.py | 150 | from functools import wraps
from flask import g
from .errors import forbidden
def permission_required(permission):
    """Decorator factory: reject the request with a 403 response when the
    current API user lacks *permission*; otherwise run the view unchanged.
    """
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            if g.current_user.can(permission):
                return view(*args, **kwargs)
            return forbidden('Insufficient permissions')
        return wrapper
    return decorator
|
simalytics/askbot-devel | refs/heads/master | askbot/deps/django_authopenid/models.py | 10 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
import hashlib, random, sys, os, time
__all__ = ['Nonce', 'Association', 'UserAssociation',
'UserPasswordQueueManager', 'UserPasswordQueue']
class Nonce(models.Model):
    """OpenID nonce: a one-time value tied to a server URL, used to
    prevent replay of authentication responses.
    """
    # URL of the OpenID server the nonce was issued for.
    server_url = models.CharField(max_length=255)
    # Issue time — presumably a Unix timestamp; verify against the
    # python-openid store that populates it.
    timestamp = models.IntegerField()
    # Random salt making the (server_url, timestamp) pair unique.
    salt = models.CharField(max_length=40)

    def __unicode__(self):
        return u"Nonce: %s" % self.id
class Association(models.Model):
    """OpenID association: a shared secret negotiated with an OpenID
    server, with an issue time and lifetime.
    """
    server_url = models.TextField(max_length=2047)
    # Association handle assigned by the server.
    handle = models.CharField(max_length=255)
    secret = models.TextField(max_length=255)  # Stored base64 encoded
    # Issue time and lifetime — presumably Unix seconds; verify against
    # the python-openid store that populates them.
    issued = models.IntegerField()
    lifetime = models.IntegerField()
    # e.g. the association type string negotiated with the server.
    assoc_type = models.TextField(max_length=64)

    def __unicode__(self):
        return u"Association: %s, %s" % (self.server_url, self.handle)
class UserAssociation(models.Model):
    """
    model to manage association between openid and user
    """
    #todo: rename this field so that it sounds good for other methods
    #for example, for password provider this will hold password
    openid_url = models.CharField(blank=False, max_length=255)
    user = models.ForeignKey(User)
    #in the future this must be turned into an
    #association with a Provider record
    #to hold things like login badge, etc
    provider_name = models.CharField(max_length=64, default='unknown')
    # Null until the association is first used for a login.
    last_used_timestamp = models.DateTimeField(null=True)

    class Meta(object):
        # A user has at most one association per provider, and a given
        # identifier can belong to only one user per provider.
        unique_together = (
            ('user', 'provider_name'),
            ('openid_url', 'provider_name')
        )

    def __unicode__(self):
        return "Openid %s with user %s" % (self.openid_url, self.user)
class UserPasswordQueueManager(models.Manager):
    """ manager for UserPasswordQueue object """

    def get_new_confirm_key(self):
        """Returns a confirmation key that isn't being used.

        Loops generating candidate keys until one is not found in the
        table, so the returned key is unique at the time of the check.
        """
        # The random module is seeded when this Apache child is created.
        # Use SECRET_KEY as added salt.
        # NOTE(review): `random` + md5 is not cryptographically strong;
        # `secrets.token_hex` would be preferable, but the digest must
        # stay <= 40 chars to fit the `confirm_key` column.
        # NOTE(review): Python 2 only — `sys.maxint` and hashing a str
        # do not exist/work on Python 3.
        while 1:
            confirm_key = hashlib.md5("%s%s%s%s" % (
                random.randint(0, sys.maxint - 1), os.getpid(),
                time.time(), settings.SECRET_KEY)).hexdigest()
            try:
                # Collision check: if the key already exists, generate
                # another one; DoesNotExist means the key is free.
                self.get(confirm_key=confirm_key)
            except self.model.DoesNotExist:
                break
        return confirm_key
class UserPasswordQueue(models.Model):
    """
    model for new password queue.
    """
    # One pending password change per user.
    user = models.ForeignKey(User, unique=True)
    # The requested new password, applied once the key is confirmed.
    new_password = models.CharField(max_length=30)
    # Key the user must present (e.g. via email link) to confirm.
    confirm_key = models.CharField(max_length=40)

    objects = UserPasswordQueueManager()

    def __unicode__(self):
        return self.user.username
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.